diff --git "a/4287.jsonl" "b/4287.jsonl" new file mode 100644--- /dev/null +++ "b/4287.jsonl" @@ -0,0 +1,734 @@ +{"seq_id":"163917093","text":"\"\"\"Example script showing use of postproc.py module.\n* Input tables (Mat1 and Mat2) containing routing information \n have already been created using sfr_classes.py \n* A shapefile is supplied for model grid input instead of a MODFLOW DIS file, \n as the model grid is rotated in this case \n (the DIS file reader in SFRmaker does not support rotated grids)\n\"\"\"\n\nimport sys\nsys.path.insert(0, 'D:/ATLData/Documents/GitHub/SFR')\n#sys.path.append('D:\\JointBaseModel\\SFRMakerData\\TestModel')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom rasterstats import zonal_stats\nfrom postproc import SFRdata\n\npath = 'D:/ATLData/SFR_testing/TestModel/'\n\n# intantiate SFRdata object\nsfr = SFRdata(Mat1=path + 'SFR_GWVmat1.txt',\n Mat2=path + 'SFR_GWVmat2.txt',\n mfgridshp='ForsyGrd.shp',\n mfgridshp_node_field='CellNum', \n mfgridshp_row_field='ROW', mfgridshp_column_field='COLUMN')\n\n# create columns in Mat2 of Min and Max elevation for each segment\n# (these columns are not created by sfr_classes.py)\nsfr.update_Mat2_elevations()\n\n# update the reach elevations in Mat1 with minimum elevations from DEM\nsfr.reset_m1_streambed_top_from_dem(dem=path + 'forsy_lid')\n\n# trace routing from headwater segments to outlets; assign outlet to each segment\nsfr.map_outsegs()\n\n# creates table of segment confluences\nsfr.map_confluences()\n\n# smooth DEM elevations in segment interiors so that they decrease in downstream direction\nsfr.smooth_interior_elevations()\n\n# read in the DIS file (this is needed for some of the methods below;\n# e.g. model top elevations are added to the stream profiles by default)\nsfr.read_dis2(mfdis='Forsy.DIS', mfnam='ForsySFRMaker.nam')\n\n# plot profiles of streambed elevations in comparison to model top and DEM minimum\nsfr.plot_stream_profiles(add_profiles={'Minimum DEM elevation': 'landsurface'})\n\n# enforce only one SFR conductance for each model cell \n# (other reaches in cell assigned near-zero conductance)\nsfr.consolidate_conductance()\n\n# adjust model grid so that all SFR reaches are in layer 1\n# outputs a new DIS file for model\nsfr.reset_model_top_2streambed(outdisfile='Forsy_adjusted_to_streambed.dis')\n\n# run suite of diagnostics to test for common problems with SFR package\nsfr.run_diagnostics()\n\n# create shapefile for visualizing SFR package\nsfr.write_shapefile(outshp='Forsy.shp', prj='ForsyGrd.prj')\n\n# write updated tables\nsfr.write_tables(basename='Forsy')\n\n# write an SFR package file\nsfr.write_sfr_package(basename='Forsy')\n","sub_path":"Examples/Example_postproc_workflow2.py","file_name":"Example_postproc_workflow2.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"580314720","text":"import nltk\r\nfrom nltk.tokenize import RegexpTokenizer\r\nimport operator\r\nfrom nltk.tokenize import word_tokenize\r\n\r\n\r\n\r\ntokenizer = RegexpTokenizer(r'\\w+')\r\n\r\nfile = open(\"textrank_hotels_output2.txt\", \"r\")\r\ndata = dict()\r\ndictList = [];\r\ndictNumber = 0;\r\n\r\n# counts \r\nfor line in file:\r\n if line.strip():\r\n word = tokenizer.tokenize(line)\r\n if len(word) > 0:\r\n if (word[0].isupper()):\r\n dictList.append(data)\r\n data = dict()\r\n data[line] = 0\r\n continue\r\n continue\r\n## if word[0] not in data:\r\n## data[word[0]] = 1\r\n## else:\r\n## 
data[word[0]] = data[word[0]] + 1\r\n for single in word:\r\n if not single.isdigit():\r\n if single not in data:\r\n data[single] = 1\r\n else:\r\n data[single] = data[single] + 1\r\n \r\n else:\r\n dictList.append(data)\r\n data = dict()\r\n data[\"********* NAMELESS **********\"] = 0\r\n \r\n\r\n# print (dictList)\r\n\r\n\r\nfor dataset in dictList:\r\n dataset2 = dict(dataset);\r\n for item in dataset2:\r\n if len(item) == 1:\r\n del dataset[item]\r\n\r\n\r\n\r\nfor dataset in dictList:\r\n sorted_data = sorted(dataset.items(), key=operator.itemgetter(1))\r\n for line in sorted_data:\r\n if line[0].isupper():\r\n print (\"\\n\")\r\n print (line[0])\r\n print (\"\\n\")\r\n else:\r\n print(line)\r\n\r\n##data2 = dict(data)\r\n##\r\n### filtering:\r\n### 1. number of instances\r\n### 2. manual items\r\n### 3. parsing issue\r\n### 4. non-nouns\r\n##for item in data2:\r\n## if data[item] < 3:\r\n## del data[item]\r\n## elif \"hotel\" in item or \"stay\" in item or \"excellent\" in item or \"phoenix\" in item or \"tablet\" in item or \"android\" in item or \"free\" in item or \"book\" in item or \"mount\" in item or \"page\" in item:\r\n## del data[item]\r\n## elif len(item) == 1:\r\n## del data[item]\r\n## else:\r\n## text = word_tokenize(item)\r\n## sample_line = nltk.pos_tag(text)\r\n## if sample_line[0][1] not in 'NN':\r\n## del data[item]\r\n## \r\n### sorts dict for printing\r\n##sorted_data = sorted(data.items(), key=operator.itemgetter(1))\r\n##\r\n##for line in sorted_data:\r\n## print(line)\r\n","sub_path":"wordCountEditedv2.py","file_name":"wordCountEditedv2.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"548677601","text":"import os\nimport pickle\nimport shutil\nimport sys \nimport re\nimport time\nimport functools\nimport filecmp\nfrom subprocess import call\nfrom collections import namedtuple, OrderedDict, Hashable\n\nfrom geosoft_api import gxapi\n\nGXApiCollectionInfo = namedtuple('GXApiCollectionInfo', ['classes', 'known_classes', 'known_class_handles', 'known_methods', 'known_definitions', 'known_definition_values'])\nglobal_collection = 0\n\nclass memoized:\n '''Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n '''\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args):\n if not isinstance(args, Hashable):\n # uncacheable. 
a list, for instance.\n # better to not cache than blow up.\n return self.func(*args)\n if args in self.cache:\n return self.cache[args]\n else:\n value = self.func(*args)\n self.cache[args] = value\n return value\n def __repr__(self):\n '''Return the function's docstring.'''\n return self.func.__doc__\n def __get__(self, obj, objtype):\n '''Support instance methods.'''\n return functools.partial(self.__call__, obj)\n\ndef convert_camel_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace(\"3_d\", \"_3d\")\n\ndef is_class(type_name):\n return type_name in global_collection.known_classes or type_name in global_collection.known_class_handles\n\ndef get_class(type_name):\n if type_name in global_collection.known_classes:\n return global_collection.known_classes[type_name]\n elif type_name in global_collection.known_class_handles:\n return global_collection.known_class_handles[type_name]\n else:\n return None\n\ndef get_python_defintion_value(defintion_value):\n if defintion_value.val in global_collection.known_definition_values:\n return global_collection.known_definition_values[defintion_value.val]['defined_value'].get_python_value()\n else:\n if defintion_value.type == 'System.String':\n return '\"' + defintion_value.val + '\"'\n else:\n return \"(\" + defintion_value.get_cpp_const_type() + \")\" + defintion_value.get_value_without_casts()\n\ndef parse_type(type_name):\n if type_name in global_collection.known_class_handles:\n return global_collection.known_class_handles[type_name].name\n else:\n return type_name\n\ndef get_cpp_return_cast(type_name):\n if type_name == \"bool\":\n return \"0 != \"\n elif type_name in global_collection.known_definitions:\n return \"(\" + type_name + \")\"\n else:\n return \"\"\n\ndef get_is_cpp_long_equivalent(type_name):\n if type_name == 'int32_t':\n return True\n elif type_name in global_collection.known_definitions:\n return not global_collection.known_definitions[type_name].constant\n return False\n\ndef get_cpp_type(type_name, no_pointer=False):\n if is_class(type_name):\n type_name = \"GX\" + parse_type(type_name)\n if not no_pointer:\n return type_name + \"Ptr\"\n else:\n return type_name\n else:\n return {\n 'real': 'double',\n 'int': 'int32_t',\n 'intval': 'int32_t',\n 'string': 'const gx_string_type&',\n 'var string': 'gx_string_type&',\n 'CRC': 'int32_t',\n 'WND': 'int32_t',\n 'PTMP': 'int32_t',\n 'FILTER': 'int32_t',\n 'DGW_OBJ': 'int32_t',\n 'TB_FIELD': 'int32_t',\n 'DB_SELECT': 'int32_t',\n 'DB_SYMB': 'int32_t',\n 'META_TOKEN': 'int32_t',\n 'HANDLE': 'int32_t',\n 'GEO_BOOL': 'bool'\n }.get(type_name, type_name)\n\n\ndef restructured_directive(start, contents):\n indent = '\\n' + ' ' * (len(start) + 1)\n return start + \" \" + indent.join(contents.strip().split('\\n')) + \"\\n\"\n\ndef word_to_ref(word, allow_classes):\n if allow_classes and word in global_collection.known_classes:\n return \"\\\\ :class:`geosoft.gxapi.GX\" + word + \"`\\\\ \"\n elif word in global_collection.known_methods:\n method_info = global_collection.known_methods[word]\n method = method_info['method']\n gxclass = method_info['gxclass']\n return \"\\\\ :func:`geosoft.gxapi.GX\" + gxclass.name + \".\" + gxclass.py_method_name(method) + \"`\\\\ \"\n else:\n return word\n\ndef all_refs_repl(matchobj):\n return word_to_ref(matchobj.group(0), True)\n\ndef subst_all_refs(description):\n return re.sub('\\w+', all_refs_repl, description)\n\ndef non_class_refs_repl(matchobj):\n return 
word_to_ref(matchobj.group(0), False)\n\ndef subst_non_class_refs(description):\n return re.sub('\\w+', non_class_refs_repl, description)\n\ndef define_refs_repl(matchobj):\n if matchobj.group(1) == \"GEO_BOOL\":\n return \"bool\"\n else:\n definition_name = matchobj.group(1)\n if definition_name in global_collection.known_definitions:\n definition = global_collection.known_definitions[definition_name]\n if definition.null_handle:\n return \"\\\\ :func:`geosoft.gxapi.GX\" + definition_name.replace(\"_NULL\", \"\") + \".null()`\\\\ \"\n return \"\\\\ :ref:`\" + matchobj.group(1) + \"`\\\\ \"\n\ndef subst_defines(description):\n return re.sub('(.+?)', define_refs_repl, description)\n\ndef docstring_fixes(description):\n description = description.replace(\"GS_TRUE\", \"``True``\")\n description = description.replace(\"GS_FALSE\", \"``False``\")\n description = subst_defines(description)\n description = description.replace(\"*\", \"\\\\ `*`\\\\ \")\n description = description.replace(\"|\", \"\\\\ `|`\\\\ \")\n return description\n\ndef docstring_literal_para(description, para_id='.. parsed-literal::', sub_all_refs=False):\n if sub_all_refs:\n description = subst_all_refs(description)\n else:\n description = subst_non_class_refs(description)\n description = docstring_fixes(description)\n return '\\n\"\\\\n' + para_id + '\\\\n\\\\n\"\\n\" ' + '\\\\n\"\\n\" '.join(description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\").split('\\n')) + '\\\\n\\\\n\"\\n'\n\ndef docstring_literal_note(description):\n return '\\n\"\\\\n\\\\n**Note:**\\\\n\\\\n\"\\n' + docstring_literal_para(description)\n\ndef docstring_literal_seealso(description):\n return docstring_literal_para(description, para_id = '.. seealso::', sub_all_refs=True)\n\ndef docstring_literal_version(version):\n return '\\n\"\\\\n.. 
versionadded:: ' + version + '\\\\n\\\\n\"\\n'\n\ndef multi_line_fixup(description):\n return description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\")\n\ndef docstring_multi_line(description):\n description = docstring_fixes(description)\n return '\\n\"' + '\\\\n\"\\n\"'.join(description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\").split('\\n')) + '\\\\n\"\\n'\n\ndef generate_sphinx_description(description):\n return (' '.join(description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\").split('\\n'))).strip()\n\nclass defined_value_class(gxapi.defined_value.typeDefinition()):\n @memoized\n def get_value_without_casts(self):\n return self.val.replace(\"(unsigned long) \", \"\").replace(\"(__GS_INT64) \", \"\").replace(\"(__GS_UINT64) \", \"\")\n\n @memoized\n def get_cpp_value(self):\n if self.type == 'System.String':\n return 'gx_string_literal(\"' + self.val + '\")'\n else:\n return self.get_value_without_casts()\n\n @memoized\n def get_python_value(self):\n return get_python_defintion_value(self)\n\n @memoized\n def get_cpp_const_type(self):\n if self.cpp_type:\n return self.cpp_type\n if self.type == 'System.String':\n return \"gx_string_char_type*\"\n if self.type == 'System.Int32':\n return \"int32_t\"\n if self.type == 'System.Single':\n return \"float\"\n if self.type == 'System.Double':\n return \"double\"\n\n @memoized\n def get_spec_type(self):\n if self.cpp_type:\n return 'Type.{}'.format(self.cpp_type.upper())\n elif self.type == 'System.String':\n return \"Type.STRING\"\n elif self.type == 'System.Int32':\n return 'Type.INT32_T'\n elif self.type == 'System.Single':\n return 'Type.FLOAT'\n elif self.type == 'System.Double':\n return 'Type.DOUBLE'\n else:\n return '\"{}\"'.format(self.type)\n\n @memoized\n def get_sphinx_docstring(self):\n return docstring_fixes(subst_non_class_refs(self.description))\n\ngxapi.defined_value.typeDefinition()._SetSupersedingClass(defined_value_class)\t\t\t \n\nclass definition_class(gxapi.definition.typeDefinition()):\n @memoized\n def get_cpp_const_name(self, defined_value):\n if self.single_constant:\n return defined_value.name\n else:\n value_name = defined_value.name\n parts = self.name.split('_')\n for part in parts:\n if value_name.startswith(part + \"_\"):\n value_name = value_name[len(part) + 1:]\n return value_name\n\n @memoized\n def get_cpp_const_declaration(self, defined_value):\n return 'static const ' + defined_value.get_cpp_const_type() + \" \" + self.get_cpp_const_name(defined_value) + \" = \" + defined_value.get_cpp_value() + \";\"\n\n @memoized\n def get_cpp_defined_value_name(self, defined_value):\n if self.cpp_prefix:\n return self.cpp_prefix + defined_value.name\n else:\n return defined_value.name\n\n @memoized\n def get_sphinx_docstring(self):\n return docstring_fixes(subst_non_class_refs(self.description))\ngxapi.definition.typeDefinition()._SetSupersedingClass(definition_class)\t\t\t\n\ndef resolve_enum_type_from_description(type_name, description):\n if description and type_name == \"int\":\n defines = re.findall(\"(.+?)\", description)\n if len(defines) == 1:\n if not defines[0] in global_collection.known_definitions:\n raise Exception('Unknown definition indicated for parameter or return value: ' + defines[0])\n return defines[0]\n return type_name\n \nclass parameter_class(gxapi.parameter.typeDefinition()):\n @memoized\n def is_class(self):\n return is_class(self.type)\n\n @memoized\n def is_var(self):\n return self.type.startswith(\"var \")\n\n @memoized\n def 
is_var_type(self):\n return self.type != \"var string\" and self.is_var()\n\n @memoized\n def get_spec_type(self):\n type = self.type[4:] if self.is_var() else self.type\n if type == 'string':\n return \"Type.STRING\"\n elif type == 'int' or type == \"intval\":\n return 'Type.INT32_T'\n elif type == 'real':\n return 'Type.DOUBLE'\n else:\n return '\"{}\"'.format(self.type)\n\n @memoized\n def get_type(self):\n if self.is_var_type():\n return self.type[4:]\n else:\n return self.type\n\n @memoized\n def __cpp_type(self, no_pointer):\n return get_cpp_type(resolve_enum_type_from_description(self.get_type(), self.description), no_pointer)\n\n def cpp_type(self, no_pointer=False):\n return self.__cpp_type(no_pointer)\n\n @memoized\n def cpp_python_wrap_type(self):\n if self.is_cpp_long_equivalent():\n type_name = \"int32_t\"\n else:\n type_name = self.cpp_type()\n\n if type_name == \"gx_string_type&\":\n return \"str_ref&\"\n elif self.is_var_type():\n if type_name == \"int32_t\":\n return \"int_ref&\"\n elif type_name == \"double\":\n return \"float_ref&\"\n elif type_name == \"bool\":\n return \"bool_ref&\"\n else:\n raise Exception(\"Unexpected var type: \" + type_name)\n else:\n return type_name\n\n @memoized\n def cpp_python_docstring_type(self):\n if self.is_cpp_long_equivalent():\n type_name = \"int32_t\"\n else:\n type_name = self.cpp_type(no_pointer=True)\n if type_name == \"double\":\n type_name = \"float\"\n elif type_name == \"int32_t\":\n type_name = \"int\"\n elif type_name == \"const gx_string_type&\":\n type_name = \"str\"\n elif type_name == \"gx_string_type&\":\n type_name = \"str_ref\"\n if self.is_var_type():\n type_name = type_name + \"_ref\"\n return type_name\n\n @memoized\n def cpp_python_wrap_cast(self):\n type_name = self.cpp_type()\n\n if not type_name == \"int32_t\" and self.is_cpp_long_equivalent():\n if self.is_var_type():\n return \"(\" + type_name + \"&)\"\n else:\n return \"(\" + type_name + \")\"\n else:\n return \"\"\n\n @memoized\n def is_val_type(self):\n return self.get_type() == 'intval' or self.get_type() == 'HWND' or self.get_type() == 'HDC'\n\n @memoized\n def is_param_in_type(self):\n return self.get_type().startswith(\"void (\")\n\n @memoized\n def is_cpp_long_equivalent(self):\n if not self.is_val_type():\n return get_is_cpp_long_equivalent(self.cpp_type())\n else:\n return False\n\n @memoized\n def cpp_cast_start(self):\n if self.is_cpp_long_equivalent():\n if self.is_var_type():\n return \"reinterpret_cast(\"\n else:\n return \"reinterpret_cast(\"\n else:\n return \"\"\n\n @memoized\n def cpp_cast_end(self):\n if self.is_cpp_long_equivalent():\n return \")\"\n else:\n return \"\"\n\n @memoized\n def get_python_docstring(self):\n return docstring_literal_para(self.description)\ngxapi.parameter.typeDefinition()._SetSupersedingClass(parameter_class)\n\n\nclass method_class(gxapi.method.typeDefinition()):\n @memoized\n def external_name(self):\n if self.externalname:\n return self.externalname\n else:\n return self.name\n\n @memoized\n def returns_class(self):\n return is_class(self.returnval.type)\n\n @memoized\n def get_return_class(self):\n return get_class(self.returnval.type)\n\n @memoized\n def __cpp_return_type(self, no_pointer):\n return get_cpp_type(resolve_enum_type_from_description(self.returnval.type, self.returnval.description), no_pointer = no_pointer)\n\n def cpp_return_type(self, no_pointer=False):\n return self.__cpp_return_type(no_pointer)\n\n @memoized\n def __python_wrap_return_type(self, no_pointer):\n type_name = 
self.cpp_return_type(no_pointer = no_pointer)\n if get_is_cpp_long_equivalent(type_name):\n return \"int32_t\"\n else:\n return type_name\n\n def python_wrap_return_type(self, no_pointer=False):\n return self.__python_wrap_return_type(no_pointer)\n\n @memoized\n def cpp_return_cast(self):\n return get_cpp_return_cast(self.cpp_return_type())\n\n def get_spec_lic(self):\n if self.license.startswith('_public'):\n return 'Availability.PUBLIC'\n elif self.license.startswith('_license'):\n return 'Availability.LICENSED'\n elif self.license.startswith('_ext'):\n return 'Availability.EXTENSION'\n else:\n return 'Availability.UNKNOWN'\n\n def get_spec_ret_type(self):\n if self.returnval.type == 'int':\n return 'Type.INT32_T'\n elif self.returnval.type == 'real':\n return 'Type.DOUBLE'\n elif self.returnval.type == 'void':\n return 'Type.VOID'\n else:\n return '\"{}\"'.format(self.returnval.type)\n\n @memoized\n def is_app(self):\n return self.license.endswith('_app')\ngxapi.method.typeDefinition()._SetSupersedingClass(method_class)\n\nclass ext_parameter_info:\n def __init__(self, index=None, parameter=None, size_parameter=None, size_parameter_index=None, real_index=None):\n self.index = index\n self.parameter = parameter\n self.size_parameter_index = size_parameter_index\n self.size_parameter = size_parameter\n self.real_index = real_index\n\nclass int_parameter_info:\n def __init__(self, self_handle=False, ext_index=None, parameter=None, size_parameter=None, size_parameter_index=None,gxclass=None):\n self.self_handle = self_handle\n self.parameter = parameter\n self.size_parameter_index = size_parameter_index\n self.size_parameter = size_parameter\n self.ext_index = ext_index\n self.gxclass = gxclass\n\n\ndef get_rest_docstring_type_name(type_name):\n if type_name in [\"float\", \"bool\", \"int\", \"str\", \"None\"]:\n return type_name\n else:\n return \":class:`geosoft.gxapi.\" + type_name + \"`\"\n\nclass gx_class(gxapi.gxclass.typeDefinition()):\n @memoized\n def is_method_static(self, method):\n if len(method.parameters.parameter) > 0:\n return parse_type(method.parameters.parameter[0].type) != self.name\n else:\n return True\n\n @memoized\n def get_method_ext_parameter_infos(self, method):\n ext_parameter_infos = []\n static = self.is_method_static(method)\n size_of_parameters_set = set()\n for parameter in method.parameters.parameter:\n if parameter.size_of_param:\n size_of_parameters_set.add(parameter.size_of_param)\n\n index = 1\n for i, parameter in enumerate(method.parameters.parameter):\n if (i == 0 and not static) or i in size_of_parameters_set:\n continue\n size_parameter_index = parameter.size_of_param\n size_parameter = None\n if size_parameter_index:\n size_parameter = method.parameters.parameter[size_parameter_index]\n ext_parameter_infos.append(ext_parameter_info(index=index, parameter=parameter, size_parameter=size_parameter, size_parameter_index=size_parameter_index, real_index=i))\n index = index + 1\n return ext_parameter_infos\n\n @memoized\n def get_method_int_parameter_infos(self, method):\n int_parameter_infos = []\n static = self.is_method_static(method)\n size_of_parameters_set = set()\n for parameter in method.parameters.parameter:\n if parameter.size_of_param:\n size_of_parameters_set.add(parameter.size_of_param)\n\n index = 1\n for i, parameter in enumerate(method.parameters.parameter):\n self_handle = False\n if i in size_of_parameters_set:\n ext_index = None\n elif i == 0 and not static:\n ext_index = None\n self_handle = True\n else:\n ext_index = index\n index = 
index + 1\n size_parameter_index = parameter.size_of_param\n size_parameter = None\n if size_parameter_index:\n size_parameter = method.parameters.parameter[size_parameter_index]\n int_parameter_infos.append(int_parameter_info(self_handle=self_handle, ext_index=ext_index, parameter=parameter, size_parameter=size_parameter, size_parameter_index=size_parameter_index, gxclass=get_class(parameter.type)))\n return int_parameter_infos\n\n @memoized\n def is_static(self):\n for methodgroup in self.methodgroups.methodgroup:\n for method in methodgroup.method:\n if not self.is_method_static(method):\n return False\n return True\n\n def _ext_method_name_camel(self, method):\n if method.name == \"iCheckError_SYS\":\n return \"iCheckError\"\n method_postfix = \"_\" + self.name\n method_name = method.external_name()\n if method.name.endswith(method_postfix):\n method_name = method_name[0 : len(method_name) - len(method_postfix)]\n if method_name.startswith(\"_\") or (method_name.startswith(\"I\") and len(method_name) > 2 and (method_name[1] == 'i' or (method_name[1] >= 'A' and method_name[1] <= 'Z'))):\n return method_name[1:]\n return method_name\n\n def _ext_method_name_no_camel(self, method):\n return convert_camel_case(self._ext_method_name_camel(method))\n\n def _ext_method_name_real_to_double(self, method):\n return self._ext_method_name_no_camel((method)).replace(\"_real\", \"_double\")\n\n def _ext_method_name_no_polish(self, method):\n method_name = self._ext_method_name_real_to_double(method)\n return_type = method.cpp_return_type()\n if method_name.startswith(\"i_\") or method_name.startswith(\"r_\"):\n return method_name[2:]\n else:\n return method_name\n\n @memoized\n def ext_method_name(self, method):\n method_name = self._ext_method_name_no_polish(method)\n if method.cpp_pre:\n method_name = method.cpp_pre + method_name\n if method.cpp_post:\n method_name = method_name + method.cpp_post\n if self.name == \"MATH\":\n method_name = method_name + \"_\" # Stops keyword and macro collisions everywhere\n return method_name\n \n @memoized\n def py_method_name(self, method):\n method_name = self.ext_method_name(method)\n return method_name.strip(\"_\")\n\n def get_python_docstring(self):\n docstring = docstring_literal_para(self.description)\n if self.notes:\n docstring = docstring + docstring_literal_note(self.notes)\n return docstring\n\n def generate_sphinx_description(self):\n return generate_sphinx_description(self.description)\n\n def get_python_method_docstring(self, method):\n return_type = method.python_wrap_return_type(no_pointer = True)\n if return_type == \"void\":\n return_type = \"None\"\n elif return_type == \"double\":\n return_type = \"float\"\n elif return_type == \"int32_t\":\n return_type = \"int\"\n\n signature = self.py_method_name(method) + \"(\"\n restructured_text_params = \"\"\n\n first_parameter = False\n for parameter_info in self.get_method_ext_parameter_infos(method):\n if first_parameter:\n signature = signature + \", \"\n else:\n first_parameter = True\n type_name = parameter_info.parameter.cpp_python_docstring_type()\n arg_name = \"arg\" + str(parameter_info.index)\n\n signature = signature + \"(\" + type_name + \")\" + arg_name\n\n restructured_text_params = restructured_text_params + restructured_directive(\":param \" + arg_name + \":\", parameter_info.parameter.description)\n restructured_text_params = restructured_text_params + restructured_directive(\":type \" + arg_name + \":\", get_rest_docstring_type_name(type_name))\n\n signature = signature + \") -> \" 
+ return_type + \":\"\n if method.returnval.description:\n restructured_text_params = restructured_text_params + restructured_directive(\":returns:\", method.returnval.description)\n restructured_text_params = restructured_text_params + restructured_directive(\":rtype:\", get_rest_docstring_type_name(return_type))\n\n docstring = docstring_multi_line(signature) + docstring_literal_para(method.description) + docstring_multi_line(restructured_text_params) + docstring_literal_version(method.available)\n\n if method.notes:\n docstring = docstring + docstring_literal_note(method.notes)\n\n if method.see_also:\n docstring = docstring + docstring_literal_seealso(method.see_also)\n\n return docstring\n\ngxapi.gxclass.typeDefinition()._SetSupersedingClass(gx_class)\n\ndef object_from_pickled_file(pickled_file_path):\n with open(pickled_file_path, 'rb') as f:\n return pickle.load(f)\n\ndef pickle_object_with_makedir(object, pickled_file_path):\n pickled_file_dir = os.path.dirname(pickled_file_path)\n if not os.path.exists(pickled_file_dir):\n os.makedirs(pickled_file_dir)\n with open(pickled_file_path, 'wb') as f:\n pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)\n\ndef gxapi_pickle(api_file_path, pickled_file_path):\n pickle_object_with_makedir(gxapi.CreateFromDocument(open(api_file_path).read()), pickled_file_path)\n\n\ndef gxapi_api_collection_pickle(pickle_source_dirs, pickled_file_path):\n api_coll = GXApiCollectionInfo([], {}, {}, {}, {}, {})\n\n files_dict = {}\n for source_dir in pickle_source_dirs.split(';'):\n for root, subFolders, files in os.walk(source_dir):\n for file in files:\n files_dict[file] = os.path.join(root, file)\n\n for file in OrderedDict(sorted(files_dict.items())):\n gxclass = object_from_pickled_file(files_dict[file])\n api_coll.known_classes[gxclass.name] = gxclass\n if (gxclass.handlename):\n api_coll.known_class_handles[gxclass.handlename] = gxclass\n if gxclass.name == \"GEOSOFT\":\n api_coll.classes.insert(0, gxclass)\n else:\n api_coll.classes.append(gxclass)\n\n for methodgroup in gxclass.methodgroups.methodgroup:\n for method in methodgroup.method:\n api_coll.known_methods[method.name] = { 'gxclass': gxclass, 'method': method }\n\n for definition in gxclass.definitions.definition:\n api_coll.known_definitions[definition.name] = definition\n for defined_value in definition.defined_value:\n api_coll.known_definition_values[defined_value.name] = { 'gxclass': gxclass, 'definition': definition, 'defined_value': defined_value }\n\n pickle_object_with_makedir(api_coll, pickled_file_path)\n\ndef render_template(j2env, namespace_parts, build_version, output_dir, template_name, sort_classes=False):\n output_file = os.path.join(output_dir, template_name)\n print('Rendering: ' + output_file)\n template = j2env.get_template(template_name)\n open(output_file, 'w+').write(template.render(build_version=build_version, classes=sorted(global_collection.classes , key=lambda gxclass: gxclass.name) if sort_classes else global_collection.classes, namespace_parts=namespace_parts))\n\n\ndef render_python_imports(j2env, namespace_parts, output_dir):\n template = j2env.get_template('python_import.cpp')\n\n for gxclass in global_collection.classes:\n # TODO Expose CGEO::GetPtrVM and CGEO::GetPtrVV the way we do in C# (remove\n # comments from python_module.cpp when completed)\n # TODO expose void * and callback methods in PG class in a sensible way and\n # remove the nocpp=\"true\" atribute on them\n if not gxclass.name == \"GEO\":\n output_file = os.path.join(output_dir, \"python_import_\" 
+ gxclass.name + \".cpp\")\n print('Rendering: ' + output_file)\n open(output_file, 'w+').write(template.render(gxclass=gxclass, namespace_parts=namespace_parts))\n\n\ndef render_sphinx_rsts(j2env, namespace_parts, output_dir):\n template = j2env.get_template('class.rst')\n\n for gxclass in global_collection.classes:\n # TODO Expose CGEO::GetPtrVM and CGEO::GetPtrVV the way we do in C# (remove\n # comments from python_module.cpp when completed)\n # TODO expose void * and callback methods in PG class in a sensible way and\n # remove the nocpp=\"true\" atribute on them\n if not gxclass.name == \"GEO\":\n output_file = os.path.join(output_dir, \"GX\" + gxclass.name + \".rst\")\n print('Rendering: ' + output_file)\n open(output_file, 'w+').write(template.render(gxclass=gxclass, namespace_parts=namespace_parts))\n\ndef generate_code(pickled_collection_file, namespace, build_version, output_dir):\n from jinja2 import Environment, FileSystemLoader\n global global_collection\n global __j2env\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n namespace_parts = namespace.split('::')\n\n tools_root = os.getenv('TOOLS_ROOT')\n templates_dir = os.path.join(tools_root, 'msbuild', 'gx_python', 'gxapi_templates')\n astyle_tool = os.path.join(tools_root, 'utils', 'astyle.exe')\n\n j2env = Environment(loader=FileSystemLoader(templates_dir),\n trim_blocks = True,\n lstrip_blocks = True)\n\n global_collection = object_from_pickled_file(pickled_collection_file)\n\n start = time.perf_counter()\n render_template(j2env, namespace_parts, build_version, output_dir, 'gxcpp_geogx.h')\n render_template(j2env, namespace_parts, build_version, output_dir, 'python_ref_wrappers.h')\n render_template(j2env, namespace_parts, build_version, output_dir, 'python_module.cpp')\n render_template(j2env, namespace_parts, build_version, output_dir, 'index.rst', sort_classes=True)\n render_template(j2env, namespace_parts, build_version, output_dir, 'toc.rst', sort_classes=True)\n render_python_imports(j2env, namespace_parts, output_dir)\n render_sphinx_rsts(j2env, namespace_parts, output_dir)\n\n print('Formatting source code...')\n if not 0 == call([astyle_tool, '-n', '-N', '--style=allman', os.path.join(output_dir, '*.h')]):\n raise Exception(astyle_tool + \" error!\")\n if not 0 == call([astyle_tool, '-n', '-N', '--style=allman', os.path.join(output_dir, '*.cpp')]):\n raise Exception(astyle_tool + \" error!\")\n elapsed = time.perf_counter() - start\n print(\"Generation completed in %s seconds\" % elapsed)\n","sub_path":"docs/transform/gxgenutils.py","file_name":"gxgenutils.py","file_ext":"py","file_size_in_byte":29439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"449590914","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib64/python3.6/site-packages/ioflo/base/test/_testFraming.py\n# Compiled at: 2017-12-17 08:35:26\n# Size of source mod 2**32: 2613 bytes\n\n\ndef TestFrame():\n \"\"\"Module Common self test\n\n \"\"\"\n import acting, poking, needing, goaling, doing, traiting, fiating, wanting\n try:\n Frame.Clear()\n f1 = Frame(name='Primero')\n f2 = Frame()\n f3 = Frame()\n f1.attach(f2)\n f1.attach(f3)\n f4 = Frame()\n f5 = Frame()\n f2.attach(f4)\n f3.attach(f5)\n Act = acting.Act\n Transact = acting.Transact\n need = acting.need\n goal = acting.goal\n deed = acting.deed\n trait = acting.trait\n spec = acting.spec\n fiat = 
acting.fiat\n a = Act(action=need, act=(need.checkDepth), parms=dict(depth=5.0))\n f2.beacts.append(a)\n a = Act(action=goal, act=(goal.setDepth), parms=dict(depth=2.0))\n f2.enacts.append(a)\n a = Act(action=trait, act=(trait.useDepth), parms=dict(depth=3.0))\n f2.reacts.append(a)\n a = Act(action=deed, act=(deed.doDepth), parms=dict(depth=1.0))\n f2.reacts.append(a)\n a = Act(action=trait, act=(trait.useDepth), parms=dict(depth=6.0))\n f2.exacts.append(a)\n t = Transact()\n a = Act(action=need, act=(need.checkDepth), parms=dict(depth=4.0))\n t.needs.append(a)\n t.far = f5\n f2.preacts.append(t)\n a = Act(action=deed, act=(deed.doDepth), parms=dict(depth=1.0))\n f2.preacts.append(a)\n t = Transact()\n a = Act(action=need, act=(need.checkDepth), parms=dict(depth=1.5))\n t.needs.append(a)\n t.far = f4\n f5.preacts.append(t)\n f6 = Frame()\n a = Act(action=trait, act=(trait.useDepth), parms=dict(depth=10.0))\n f6.reacts.append(a)\n fr1 = Framer()\n fr1.first = f6\n f3.auxes.append(fr1)\n fr2 = Framer()\n fr2.first = f1\n fr2.runner.send(START)\n for i in xrange(3):\n status = fr2.runner.send(RUN)\n\n except excepting.ParameterError as ex:\n console.terse(ex)\n raise\n\n return f1\n\n\ndef Test():\n \"\"\"Module Common self test\n\n \"\"\"\n TestFrame()\n\n\nif __name__ == '__main__':\n Test()","sub_path":"pycfiles/ioflo-py3.6-dev-1.7.5.linux-x86_64.tar/_testFraming.cpython-36.py","file_name":"_testFraming.cpython-36.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"555527107","text":"import re\nfrom ..where_abstract import WhereAbstract\nfrom ...mapping.field import Field\nfrom ...common.common_define import CommonDefine\n\n\nclass Where(WhereAbstract):\n sql = \"\"\n param_dict = {}\n operator_dict = {\n \"eq\": \"=\",\n \"neq\": \"<>\",\n \"gt\": \">\",\n \"ge\": \">=\",\n \"lt\": \"<\",\n \"le\": \"<=\",\n \"in\": \"IN\",\n \"nin\": \"NOT IN\",\n \"like\": \"LIKE\",\n \"fis\": \"FIND_IN_SET\"\n }\n\n def add_and(self, *args):\n args = args + (self,)\n self.sql = self.__get_sql_expression(args, 1)\n return self\n\n def add_or(self, *args):\n args = args + (self,)\n self.sql = self.__get_sql_expression(args, 2)\n return self\n\n def get_and(self, *args):\n result = self.__get_sql_expression(args, 1)\n return result\n\n def get_or(self, *args):\n result = self.__get_sql_expression(args, 2)\n return result\n\n def or_and(self, *args):\n sql = self.__get_sql_expression(args, 1)\n return self.add_or(sql)\n\n def and_or(self, *args):\n sql = self.__get_sql_expression(args, 2)\n return self.add_and(sql)\n\n def __get_sql_expression(self, args, set_type):\n if args is None:\n return \"\"\n\n sql = \"\"\n for item in args:\n temp_sql = \"\"\n if type(item) == str and item != \"\":\n temp_sql = item\n elif isinstance(item, Where):\n temp_sql = item.sql\n self.param_dict = {**self.param_dict, **item.param_dict}\n else:\n expression_dict = self.get_expression(item, self.param_dict)\n temp_sql = expression_dict[\"sql\"]\n self.param_dict = expression_dict[\"param_dict\"]\n\n temp_str = re.sub(r\"\\(.*\\)\", \"\", temp_sql)\n if (\" AND \" in temp_str and set_type == 2) or (\" OR \" in temp_str and set_type == 1):\n temp_sql = \"(\" + temp_sql + \")\"\n\n if not temp_sql:\n continue\n\n if not sql:\n sql = temp_sql\n else:\n if set_type == 1:\n sql += \" AND \" + temp_sql\n else:\n sql += \" OR \" + temp_sql\n return sql\n\n def get_expression(self, condition, param_dict):\n sql = \"\"\n field_name = 
condition.field_name\n        if condition.alias_table_name is not None:\n            field_name = condition.alias_table_name + \".\" + field_name\n\n        if isinstance(condition.value, Field):\n            if condition.value.alias_table_name is not None:\n                sql = field_name + self.operator_dict[\n                    condition.operator] + condition.value.alias_table_name + \".\" + condition.value.field_name\n            else:\n                sql = field_name + \\\n                    self.operator_dict[condition.operator] + \\\n                    condition.value.field_name\n        else:\n\n            if condition.value is None:\n                if condition.operator == CommonDefine.OPERATOR_EQ:\n                    sql = field_name + \" IS NULL\"\n                elif condition.operator == CommonDefine.OPERATOR_NEQ:\n                    sql = field_name + \" IS NOT NULL\"\n            else:\n                param_name = \"p\" + str(CommonDefine.SQL_PARAMETER_INDEX)\n                CommonDefine.SQL_PARAMETER_INDEX += 1\n                if condition.operator == CommonDefine.OPERATOR_IN or condition.operator == CommonDefine.OPERATOR_NIN:\n                    # build one named parameter per IN-list value\n                    in_str = \"\"\n                    if isinstance(condition.value, str):\n                        temp_value = condition.value.split(',')\n                    else:\n                        temp_value = condition.value\n                    if isinstance(temp_value, list) or isinstance(temp_value, tuple):\n                        in_index = 0\n                        for value in temp_value:\n                            if value:\n                                temp_name = param_name + \"_\" + str(in_index)\n                                in_str += \":\" + temp_name + \",\"\n                                param_dict[temp_name] = value\n                                in_index += 1\n                                CommonDefine.SQL_PARAMETER_INDEX += 1\n\n                    in_str = in_str.strip(',')\n                    sql = field_name + \" \" + \\\n                        self.operator_dict[condition.operator] + \\\n                        \"(\" + in_str + \")\"\n                elif (condition.operator == CommonDefine.OPERATOR_FIND_IN_SET):\n                    param_dict[param_name] = condition.value\n                    sql = self.operator_dict[condition.operator] + \\\n                        \"(:\" + param_name + \",\" + field_name + \")\"\n                else:\n                    param_dict[param_name] = condition.value\n                    sql = field_name + \" \" + \\\n                        self.operator_dict[condition.operator] + \\\n                        \" :\" + param_name\n\n        return {\"sql\": sql, \"param_dict\": param_dict}\n","sub_path":"lingorm/drivers/mysql/where.py","file_name":"where.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"344067628","text":"#!/usr/bin/env python3\n\n# Author: wwong3\n# Date: 2019-Jan-31\n# Purpose: 03-python Grad Grid Homework\n\n\n\"\"\"grid\"\"\"\n\nimport os\nimport sys\n\ndef main():\n    num = sys.argv[1:]\n    \n\n# Error message with usage\n    if len(num) == 0:\n        print('Usage: {} NUM'.format(os.path.basename(sys.argv[0])))\n        sys.exit(1)\n\n# Error message if more than one argument\n    if len(num)>1:\n        print('Usage: {} NUM'.format(os.path.basename(sys.argv[0])))\n        sys.exit(1) \n\n# First if statement: Error if num is not between 2 and 9\n# Elif statement: If num is between 2 and 9, will display grid \n    num=int(num[0]) #changes num from a list to an integer\n    if not 2<= num <10:\n        print('NUM ({}) must be between 2 and 9'.format(num))\n        sys.exit(1)\n    elif 1 < num < 10:\n        last_num=num+1 # to include last number through indexing\n        for j in range (1,last_num): # iterate through cols\n            for i in range(1,last_num): # iterate through rows \n                print('{:>3d}'.format(i+num*(j-1)),end='')\n                # i+num*(j-1) is pattern for grid output\n                # don't use {:>2d} with end=' ' > will give you extra space at end of line\n                #{:>3d} is string formatting\n                # 3=num of character field, d=decimal, > means right-aligned\n            print('') #print new line\n    \n    exit(0)\nmain()\n","sub_path":"assignments/03-python-grad/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"134722792","text":"from bs4 
import BeautifulSoup\nimport requests\n\nurl1 = 'https://webcams.nyctmc.org/multiview2.php'\nr1 = requests.get(url1)\nsoup1 = BeautifulSoup(r1.text, 'html.parser')\n\nhtmlTables = [[\"Manhattan\",\"tableCam\"],[\"Brooklyn\",\"tableCam2\"]]\nfor htmlTable in htmlTables:\n    cameraList = soup1.find('table', id=htmlTable[1])\n\n    cameraIDs = []\n    cameraURLs = []\n    cameraNames = []\n    boroughName = htmlTable[0]\n\n    for i in cameraList('tr'):\n        idRow = i.find('input')\n        try:\n            if 'value' in idRow.attrs:\n                cameraIDs.append(idRow.get('value'))\n        except:\n            pass\n\n    for cameraID in cameraIDs: \n        url2 = 'https://webcams.nyctmc.org/multiview2.php?listcam=' + cameraID\n        r2 = requests.get(url2)\n        soup2 = BeautifulSoup(r2.text, 'html.parser')\n        cameraURL = soup2.find('img', id=\"repCamView__ct0_imgLink\").get('src')\n        cameraURLs.append(cameraURL)\n        cameraName = soup2.find('td', {\"class\": \"TitleCam\"}).getText()\n        cameraNames.append(cameraName)\n\n    for i in range(0,len(cameraIDs)):\n        print(cameraNames[i] + \",\" + cameraIDs[i] + \",\" + cameraURLs[i] + \",\" + boroughName)\n","sub_path":"scraper/nycdotcams.py","file_name":"nycdotcams.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"233032055","text":"# Create a list and fill it with elements of various data types.\r\n# Implement a script that checks the data type of each element.\r\n# Use the type() function to check the type.\r\n# The list elements do not have to be requested from the user; they can be specified explicitly in the program.\r\n\r\nmy_list = [\r\n    # integer (int)\r\n    12345,\r\n\r\n    # floating-point number (float)\r\n    12345.99,\r\n\r\n    # string (str)\r\n    '12345',\r\n    'one two three four five',\r\n\r\n    # list (list)\r\n    [12345, '12345'],\r\n    list('12345'),\r\n\r\n    # tuple (tuple)\r\n    (12345, '12345'),\r\n    tuple('12345'),\r\n\r\n    # set (set and frozenset)\r\n    {1, 2, 3},\r\n    frozenset({1, 2, 3}),\r\n    set('123'),\r\n    frozenset('123'),\r\n\r\n    # dictionary (dict)\r\n    {'one': 1, 'two': 2},\r\n    dict(one=1, two=2),\r\n\r\n    # boolean (bool)\r\n    True,\r\n    False,\r\n\r\n    # bytes (bytes and bytearray)\r\n    b'text',\r\n    bytes(b'text'),\r\n    bytearray(b'text'),\r\n\r\n    # NoneType\r\n    None,\r\n]\r\n\r\nfor el in my_list:\r\n    print(f'Element = {el}, element type = {type(el)}')\r\n","sub_path":"2. Lesson_2/les02_1.py","file_name":"les02_1.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"330247433","text":"\"\"\"\nYou are given an m x n integer matrix heights representing the height\nof each unit cell in a continent. The Pacific ocean touches the continent's\nleft and top edges, and the Atlantic ocean touches the continent's right and bottom edges.\n\nWater can only flow in four directions: up, down, left, and right. 
Water\nflows from a cell to an adjacent one with an equal or lower height.\n\nReturn a list of grid coordinates where water can flow to both the\nPacific and Atlantic oceans.\n\nStatus: Incomplete\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:\n n_rows, n_cols = len(heights), len(heights[0])\n visit = [[\"unvisited\"] * n_cols for _ in range(n_rows)]\n atlantic = [[False] * n_cols for _ in range(n_rows - 1)] + [[True] * n_cols]\n pacific = [[True] * n_cols] + [[False] * n_cols for _ in range(n_rows - 1)]\n res = []\n\n for row in range(n_rows):\n pacific[row][0] = atlantic[row][-1] = True\n\n print(atlantic)\n print(pacific)\n\n def get_valid_neighbours(row, col):\n res = [(row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)]\n res = [(row, col) for row, col in res\n if 0 <= row < n_rows and 0 <= col < n_cols] # and visit[row][col] == \"unvisited\"\n return [(r, c) for r, c in res if heights[r][c] <= heights[row][col]]\n\n def dfs(row, col):\n print(row, col, atlantic)\n if visit[row][col] == \"visited\":\n return\n visit[row][col] = \"visiting\"\n if not (atlantic[row][col] and pacific[row][col]):\n neighbours = get_valid_neighbours(row, col)\n print(neighbours)\n for neighbour_row, neighbour_col in neighbours:\n dfs(neighbour_row, neighbour_col)\n if atlantic[neighbour_row][neighbour_col]:\n atlantic[row][col] = True\n if pacific[neighbour_row][neighbour_col]:\n pacific[row][col] = True\n visit[row][col] = \"visited\"\n if atlantic[row][col] and pacific[row][col]:\n res.append([row,col])\n\n for row in range(n_rows):\n for col in range(n_cols):\n if visit[row][col] == \"unvisited\":\n dfs(row, col)\n print(\"---\")\n\n print(atlantic)\n print(pacific)\n\n return res\n\n\ns = Solution()\nprint(s.pacificAtlantic([[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]))\n# print(s.pacificAtlantic([[2, 1], [1, 2]]))\n","sub_path":"Pacific Atlantic Water Flow.py","file_name":"Pacific Atlantic Water Flow.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"298355270","text":"import csv\nfrom pygal.maps.world import COUNTRIES, World\nfrom pygal.style import RotateStyle as RS, LightColorizedStyle as LCS\n\nfrom country_codes import get_country_code\n\nfilename='inflation_csv.csv'\nwith open(filename) as f:\n\tdata=csv.reader(f)\n\theader_row=next(data)\n\theader_row=next(data)\n\theader_row=next(data)\n\theader_row=next(data)\n\theader_row=next(data)\n#countries={}\n\tcountries_list={}\n\t\n\tfor countries in data:\n\t\tcountry=countries[0]\n\t\tvalue=countries[51]\n\t\tprint(value)\n\t\tcode=get_country_code(country)\n\t\tif code:\n\t\t\tcountries_list[code]=value\n\n# Group the countries into 3 population levels.\ncc_pops_1, cc_pops_2, cc_pops_3 = {},{},{}\nfor cc, pop in countries_list.items():\n\tif pop < 1:\n\t\tcc_pops_1[cc] = pop\n\telif pop < 10:\n\t\tcc_pops_2[cc] = pop\n\telse:\n\t\tcc_pops_3[cc] = pop\n\t\t\n# Styling world maps in pygal\nwm_style = RS('#108080',base_style=LCS)\nwm= World(style=wm_style)\n\n# See how many countries are in each level.\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\n\n#wm=World()\nwm.title='World GDP in 2016, by Country'\nwm.add('0-10m', cc_pops_1)\nwm.add('10m-1b', cc_pops_2)\nwm.add('>1bn', 
cc_pops_3)\n\nwm.render_to_file('world_gdp_2016.svg')\n","sub_path":"chapter16/inflation.py","file_name":"inflation.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"403551638","text":"'''PSR Exercise sheet 5'''\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as optimize\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.interpolate import spline\nfrom sklearn.decomposition import PCA\nimport random \n\n#comment\n# known dimensions to load the data\nM = 50\nm = 677970\n\ndef load_data(fname, skipset_columns = {}):\n print (\"Data loading...\")\n # load from folder\n raw_array = np.fromfile(fname, dtype=np.float32)\n D = [[ raw_array[item_idx] for item_idx in range(row_idx*M, (row_idx+1)*M) if item_idx not in {x+row_idx*M for x in skipset_columns}] for row_idx in range(m) if row_idx%100==0]\n print(\"Data loaded.\")\n return D\n\ndef plotPCA2d(data):\n plt.title(\"PCA\")\n plt.xlabel(\"First dim of pca\")\n plt.ylabel(\"Second dim of pca\")\n s = [2 for n in range(len(data))]\n plt.scatter([val[0] for val in data], [ val[1] for val in data], s=s)\n plt.grid(True)\n plt.savefig('pca_result2d.png')\n plt.show()\n\ndef plotPCA3d(data):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n s = [1 for n in range(len(data))]\n ax.scatter([val[0] for val in data], [ val[1] for val in data], [ val[2] for val in data], s=s)\n plt.grid(True)\n plt.savefig('pca_result3d.png')\n plt.show()\n\ndef GMM(data, numberOfClusters):\n init()\n # expectation()\n # maximization()\n\n # return means, covarianceMatrices, priors\n\ndef random_points_mean(data):\n #choose 5 random values\n #ran_val = random.sample(data, 5)\n indices = [x for x in range(0, len(data))]\n ran_ind = random.sample(indices, 5)\n #print(\"random indices\", ran_ind)\n ran_val = []\n for i in ran_ind:\n ran_val.append(data[i])\n #print(\"random values\", ran_val)\n sum_x = 0\n sum_y = 0\n #print(\"type\", type(ran_val[0][1]))\n for i in range(0, 5):\n sum_x += ran_val[i][0]\n sum_y += ran_val[i][1]\n\n print(ran_val[i][0])\n mean_x = sum_x / 5\n mean_y = sum_y / 5\n mean = [mean_x, mean_y]\n #print(\"our mean\", mean_x, mean_y)\n return mean\n\ndef init(data, numberOfClusters):\n # means of 5 random points\n init_means = []\n\n #calculate means by 5 random points for each potential cluster:\n for i in range(0, numberOfClusters):\n init_means.append(random_points_mean(data))\n print(\"means:\", init_means)\n\n #Initializing the covariance matrices to identity matrices.\n covarianceMatrices = []\n for i in range(0, numberOfClusters):\n covarianceMatrices.append([[1,0],[0,1]])\n #print (first) two initial covariance matrices:\n print(\"first covariance matrix: \", covarianceMatrices[0])\n print(\"second covariance matrix: \", covarianceMatrices[1])\n\n\n priors = []\n\n # init step\n\n mixture_components = [1/numberOfClusters for index in range(numberOfClusters)]\n\n\n#ex5\ndef rmse(vec1, vec2):\n N = len(vec1)\n distance = 0\n if(len(vec1) == len(vec2)):\n for idx in range(N):\n distance+=(vec1[idx]-vec2[idx])*(vec1[idx]-vec2[idx])\n distance/=N\n return math.sqrt(distance)\n\n# ex6\ndef associate(data, initial_means):\n result_vector = []\n clusters_number = len(initial_means)\n if(clusters_number>0):\n for idx, value in enumerate(data):\n nearest_cluster_index = len(initial_means)-1\n max_value = np.inf\n for idx_mean, mean_vector in enumerate(initial_means):\n current_distance = rmse(mean_vector, 
data[idx] if type(data[idx]) is list else [data[idx]])\n if max_value >= current_distance:\n max_value = current_distance\n nearest_cluster_index = idx_mean\n result_vector.append(nearest_cluster_index)\n\n return result_vector\n\n \ndef add_vec(vec1, vec2):\n if type(vec1) is not list:\n vec1 = [vec1]\n if type(vec2) is not list:\n vec2 = [vec2] \n print (vec1, vec2)\n return [vec2[idx]+vec1[idx] for idx in range(len(vec1))]\n\n#7-8\ndef compute_means(corpus, association, k):\n cluster_centroids = [ [0] for row_idx in range(k)] # k*N matrix, k - number of clusters, N - data dim, e.g. 49\n number_of_data_points_per_cluster = [0 for x in range(k)]\n for idx, current_data_point in enumerate(corpus):\n number_of_data_points_per_cluster[association[idx]]+=1\n cluster_centroids[association[idx]] += current_data_point\n cluster_centroids = [[val/number_of_data_points_per_cluster[j] for val in cluster_centroids[j]] for j in range(k)]\n return cluster_centroids\n\ndef recompute(data_matrix, init_clusters_means):\n print(\"init_clusters_means\", init_clusters_means)\n old_association = associate(data_matrix, init_clusters_means)\n new_means = compute_means(data_matrix, old_association, len(init_clusters_means))\n print(\"new means\", new_means)\n new_association = associate(data_matrix, new_means)\n number_of_point_that_changed_clusters = 0\n for idx in range(len(new_association)):\n if new_association[idx]!=old_association[idx]:\n number_of_point_that_changed_clusters+=1\n return number_of_point_that_changed_clusters, new_means, new_association\n\n\ndef kmeans(data_matrix, init_clusters_means):\n number_of_point_that_changed_clusters, new_means, new_association = recompute(data_matrix, init_clusters_means)\n i = 0\n print(\"Amount of points that changed clusters: \", number_of_point_that_changed_clusters)\n while number_of_point_that_changed_clusters!=0:\n i+=1\n print (\"Iteration - \", i)\n number_of_point_that_changed_clusters, new_means, new_association = recompute(data_matrix, new_means)\n print(\"Amount of points that changed clusters: \", number_of_point_that_changed_clusters)\n return new_means, new_association\n\ndef get_mean_and_centered(X):\n mean_matrix = []\n set_vers_matrix_centered = X\n for index_av in range(len(X[0])):\n mean_matrix.append(np.mean(X[:,index_av])) \n for index_experiment in range(len(X)):\n set_vers_matrix_centered[index_experiment][index_av] = X[index_experiment][index_av]-mean_matrix[index_av]\n return mean_matrix, set_vers_matrix_centered\n\n\ncorpus = load_data(\"corpus\", {0})\nprint(len(corpus))\n# index_min = corpus.index(min(corpus))\n# index_max = corpus.index(max(corpus))\n\npca2 = PCA(n_components=2)\npca_result2 = pca2.fit_transform(corpus)\nprint(\"PCA result: \\n\", pca_result2)\n#print(\"PCA data type \", type(pca_result2))\n\npca3 = PCA(n_components=3)\npca_result3 = pca3.fit_transform(corpus)\nprint(\"PCA result: \\n\", pca_result3)\n# first_dim = [val[0] for val in pca_result]\n# print(\"First dimension of PCA result: \\n\", first_dim)\nplotPCA2d(pca_result2)\nplotPCA3d(pca_result3)\ninit(pca_result2, 2)\n# # trying to get most distinct points as initial values for clusters\n# init_clusters = [[-1.2], [1.0]] \n\n# print(\"Clusters initialized: \", init_clusters)\n# new_means, last_association = kmeans(first_dim, init_clusters)\n# print(\"new_means: \", new_means)\n# for cluster_idx in range(len(init_clusters)):\n# print(\"#12: mean vector \", cluster_idx, \" \\n\", new_means[cluster_idx])\n\n# #11\n# plotting(first_dim, new_means, 
init_clusters)\n\n# #computing wigths in a way as computing probabilities of point to be in each cluster\n# # for two clusters if K1 - amount of point from cluster1, and K2- cluster2,\n# # so weight(cluster1) = K1/(K1+K2) and weight(cluster2) = K2/(K1+K2),\n# # in common case we have weight(clusterN) = Kn/amount(data_point)\n\n# mean_matrix, centered = get_mean_and_centered(first_dim)\n\n# print('Mean vector: \\n',mean_matrix)\n# print('Centered matrix: \\n',centered)\n# print('Covariance matrix: \\n', np.cov(first_dim.T))\n# cluster_weights = [0 for i in range(len(init_clusters))] # initialize with zero weights\n# for idx in range(len(last_association)):\n# cluster_weights[last_association[idx]]+=1\n# cluster_weights = [value/len(first_dim) for value in cluster_weights]\n# print(\"weights of clusters: \", cluster_weights)\n","sub_path":"ex6/source/PSR6.py","file_name":"PSR6.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"460073198","text":"#pvprograms.weebly.com\n#Sums in Loop\n\ndef main():\n test = int(input()) #number of test cases\n nums = [] #List of lists\n for i in range(test):\n a = input()\n #split the string - separated with space\n a = a.split(' ')\n #turn them to int first before appending to the main lists\n a[0] = int(a[0])\n a[1] = int(a[1])\n #append a to the nums list\n nums.append(a)\n #print the sum of the sub list - a\n for a in nums:\n print(sum(a), end=\" \")\n\n#call main function\nmain()\n","sub_path":"PY/Sums in Loop.py","file_name":"Sums in Loop.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"347693283","text":"import json\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom ex import exceptions\n\ndynamodb = boto3.resource(\"dynamodb\")\ntable = dynamodb.Table(\"DemoServerless\")\n\ndef get(codigo):\n\tprint(codigo)\n\t\n\ttry:\n\t\tresponse = table.get_item(\n\t\t\tKey = {\n\t\t\t\t\"hk\": \"EMPLEADO\",\n\t\t\t\t\"sk\": codigo\n\t\t\t}\n\t\t)\n\t\tprint(response)\n\n\texcept ClientError as e:\n\t\traise exceptions.InternalServerError(e.response['Error']['Message'])\n\n\telse:\n\t\tif \"Item\" not in response:\n\t\t\traise exceptions.NotFound(\"No existe el código '{}'\".format(codigo))\n\n\t\tregistro = response['Item']\n\t\tregistro[\"codigo\"] = registro[\"sk\"]\n\t\tdel registro[\"hk\"]\n\t\tdel registro[\"sk\"]\n\t\tdel registro[\"busqueda\"]\n\t\t\n\t\treturn registro\n","sub_path":"api/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"551965830","text":"try:\r\n import argparse\r\nexcept ImportError:\r\n print(\"Please check if module 'argparse' is installed\")\r\n quit()\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--tab', type=argparse.FileType('r'), required=True)\r\nparser.add_argument('--kegg', type=str, required=True, help=\"The ID of the analyzed pathway in the KEGG.\\n\"\r\n \"For instance: ko04310 or ko04350\")\r\nparser.add_argument('--out', type=str, required=True, help=\"Prefix for output files\")\r\nargs = parser.parse_args()\r\n\r\n\r\ndef table_parsing(tab, contig_dict):\r\n header = tab.readline()\r\n for line in tab:\r\n description = line.strip().split(\"\\t\")\r\n contig, pathway, sites_significant, sites_head_cluster, sites_tail_cluster, head_significant, head_cluster, \\\r\n 
tail_significant, tail_cluster = description[0], description[12], description[23], description[24], \\\r\n description[25], description[26], description[27], description[28], \\\r\n description[29]\r\n contig_dict[contig] = {\"pathway\": pathway, \"sites_significant\": sites_significant,\r\n \"sites_head_cluster\": sites_head_cluster, \"sites_tail_cluster\": sites_tail_cluster,\r\n \"head_significant\": head_significant, \"head_cluster\": head_cluster,\r\n \"tail_significant\": tail_significant, \"tail_cluster\": tail_cluster}\r\n\r\n\r\ndef append_contig_to_significant(kegg_dict, contig, values, values_tag, kegg_tag):\r\n if values[values_tag] != '-':\r\n kegg_dict[kegg_tag].append(contig)\r\n\r\n\r\ndef append_contig_to_cluster(kegg_dict, contig, values, values_tag):\r\n if values[values_tag] != '-':\r\n if values[values_tag] not in kegg_dict.keys():\r\n kegg_dict[values[values_tag]] = []\r\n kegg_dict[values[values_tag]].append(contig)\r\n\r\n\r\ndef kegg_summary(contig_dict, kegg, kegg_dict):\r\n for contig, values in contig_dict.items():\r\n if kegg in values[\"pathway\"].split(\",\"):\r\n kegg_dict[\"total\"].append(contig)\r\n append_contig_to_significant(kegg_dict, contig, values, \"sites_significant\", \"sites\")\r\n append_contig_to_significant(kegg_dict, contig, values, \"head_significant\", \"head\")\r\n append_contig_to_significant(kegg_dict, contig, values, \"tail_significant\", \"tail\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"sites_head_cluster\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"sites_tail_cluster\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"head_cluster\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"tail_cluster\")\r\n\r\n\r\ndef output_writing(out, kegg, kegg_dict):\r\n cluster_keys = [cluster for cluster in kegg_dict.keys() if cluster not in [\"total\", \"sites\", \"head\", \"tail\"]]\r\n\r\n with open(\"{out}.{kegg}_summary.tsv\".format(out=out, kegg=kegg), 'a') as output:\r\n output.write(\"### In total, {count} sequences are assigned to {kegg} pathway\\n\"\r\n \"### Among them:\\n\"\r\n \"### {sites} were previously classified as sites-significant:\\t{sites_contigs}\\n\"\r\n \"### {head} were previously classified as head-significant:\\t{head_contigs}\\n\"\r\n \"### {tail} were previously classified as tail-significant:\\t{tail_contigs}\\n\"\r\n \"Cluster\\tNumber of 'pathway'-contigs in cluster\\tContigs (comma separated)\\n\".format(\r\n count=len(set(kegg_dict[\"total\"])), kegg=kegg,\r\n sites=len(set(kegg_dict[\"sites\"])), sites_contigs=\",\".join(set(kegg_dict[\"sites\"])),\r\n head=len(set(kegg_dict[\"head\"])), head_contigs=\",\".join(set(kegg_dict[\"head\"])),\r\n tail=len(set(kegg_dict[\"tail\"])), tail_contigs=\",\".join(set(kegg_dict[\"tail\"]))\r\n ))\r\n for cluster in cluster_keys:\r\n output.write(\"{cluster}\\t{length}\\t{contigs}\\n\".format(cluster=cluster, length=len(set(kegg_dict[cluster])),\r\n contigs=\",\".join(set(kegg_dict[cluster]))))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n contig_dict, kegg_dict = {}, {\"total\": [], \"sites\": [], \"head\": [], \"tail\": []}\r\n print(\"***** Input table parsing *****\")\r\n table_parsing(args.tab, contig_dict)\r\n print(\"***** Search for sequences related to {kegg} *****\".format(kegg=args.kegg))\r\n kegg_summary(contig_dict, args.kegg, kegg_dict)\r\n print(\"***** Output file writing *****\")\r\n output_writing(args.out, args.kegg, 
kegg_dict)","sub_path":"Pdum_KEGG_analysis.py","file_name":"Pdum_KEGG_analysis.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"255867402","text":"from aws_resources.dynamo import build_update_expression, build_update_attributes_dictionary, table\nfrom boto3.dynamodb.conditions import Key\nimport uuid\n\ndef resolve_hang(obj, info, id):\n hang = table().query(\n KeyConditionExpression=Key('id').eq(id)\n )['Items'][0]\n return hang\n\ndef resolve_hangs(obj, info):\n ids = obj['hangs']\n return map(lambda id: resolve_hang(obj, info, id), ids)\n\ndef create_hang(obj, info, hang):\n id = str(uuid.uuid4())\n hang['id'] = id\n table().put_item(Item=hang)\n return {\n 'hang': hang,\n 'message': 'success',\n 'code': 200,\n 'success': True\n }\n\ndef update_hang(obj, info, hang):\n attributes_to_update = build_update_attributes_dictionary(hang)\n update_expression = build_update_expression(hang)\n table().update_item(\n Key={\n 'id': hang['id']\n },\n UpdateExpression=update_expression,\n ExpressionAttributeValues=attributes_to_update,\n )\n updated_hang = table().query(\n KeyConditionExpression=Key('id').eq(hang['id'])\n )['Items'][0]\n\n return {\n 'hang': updated_hang,\n 'message': 'success',\n 'code': 200,\n 'success': True\n }","sub_path":"features/Hangs/hang.py","file_name":"hang.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"2681962","text":"REQUEST_HEADERS = {\n 'sec-fetch-dest': 'document',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-site': 'cross-site',\n 'sec-fetch-user': '?1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',\n}\n\nURL = 'https://overclockers.ru/lab?offset=-180&max=200'\n\nMAX_ARTICLES_ON_PAGE = 200\n\nRESULT_FILENAME = 'result.xlsx'\nNEW_SHEET = 'Нова сторінка'\nCOLUMN_SIZE = (150,\n 100,\n 20,\n 20,\n 20)\nTITLE_COLUMNS = ('Посилання',\n 'Назва',\n 'Автор',\n 'Дата створення',\n 'Категорія')\n\nDB_FILENAME = 'db.sqlite'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"235762876","text":"import sys\nimport tkinter as tk\nimport sqlite3\nfrom pathlib import Path\nfrom random import randrange\nimport time\n\nfrom logging import getLogger, StreamHandler, DEBUG\n\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nimport sqlite_manager\n\ndatabase_path = str(Path(__file__).parent.parent / Path(\"database/alias_snippets\"))\nsm = sqlite_manager.SqliteManager(db_path=database_path)\n\nclass TkinterManager():\n def __init__(self, title):\n self.title = title\n\n def remove_empty(self, text):\n return text.replace(\" \", \"\").replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\'\", \"\\\"\")\n\n def address_specific_letter(self, text):\n return text.replace(\"\\'\", \"\\\"\")\n\n def insert_alias_snippet(self):\n try:\n alias = self.remove_empty(entry1.get())\n snippet = self.address_specific_letter(entry2.get())\n\n query = \"\"\"\n insert into alias_snippets(alias_name,snippet) values('%s','%s')\n \"\"\" % (alias, snippet)\n\n messages = []\n loop_num = 1\n for i in range(loop_num):\n res_query = 
sm.execute_query(query,loop_num=5,sleep_time=0.2)\n messages.append(str(res_query))\n\n #sm.display_message(message=\"\\n\".join(messages))\n if res_query is None:\n sm.display_message(message=\"Failed\")\n else:\n sm.display_message(message=\"Ok\")\n \n # if res_query is not None:\n # sm.display_message(message=f\"Had success in Registering alias {alias}\")\n # else:\n # sm.display_message(message=f\"Failed to register {alias}\")\n # sm.display_message(message=f\"{res_query}\")\n\n except Exception as e:\n sm.display_message(message=e)\n finally:\n root.destroy()\n\n def register_snippet(self):\n global entry1, entry2, root\n root = tk.Tk()\n root.title(\"Productive Alias-Snippets\")\n\n w = 300\n h = 200\n\n ws = root.winfo_screenwidth()\n hs = root.winfo_screenheight()\n x = (ws / 2) - (w / 2)\n y = (hs / 2) - (h / 2) - 200\n\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n label1 = tk.Label(root, text=self.title, font=(\"\", 16), height=2)\n label1.pack(fill=\"x\")\n frame1 = tk.Frame(root, pady=10)\n frame1.pack()\n label2 = tk.Label(frame1, font=(\"\", 14), text=\" alias \")\n label2.pack(side=\"left\")\n entry1 = tk.Entry(frame1, font=(\"\", 14), justify=\"center\", width=15)\n entry1.pack(side=\"left\")\n frame2 = tk.Frame(root, pady=10)\n frame2.pack()\n label3 = tk.Label(frame2, font=(\"\", 14), text=\"snippet\")\n label3.pack(side=\"left\")\n entry2 = tk.Entry(frame2, font=(\"\", 14), justify=\"center\", width=15)\n entry2.pack(side=\"left\")\n button4 = tk.Button(root, text=\"Register\", font=(\"\", 16), width=20, bg=\"gray\",\n command=self.insert_alias_snippet)\n button4.pack()\n root.mainloop()\n\n def update_database(self):\n\n try:\n alias = self.remove_empty(entry1.get())\n snippet = self.address_specific_letter(entry2.get())\n\n message = list()\n query = \"\"\"\n update alias_snippets set deleted_at = CURRENT_TIMESTAMP where alias_name = '%s'\n \"\"\" % alias\n res = sm.execute_query(query, is_update=True)\n logger.debug(res)\n\n deleted_alias = alias + str(randrange(9999999999999999))\n query = \"\"\"\n update alias_snippets set alias_name = '%s' where alias_name = '%s'\n \"\"\" % (deleted_alias, alias)\n res = sm.execute_query(query, is_update=True)\n logger.debug(res)\n\n query = \"\"\"\n insert into alias_snippets(alias_name,snippet) values('%s','%s')\n \"\"\" % (alias, snippet)\n\n res = sm.execute_query(query, is_update=False)\n sm.display_message(message=res)\n except Exception as e:\n sm.display_message(message=e)\n finally:\n root.destroy()\n\n def update_snippet(self):\n global entry1, entry2, root\n root = tk.Tk()\n root.title(\"Productive Alias-Snippets\")\n\n w = 300\n h = 200\n\n ws = root.winfo_screenwidth()\n hs = root.winfo_screenheight()\n x = (ws / 2) - (w / 2)\n y = (hs / 2) - (h / 2) - 200\n\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n label1 = tk.Label(root, text=self.title, font=(\"\", 16), height=2)\n label1.pack(fill=\"x\")\n frame1 = tk.Frame(root, pady=10)\n frame1.pack()\n label2 = tk.Label(frame1, font=(\"\", 14), text=\" alias \")\n label2.pack(side=\"left\")\n entry1 = tk.Entry(frame1, font=(\"\", 14), justify=\"center\", width=15)\n entry1.pack(side=\"left\")\n frame2 = tk.Frame(root, pady=10)\n frame2.pack()\n label3 = tk.Label(frame2, font=(\"\", 14), text=\"snippet\")\n label3.pack(side=\"left\")\n entry2 = tk.Entry(frame2, font=(\"\", 14), justify=\"center\", width=15)\n entry2.pack(side=\"left\")\n button4 = tk.Button(root, text=\"Register\", font=(\"\", 16), width=20, bg=\"gray\",\n command=self.update_database)\n 
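# \"Register\" calls update_database, which soft-deletes the existing alias row (sets deleted_at), renames it with a random suffix, and then inserts the new alias/snippet pair\n 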
button4.pack()\n root.mainloop()\n","sub_path":"modules/tkinter_manager.py","file_name":"tkinter_manager.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"125490143","text":"from flask import Flask, Blueprint, render_template, request, flash, redirect, url_for\nfrom flask_login import login_required, current_user\nimport os, datetime, logging, json\n\nimport utils\nfrom models import Problem, Submission\nfrom exts import db\n\nfrom judger import judge, manage\n\nlog = logging.getLogger('Judger')\n\nsubmit_page = Blueprint('submit_page',\n\t\t\t\t\t\t__name__,\n\t\t\t\t\t\ttemplate_folder=os.path.join(utils.cur_path(__file__), 'templates'))\n\n@submit_page.route('/submit', methods=['GET','POST'])\n@login_required\ndef submit_handle():\n\tif request.method == 'POST':\n\t\tpid = int(request.form['probID'])\n\t\tlang = request.form['lang']\n\t\tcode = request.form['code']\n\n\t\tprob = Problem.query.get(pid)\n\t\tif prob:\n\t\t\tinfo = json.loads(prob.info)\n\t\t\tnum_td = int(info['td_num'])\n\n\t\t\tdate_time = datetime.datetime.now()\n\t\t\tsub = Submission(result='Wait'\n\t\t\t\t\t, resTime=-1.0, resMem=-1.0\n\t\t\t\t\t, code=code, lang=lang, rank=-1, time=date_time\n\t\t\t\t\t, account=current_user, problem=prob)\n\t\t\tdb.session.add(sub)\n\t\t\tdb.session.commit()\n\n\t\t\tlog.debug('Add problem pid={} subid={}'.format(prob.problem_id, sub.submit_id))\n\n\t\t\tmanage.add_judger(sub.submit_id, prob.problem_id, judge.JUDGE_CPP, code, 3.0, 65536, num_td)\n\n\t\treturn redirect(url_for('submissions_page.submissions_handle'))\n\t# pid\n\tpid = ''\n\tif 'pid' in request.args:\n\t\tpid = request.args['pid']\n\t# not if\n\treturn render_template('submit.html', problem_id=pid)\n","sub_path":"page/submit/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"594873954","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/core/tango/util/formatter.py\n# Compiled at: 2019-08-19 15:09:29\n__all__ = [\n 'tangoFormatter']\nfrom taurus.core.units import Quantity\n\ndef tangoFormatter(dtype=None, **kwargs):\n \"\"\"\n The tango formatter callable. 
Returns a format string based on\n the `format` Tango Attribute configuration (Display.Format in Tango DB)\n\n :param dtype: (type) type of the value object\n :param kwargs: other keyword arguments (ignored)\n\n :return: the string formatting\n \"\"\"\n if dtype is Quantity:\n fmt = '{:~{bc.modelObj.format_spec}}'\n else:\n fmt = '{:{bc.modelObj.format_spec}}'\n return fmt","sub_path":"pycfiles/taurus-4.6.1-py2.7/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"69205884","text":"#!/usr/bin/python\n\nfrom scipy.spatial import KDTree\nimport matplotlib.pyplot as plt\nplt.rc(\"savefig\", dpi=150)\nimport numpy as np\n\npoints = np.array([[1,1],[1,-1],[-1,-1],[2,-2]])\ntree = KDTree(points)\nx = np.linspace(-2.5, 2.5, 100)\ny = np.linspace(-2.5, 2.5, 100)\nxx, yy = np.meshgrid(x, y)\nxy = np.c_[xx.ravel(), yy.ravel()]\nplt.pcolor(x, y, tree.query(xy)[1].reshape(100, 100))\nplt.plot(points[:,0], points[:,1], 'ko')\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\n#plt.savefig(\"3b_Voronoi.png\")\n#plt.savefig(\"3b_Voronoi.ps\")\nplt.show()\n","sub_path":"CS6350_Machine-Learning/HW1/images/3a_voronoi.py","file_name":"3a_voronoi.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"423315526","text":"# pylint: disable=arguments-differ, redefined-builtin, missing-docstring, no-member, invalid-name, line-too-long, not-callable\nimport torch\n\nfrom e3nn import rs\nfrom e3nn.non_linearities import GatedBlock\nfrom e3nn.non_linearities.rescaled_act import swish, sigmoid\nfrom e3nn.linear import Linear\n\n\nclass DepthwiseConvolution(torch.nn.Module):\n def __init__(self, Rs_in, Rs_out, Rs_mid1, Rs_mid2, groups, convolution, linear=Linear, scalar_activation=swish, gate_activation=sigmoid):\n super().__init__()\n\n act_in = GatedBlock(groups * Rs_mid1, scalar_activation, gate_activation)\n self.lin_in = linear(Rs_in, act_in.Rs_in)\n self.act_in = act_in\n\n act_mid = GatedBlock(Rs_mid2, scalar_activation, gate_activation)\n self.conv = convolution(Rs_mid1, act_mid.Rs_in)\n self.act_mid = act_mid\n\n act_out = GatedBlock(Rs_out, scalar_activation, gate_activation)\n self.lin_out = linear(groups * Rs_mid2, act_out.Rs_in)\n self.act_out = act_out\n\n self.groups = groups\n\n def forward(self, features, *args, **kwargs):\n \"\"\"\n :param features: tensor [..., point, channel]\n :return: tensor [..., point, channel]\n \"\"\"\n features = self.lin_in(features)\n features = self.act_in(features)\n\n features = self.conv(features, *args, **kwargs, groups=self.groups)\n features = self.act_mid(features.reshape(-1, rs.dim(self.act_mid.Rs_in))).reshape(*features.shape[:-1], -1)\n\n features = self.lin_out(features)\n features = self.act_out(features)\n\n return features\n","sub_path":"e3nn/point/depthwise.py","file_name":"depthwise.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"519476528","text":"# -*- coding: utf-8 -*-\n##\n##\n## This file is part of Indico.\n## Copyright (C) 2002 - 2013 European Organization for Nuclear Research (CERN).\n##\n## Indico is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 3 of the\n## License, or (at your option) any later 
version.\n##\n## Indico is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Indico;if not, see .\n\nfrom MaKaC.conference import ConferenceHolder\nfrom indico.ext.search.repozer.repozeIndexer import RepozeCatalog\nfrom indico.ext.search.repozer.options import confCatalog, contribCatalog, matCatalog\n\nfrom datetime import datetime\nimport time\nfrom pytz import timezone\nimport MaKaC.common.info as info\n\nfrom repoze.catalog.query import *\n\n\nclass RepozerQueryManager(): \n \n def __init__(self, params):\n self.query = None\n self.params = params\n \n\n\n def getQuery(self): \n if not self.params:\n return \n self.checkParams() \n return self.query\n\n\n def setQuery(self, query):\n self.query = query\n \n\n def addQuery(self, elem):\n if not self.query:\n self.query = elem\n else:\n self.query = self.query & elem\n return\n \n \n def getResults(self, query=None):\n res = []\n params = self.params\n\n if params.get('id',None):\n event = ch.getById(params['id'])\n res.append(event)\n return 1, res\n \n if not query:\n query = self.getQuery() \n \n if not query:\n return 0, []\n \n collections = params.get('collections', 'Conference')\n rc = RepozeCatalog()\n if collections == 'Material':\n rc = RepozeCatalog(matCatalog)\n if collections == 'Contribution':\n rc = RepozeCatalog(contribCatalog)\n \n catalog = rc.catalog \n ch = ConferenceHolder() \n desc = params.get('desc',False) \n sort_field = params.get('sort_field','startDate') \n \n numdocs, results = catalog.query(query, sort_index=sort_field, reverse=desc, limit=params.get('limit',5000)) \n results = [catalog.document_map.address_for_docid(result) for result in results]\n \n if params.get('onlyFids', False):\n return numdocs, results\n else:\n for obj in results:\n try:\n confId = str(obj).split(\"|\")[0]\n event = ch.getById(confId)\n res.append(event)\n except:\n pass \n \n return numdocs, res\n \n \n def checkParams(self): \n params = self.params \n #print params\n if params.has_key('text'):\n text = params.get('text', None)\n \n # Ictp: custom case\n if text.lower().startswith('smr'):\n self.setQuery( Any('keywords', text.replace(\" \", \"\")) )\n return\n \n # WHERE: specify where to search \n where = params.get('where', 'title_description')\n if where == 'title_description':\n self.addQuery( Eq('title', text.decode('utf8')) | Eq('description', text.decode('utf8')) )\n\n if where == 'title':\n self.addQuery( Eq('title', text.decode('utf8')) )\n \n if where == 'roles':\n val = unicode(text, \"UTF-8\").encode('ascii', 'xmlcharrefreplace')\n self.addQuery( Contains('rolesVals', val) )\n \n if where == 'persons':\n self.addQuery( Contains('persons', text.decode('utf8')) )\n\n if where == 'all':\n val = unicode(text, \"UTF-8\").encode('ascii', 'xmlcharrefreplace')\n textDecoded = text.decode('utf8')\n self.addQuery( Eq('description', textDecoded) | Eq('title', textDecoded) | Contains('persons', text) | Contains('rolesVals', val) )\n \n\n # START_DATE, END_DATE, STARTED\n startDate_ts = None\n endDate_ts = None\n datesAvailable = False\n localTimezone = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()\n \n if params.has_key('start_date'):\n sdate = params['start_date'].split('/')\n if 1:\n #try:\n startDate_ts = 
timezone(localTimezone).localize(datetime(int(sdate[0]), int(sdate[1]), int(sdate[2]), 0, 0))\n datesAvailable = True\n #except:\n # self.setQuery(None)\n # return\n \n if params.has_key('end_date'):\n edate = params['end_date'].split('/')\n try:\n endDate_ts = timezone(localTimezone).localize(datetime(int(edate[0]), int(edate[1]), int(edate[2]), 23, 59))\n datesAvailable = True\n except:\n self.setQuery(None)\n return \n \n if params.has_key('started'):\n ssdate = params['started'].split('/')\n try:\n started_ts = timezone(localTimezone).localize(datetime(int(ssdate[0]), int(ssdate[1]), int(ssdate[2]), 0, 0))\n self.addQuery( Ge('startDate',started_ts) )\n except:\n self.setQuery(None)\n return\n \n elif params.has_key('today'):\n if params['today'] == '':\n td = time.strftime(\"%Y/%m/%d\").split('/')\n else: \n td = params['today'].split('/')\n try:\n today_ts = timezone(localTimezone).localize(datetime(int(td[0]), int(td[1]), int(td[2]), 23, 59))\n end_today_ts = timezone(localTimezone).localize(datetime(int(td[0]), int(td[1]), int(td[2]), 00, 00))\n except:\n self.setQuery(None)\n return \n self.addQuery( Le('startDate',today_ts) & Ge('endDate',end_today_ts) )\n \n elif params.has_key('todaybeyond'):\n if params['todaybeyond'] == '' or params['todaybeyond'] == '1':\n td = time.strftime(\"%Y/%m/%d\").split('/')\n else: \n td = params['todaybeyond'].split('/')\n try:\n today_ts = timezone(localTimezone).localize(datetime(int(td[0]), int(td[1]), int(td[2]), 23, 59))\n except:\n self.setQuery(None)\n return \n self.addQuery( Le('startDate',today_ts) & Ge('endDate',today_ts) | Ge('startDate',today_ts) ) \n \n elif datesAvailable:\n self.addQuery( Not(Lt('endDate',startDate_ts) | Gt('startDate',endDate_ts)) | (InRange('startDate',startDate_ts, endDate_ts)) ) \n \n if params.has_key('keywords'):\n k = params['keywords']\n if k.find(',') > -1:\n kw = k.split(',')\n else:\n kw = [k] \n self.addQuery( Any('keywords', kw) )\n \n if params.has_key('keywordsAnd'):\n kw = params['keywordsAnd'].split(',')\n self.addQuery( All('keywords', kw) )\n \n if params.has_key('category'):\n kw = params['category'].split(',')\n self.addQuery( Any('category', kw) )\n \n # ICTP SPECIFIC\n if params.has_key('valid_deadline'):\n today = datetime.now()\n self.addQuery( Gt('deadlineDate', today) & NotEq('deadlineDate', datetime.strptime('01/01/1970', '%d/%m/%Y')) )\n\n # ICTP SPECIFIC: do not add Conference with keyword = NOSCIAL \n if params.get('collections', 'Conference') == 'Conference': \n self.addQuery( Not(Any('keywords', 'NOSCICAL')) ) \n\n return \n \n \n\n\n \n ","sub_path":"repozerQueryManager.py","file_name":"repozerQueryManager.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"637199590","text":"import yaml\nimport json\nimport paramiko\nimport requests\nimport ssl\nimport socket\nimport hashlib\nimport os\nimport pickle\nimport time\nfrom pprint import pprint\nfrom bravado.client import SwaggerClient\nfrom bravado.requests_client import RequestsClient\n\nPYNSXTOBJFILE = '.pynsxt'\nSPEC_PATH = \"/tmp/nsx_api.json\"\n\n\ndef load_configfile(args):\n with open(args.config_file, 'r') as f:\n config = yaml.load(f)\n return config\n\n\ndef connect_cli(config):\n if config.has_key('cli'):\n return config['cli']\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(config['ip'], username=config['user'],\n password=config['password'], port=22, timeout=15.0, 
look_for_keys=False)\n config['cli'] = ssh\n return ssh\n\n\ndef exec_command(cli, cmd, display=False):\n output = \"\"\n if display:\n print(\"# %s\" % cmd)\n stdin, stdout, stderr = cli.exec_command(cmd)\n for line in stdout:\n output += line\n if display:\n print(output)\n return output\n\n\ndef load_spec(manager):\n raw_spec = requests.get(\"https://%s/api/v1/spec/openapi/nsx_api.json\" %\n manager['ip'], auth=(manager['user'], manager['password']), verify=False).json()\n\n\ndef api_request(args, method, uri, data=\"\"):\n config = load_configfile(args)\n uri = \"https://%s/\" % config['nsxManager']['ip'] + uri\n # headers = {'Content-Type': 'application/json'}\n headers = {'Content-Type': 'application/json',\n 'Accept': 'application/json'}\n auth = (config['nsxManager']['user'], config['nsxManager']['password'])\n if method == 'get':\n res = requests.get(uri, auth=auth, headers=headers, verify=False)\n elif method == 'post':\n res = requests.post(uri, auth=auth, headers=headers,\n data=data, verify=False)\n elif method == 'delete':\n res = requests.delete(uri, auth=auth, headers=headers,\n data=data, verify=False)\n return (res.status_code, res.json())\n\n\ndef get_api_client(config, validation=False):\n if config.has_key('client'):\n return config['client']\n raw_spec = json.load(open(SPEC_PATH))\n raw_spec['host'] = config['nsxManager']['ip']\n http_client = RequestsClient()\n http_client.session.verify = False\n http_client.set_basic_auth(\n config['nsxManager']['ip'], config['nsxManager']['user'], config['nsxManager']['password'])\n config = {\n 'also_return_response': True,\n 'validate_swagger_spec': validation,\n 'validate_responses': False,\n 'validate_requests': False,\n 'use_models': False\n }\n client = SwaggerClient.from_spec(\n raw_spec, http_client=http_client, config=config)\n config['client'] = client\n return client\n\n\ndef get_thumbprint(ip):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n wrappedSocket = ssl.wrap_socket(sock)\n\n try:\n wrappedSocket.connect((ip, 443))\n except:\n response = False\n else:\n der_cert_bin = wrappedSocket.getpeercert(True)\n pem_cert = ssl.DER_cert_to_PEM_cert(wrappedSocket.getpeercert(True))\n # Thumbprint\n thumb_sha256 = hashlib.sha256(der_cert_bin).hexdigest()\n wrappedSocket.close()\n return ':'.join(map(''.join, zip(*[iter(thumb_sha256)] * 2)))\n\n\ndef convert_to_dict(model):\n try:\n model = model.__dict__['_Model__dict']\n for k, v in model.items():\n model[k] = convert_to_dict(v)\n except AttributeError:\n if isinstance(model, dict):\n for k, v in model.items():\n model[k] = convert_to_dict(v)\n if isinstance(model, list):\n for i, v in enumerate(model):\n model[i] = convert_to_dict(v)\n return model\n\n\ndef main():\n args = get_args()\n if args.debug:\n basicConfig(level=DEBUG)\n else:\n basicConfig(level=INFO)\n handler = StreamHandler()\n logger.addHandler(handler)\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pynsxt/pynsxt_utils.py","file_name":"pynsxt_utils.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"527033444","text":"import boto3\nimport datetime\nimport gzip\n\nfrom src.data_models.SmartsAlertsDataModel import SmartsAlertsDataModel\nfrom src.utility.Configuration import Configuration\n\n\ndef update_alerts_to_s3(evaluation_date):\n alerts = SmartsAlertsDataModel().initialize(evaluation_date=evaluation_date).evaluate()\n alerts_compress_str = 
alerts.to_csv(compression='gzip', index=False)\n alerts_gzip_file = gzip.compress(bytes(alerts_compress_str, 'utf-8'))\n\n config = Configuration().get()['aws_s3']\n access_key_id = [key_id['access_key_id'] for key_id in config if list(key_id.keys())[0] == 'access_key_id'][0]\n secret_access_key = [secret_key['secret_access_key'] for secret_key in config if list(secret_key.keys())[0] == 'secret_access_key'][0]\n bucket = [bucket['bucket_name'] for bucket in config if list(bucket.keys())[0] == 'bucket_name'][0]\n\n session = boto3.Session(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)\n s3 = session.client('s3')\n\n s3.put_object(Body=alerts_gzip_file, Key='smarts_alerts_{}.csv.gz'.format(evaluation_date), Bucket=bucket)\n\n\nif __name__ == '__main__':\n update_alerts_to_s3(datetime.date.today() - datetime.timedelta(days=1))\n","sub_path":"src/aws_s3/UpdateAlertsToS3.py","file_name":"UpdateAlertsToS3.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"647301416","text":"#!/usr/bin/env python3\n\nimport json\nimport argparse\nimport http.client\n\nfrom homekit import find_device_ip_and_port, HapStatusCodes\n\n\ndef setup_args_parser():\n parser = argparse.ArgumentParser(description='HomeKit identify app - performs identify on given HomeKit device')\n parser.add_argument('-d', action='store', required=True, dest='device')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = setup_args_parser()\n\n connection_data = find_device_ip_and_port(args.device)\n\n conn = http.client.HTTPConnection(connection_data['ip'], port=connection_data['port'])\n\n conn.request('POST', '/identify')\n\n resp = conn.getresponse()\n if resp.code == 400:\n data = json.loads(resp.read().decode())\n code = data['status']\n print('identify failed because: {reason} ({code}). Is it paired?'.format(reason=HapStatusCodes[code], code=code))\n elif resp.code == 200:\n print('identify succeeded.')\n conn.close()\n","sub_path":"homekit/identify.py","file_name":"identify.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"397917439","text":"from typing import Optional\nfrom termcolor import cprint\nimport sys\n\nBLACKJACK = 21\n_LINE_WIDTH = 60\n_CTRL_C = \"\\x03\"\n_LOG_COLOR = \"green\"\n\nif sys.platform == \"win32\":\n import msvcrt\n\n getch = lambda: msvcrt.getch()\n\nelse:\n import tty, termios\n\n def _getch():\n fd = sys.stdin.fileno()\n original_attributes = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, original_attributes)\n if ch == _CTRL_C:\n sys.tracebacklimit = 0\n raise KeyboardInterrupt\n return ch\n\n getch = _getch\n\n\ndef choose(action_1: str, action_2: str, other: Optional[str] = None):\n def choose_two_or(action_1: str, action_2: str, other: str):\n try:\n return [action_1, action_2][int(getch()) - 1]\n except ValueError:\n return other\n\n def choose_two(action_1: str, action_2: str):\n while True:\n result = choose_two_or(action_1, action_2, \"retry\")\n if result != \"retry\":\n return result\n print(\"Invalid input. Please choose either 1 or 2.\")\n\n print(f\"1: {action_1}, 2: {action_2}\")\n if other:\n print(f\"Other keys: {other}\")\n return choose_two_or(action_1, action_2, other)\n\n return choose_two(action_1, action_2)\n\n\ndef log(line: str, *lines: str):\n cprint(\"=\" * _LINE_WIDTH, _LOG_COLOR)\n\n for line in [line, *lines]:\n cprint(f\"{line:>30}\", _LOG_COLOR)\n\n cprint(\"=\" * _LINE_WIDTH, _LOG_COLOR)\n\n\ndef how_to_play():\n log(\n \"Blackjack is a game you win by making a total close to 21.\",\n \"J, Q, and K count as 10, and an A can be counted as either 1 or 11.\",\n \"You are dealt two cards at the start.\",\n \"Draw another card with Hit, or finish your turn without drawing with Stay.\",\n \"If your total goes over 21 you Bust and immediately lose.\",\n \"When the player's turn ends, it is the opponent's turn.\",\n \"The dealer must always Hit until the total is 17 or higher.\",\n \"If your total is higher than the opponent's, or the opponent Busts, the player wins.\",\n )\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"307631411","text":"# two-sum: alternative solutions\n\n\nclass Solution:\n # 1. Brute force: the slowest, O(n^2)\n def brute_force(self, nums, target: int):\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n if nums[i] + nums[j] == target:\n return [i, j]\n\n # 2. in => the approach I used: still O(n^2) time complexity, but 'in' is faster\n def using_in(self, nums, target: int):\n for i, n in enumerate(nums):\n complement = target - n\n if complement in nums[i + 1:]:\n return [i, nums[i + 1 :].index(complement) + (i + 1)]\n\n # 3. Look up target minus the first number as a dict key: O(1) average, O(n) worst case\n def find_key_except_first_num_1(self, nums, target: int):\n # key <> value -> dict\n nums_map = {}\n for i, num in enumerate(nums):\n nums_map[num] = i\n\n # look up target minus the current number as a key\n for i, num in enumerate(nums):\n if target - num in nums_map and i != nums_map[target - num]:\n return [i, nums_map[target - num]]\n\n # 4. Improved structure of 3: no performance difference, but the code is more concise\n def find_key_except_first_num_2(self, nums, target: int):\n nums_map = {}\n for i, num in enumerate(nums):\n if target - num in nums_map:\n return [nums_map[target - num], i]\n nums_map[num] = i\n\n # If the list is sorted, a two-pointer approach also works:\n # if the sum is less than the target, move the left pointer right;\n # if the sum is greater than the target, move the right pointer left;\n # if the sum equals the target, return\n","sub_path":"python-algorithm-interview/3_linear_data_structures/07_array/7-1.py","file_name":"7-1.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"498240693","text":"\"\"\"\nThis program is the beginning of the piping GUI; it creates 2 Axis frames, a Scan Frame, and a Fault Frame\n\nIt communicates with the Aerotech Controller and sets up a queue thread for Control Commands Enable and Jog only\n\nCurrently the program is able to enable axes and jog them *NOTE PROGRAM IS IN MM, PARAMETER FILE MAY BE IN INCHES*\n\nNo Fault conditions are handled, be cautious while running\n\nAbility to read in a text file to set to the last used tool configuration\n\nScan button logic complete - No scan commands\n\nUpdated to allow for Jog, GOTO, Move Inc, Set To\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter import messagebox\nimport serial\nimport threading\nimport time\nimport queue\n\n\n# This class sets Frame parameters for the Circ Axis\nclass SetupAxis1Frame:\n def __init__(self):\n self.axisName = \"CIRC\"\n self.axisUnits = \"mm\"\n self.jogText1 = \"CCW\"\n self.jogText2 = \"CW\"\n self.speedMin = 0.1\n self.speedMax = 25.0\n self.speedRes = .5\n self.queue_name = \"CTRL\"\n\n\n# This class sets Frame parameters for the Trans Axis\nclass SetupAxis2Frame:\n def __init__(self):\n self.axisName = \"TRANSLATOR\"\n self.axisUnits = \"mm\"\n self.jogText1 = \"IN\"\n self.jogText2 = \"OUT\"\n self.speedMin = 0.1\n self.speedMax = 50.0\n self.speedRes = .5\n self.queue_name = \"CTRL\"\n\n\n# This class places a button on the top of the GUI to select between LAPIS and NOVA\nclass ToolFrame:\n def __init__(self, master, params):\n self.master = master\n self.tool = params\n topFrame = Frame(master, relief=SUNKEN, border=2)\n topFrame.pack(fill=X, padx=10, pady=10)\n self.toolButton = Button(topFrame, text=\"NOVA\", fg=\"Black\", bg=\"Sky Blue\", font=(\"Helvetica\", 14),\n command=lambda: self.toggle_tool())\n\n # Read in first parameter from TIMC setup file list to determine last configuration\n if self.tool[0] == \"LPS-1000\\n\":\n self.toolButton.config(text=\"LPS-1000\", fg=\"Black\", bg=\"Goldenrod\")\n elif self.tool[0] == \"NOVA\\n\":\n self.toolButton.config(text=\"NOVA\", fg=\"Black\", bg=\"Sky Blue\")\n self.toolButton.pack(fill=X)\n\n # Toggle tool between LAPIS and NOVA, overwrite setup file list\n def toggle_tool(self):\n if self.toolButton[\"text\"] == \"NOVA\":\n self.toolButton.config(text=\"LPS-1000\", fg=\"Black\", bg=\"Goldenrod\")\n self.tool[0] = \"LPS-1000\\n\"\n TIMC.params = self.tool\n else:\n self.toolButton.config(text=\"NOVA\", fg=\"Black\", bg=\"Sky Blue\")\n self.tool[0] = \"NOVA\\n\"\n TIMC.params = self.tool\n\n\n# This class pulls parameters from the specific axis and puts them in the GUI\nclass AxisFrame:\n def __init__(self, master, parameters):\n self.axisName = parameters.axisName\n self.axisUnits = parameters.axisUnits\n self.jogText1 = parameters.jogText1\n self.jogText2 = parameters.jogText2\n self.speedMin = parameters.speedMin\n self.speedMax = parameters.speedMax\n self.speedRes = parameters.speedRes\n self.queue = 
parameters.queue_name\n self.current_limit = 5 # (A) pull this in eventually\n self.pos_err_limit = 1.5 # (mm)\n\n self.state = 0 # Flag for Enabled/Disabled Axis\n\n self.frame = Frame(master, relief=SUNKEN, border=2)\n self.frame.pack(fill=X, padx=10, pady=5)\n\n self.position = float(0)\n self.current = float(0)\n self.velocity = float(0)\n self.setToText = StringVar(master, value=\"0\")\n self.GoToText = StringVar(master, value=\"0\")\n self.moveIncText = StringVar(master, value=\"0\")\n\n # Create Widgets\n # Frames\n self.pos_frame = Frame(self.frame, bg=\"White\", relief=SUNKEN, border=2)\n self.button_frame = Frame(self.frame)\n self.error_frame = Frame(self.frame)\n self.pos_err_graph = Canvas(self.error_frame, bg=\"white\", height=100, width=20)\n self.current_graph = Canvas(self.error_frame, bg=\"white\", height=100, width=20)\n\n # Labels\n self.label_0 = Label(self.frame, text=self.axisName, font=\"Helvetica, 14 bold\")\n self.label_1 = Label(self.pos_frame, text=self.axisUnits, fg=\"Gray\", bg=\"White\", font=\"Helvetica, 20 bold\")\n self.label_2 = Label(self.pos_frame, text=\"Velocity\", font=(\"Helvetica\", 8), bg=\"White\")\n self.label_3 = Label(self.pos_frame, text=(self.axisUnits + \"/s\"), font=(\"Helvetica\", 8), bg=\"White\")\n self.label_4 = Label(self.button_frame, text=(\"Speed (\" + self.axisUnits + \"/s)\"), font=(\"Helvetica\", 10))\n self.label_5 = Label(self.error_frame, text=\"Pos Err\", font=(\"Helvetica\", 8))\n self.label_6 = Label(self.error_frame, text=\"Current\", font=(\"Helvetica\", 8))\n\n # Buttons\n self.enableButton = Button(self.frame, text=\"OFF\", fg=\"Red\", bg=\"Light Grey\", height=1, width=8,\n command=lambda: self.toggle_axis(), font=\"Helvetica, 12 bold\")\n self.setToButton = Button(self.button_frame, text=\"SET TO:\", fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n state=\"disabled\", command=lambda: self.setTo(1))\n self.GoToButton = Button(self.button_frame, text=\"GO TO:\", fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n state=\"disabled\",\n command=lambda: self.GoTo(1))\n self.moveIncButton = Button(self.button_frame, text=\"MOVE INC:\", fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n state=\"disabled\", command=lambda: self.moveInc())\n self.setToZero = Button(self.button_frame, text=\"Set To 0\", fg=\"Gray\", bg=\"Light Grey\", height=1,\n state=\"disabled\", command=lambda: self.setTo(0))\n self.GoToZero = Button(self.button_frame, text=\"Go To 0\", fg=\"Gray\", bg=\"Light Grey\", height=1,\n state=\"disabled\", command=lambda: self.GoTo(0))\n self.jogButtonFWD = Button(self.frame, text=\"Jog \" + self.jogText1, fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n font=\"Helvetica, 12 bold\", state=\"disabled\")\n self.jogButtonREV = Button(self.frame, text=\"Jog \" + self.jogText2, fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n font=\"Helvetica, 12 bold\", state=\"disabled\")\n\n # Feedback Labels and Entry Boxes\n self.position_box = Label(self.pos_frame, text=(\"%.2f\" % self.position), fg=\"Gray\", bg=\"White\", width=6, anchor=E, font=\"Helvetica, 26 bold\")\n self.velocity_box = Label(self.pos_frame, text=(\"%.1f\" % self.velocity), bg=\"White\", width=8, state=\"disabled\", font=(\"Helvetica\", 8))\n self.setToEntry = Entry(self.button_frame, textvariable=self.setToText, width=10, font=(\"Helvetica\", 10), justify=\"center\")\n self.GoToEntry = Entry(self.button_frame, textvariable=self.GoToText, width=10, font=(\"Helvetica\", 10), justify=\"center\")\n self.moveIncEntry = 
Entry(self.button_frame, textvariable=self.moveIncText, width=10, font=(\"Helvetica\", 10),\n justify=\"center\")\n\n # Velocity Scale\n self.vel = Scale(self.frame, from_=self.speedMin, to=self.speedMax, orient=HORIZONTAL, length=200, resolution=self.speedRes, troughcolor=\"White\")\n self.vel.set((self.speedMax - self.speedMin) * 0.5)\n\n # Jog Button Actions\n self.jogButtonFWD.bind('<ButtonPress-1>', lambda event: self.jogFWD())\n self.jogButtonFWD.bind('<ButtonRelease-1>', lambda event: self.stopjog())\n self.jogButtonREV.bind('<ButtonPress-1>', lambda event: self.jogREV())\n self.jogButtonREV.bind('<ButtonRelease-1>', lambda event: self.stopjog())\n\n # Grid Widgets\n self.label_0.grid(column=0, row=0, columnspan=2, pady=5, sticky=W)\n self.pos_frame.grid(column=0, row=1, rowspan=3, padx=5, sticky=N)\n self.position_box.grid(column=0, row=0, columnspan=2, pady=5)\n self.label_1.grid(column=2, row=0, padx=2, pady=5, sticky=W) # Units\n self.label_2.grid(column=0, row=1, pady=2, sticky=E) # Velocity\n self.velocity_box.grid(column=1, row=1)\n self.label_3.grid(column=2, row=1, pady=2, sticky=W) # Units/s\n self.enableButton.grid(column=0, row=4, rowspan=1, padx=5, sticky=N)\n\n self.button_frame.grid(column=1, row=0, rowspan=3, columnspan=3)\n self.setToButton.grid(column=0, row=0, padx=10, sticky=S)\n self.setToEntry.grid(column=0, row=1, sticky=N)\n self.setToZero.grid(column=0, row=2, pady=5, sticky=N)\n self.moveIncButton.grid(column=1, row=0, padx=10, sticky=S)\n self.moveIncEntry.grid(column=1, row=1, sticky=N)\n self.label_4.grid(column=1, row=2, sticky=S) # Units/s\n self.GoToButton.grid(column=2, row=0, padx=10, sticky=S)\n self.GoToEntry.grid(column=2, row=1, sticky=N)\n self.GoToZero.grid(column=2, row=2, pady=5, sticky=N)\n\n self.vel.grid(column=1, row=2, rowspan=2, columnspan=3, sticky=S)\n self.vel.lower()\n self.jogButtonFWD.grid(column=1, row=4, rowspan=2, padx=10, pady=5, sticky=SW)\n self.jogButtonREV.grid(column=3, row=4, rowspan=2, padx=10, pady=5, sticky=SE)\n\n self.error_frame.grid(column=4, row=0, rowspan=5, padx=10)\n self.pos_err_graph.grid(column=0, row=0)\n self.current_graph.grid(column=1, row=0)\n self.label_5.grid(column=0, row=1, sticky=S) # Pos Err\n self.label_6.grid(column=1, row=1, sticky=S) # Current\n self.pos_err_rect = self.pos_err_graph.create_rectangle(2, 98, 21, 100, fill=\"red\", outline=\"red\")\n self.current_rect = self.current_graph.create_rectangle(2, 98, 21, 100, fill=\"red\", outline=\"red\")\n\n # This function toggles the button between OFF and ON\n def toggle_axis(self):\n if self.state == 0:\n self.enable_axis()\n else:\n self.disable_axis()\n\n # This function enables the axis\n def enable_axis(self):\n self.activate_axis_btns()\n self.enableButton.config(text=\"ON\")\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"ENABLE \" + self.axisName) # Aerotech command to enable axis\n\n if TIMC.axis1.state & TIMC.axis2.state == 1:\n TIMC.scan.activate_scan_btns() # Once both axes are enabled, scan button is active\n\n if TIMC.scan.scan_state == 1:\n TIMC.axis1.enableButton.config(state=\"disabled\") # If scan is active, disable axis 1 enable button\n TIMC.axis2.enableButton.config(state=\"disabled\") # If scan is active, disable axis 2 enable button\n TIMC.scan.start.config(state=\"disabled\") # If scan is active, disable start scan button\n TIMC.scan.stop.config(state=\"normal\", fg=\"Black\", bg=\"Indian Red\")\n TIMC.scan.resume.config(state=\"normal\", fg=\"Black\", bg=\"Dodger Blue\")\n\n # This function disables the axis\n def disable_axis(self):\n self.deactivate_axis_btns()\n 
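# deactivate_axis_btns() greys out this axis's controls; the Enable button itself is only reset to OFF below so the axis can be re-enabled\n 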
self.enableButton.config(text=\"OFF\", fg=\"Red\", bg=\"Light Grey\")\n TIMC.scan.deactivate_scan_btns()\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"DISABLE \" + self.axisName) # Aerotech command to disable axis\n\n def activate_axis_btns(self):\n self.state = 1\n self.enableButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.setToButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.GoToButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.moveIncButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.setToZero.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.GoToZero.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.jogButtonFWD.config(state=\"normal\", fg=\"Black\", bg=\"SteelBlue2\")\n self.jogButtonREV.config(state=\"normal\", fg=\"Black\", bg=\"SteelBlue2\")\n self.position_box.config(fg=\"Black\")\n self.label_1.config(fg=\"Black\")\n self.velocity_box.config(state=\"normal\")\n\n def deactivate_axis_btns(self):\n self.state = 0\n self.setToButton.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.GoToButton.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.moveIncButton.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.setToZero.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.GoToZero.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.jogButtonFWD.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.jogButtonREV.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.position_box.config(fg=\"Gray\")\n self.label_1.config(fg=\"Gray\")\n self.velocity_box.config(state=\"disabled\")\n\n # This function starts Jogging in the FORWARD Direction\n def jogFWD(self):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"ABORT \" + self.axisName)\n speed = str(self.vel.get())\n TIMC.acmd(self.queue, \"FREERUN \" + self.axisName + \" \" + speed)\n\n # This function starts Jogging in the REVERSE Direction\n def jogREV(self):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"ABORT \" + self.axisName)\n speed = str(-1 * self.vel.get())\n TIMC.acmd(self.queue, \"FREERUN \" + self.axisName + \" \" + speed)\n\n # This function stops Jogging\n def stopjog(self):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"FREERUN \" + self.axisName + \" 0\")\n\n # This function sets the position in the position label based on Set To Entry Box\n def setTo(self, zero):\n if zero == 0:\n position = \"0\"\n else:\n position = str(self.setToEntry.get())\n if checkIsDigit(position):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"POSOFFSET SET \" + self.axisName + \", \" + position)\n else:\n self.position = float(position)\n self.position_box.config(text=(\"%.2f\" % self.position))\n\n def GoTo(self, zero):\n if zero == 0:\n position = \"0\"\n else:\n position = str(self.GoToEntry.get())\n if checkIsDigit(position):\n if TIMC.online == 1:\n speed = str(self.vel.get())\n TIMC.acmd(self.queue, \"MOVEABS \" + self.axisName + \" \" + position + \" F \" + speed)\n else:\n self.position = float(position)\n self.position_box.config(text=(\"%.2f\" % self.position))\n\n def moveInc(self):\n distance = self.moveIncEntry.get()\n if checkIsDigit(distance):\n if TIMC.online == 1:\n speed = str(self.vel.get())\n TIMC.acmd(self.queue, \"MOVEINC \" + self.axisName + \" \" + distance + \" F \" + speed)\n else:\n self.position = self.position + float(distance)\n self.position_box.config(text=(\"%.2f\" % self.position))\n\n def 
updateCurrent(self, cur):\n # Scale the current reading to a percentage of the current limit for the bar graph\n cur = abs((cur/self.current_limit)*100)\n # Delete the old representation of current\n self.current_graph.delete(self.current_rect)\n # Draw the new current box\n self.current_rect = self.current_graph.create_rectangle(2, 98-cur, 21, 100, fill=\"red\", outline=\"red\")\n\n def updatePosErr(self, pos_err):\n # Scale the position error to a percentage of the position error limit for the bar graph\n pos_err = abs((pos_err/self.pos_err_limit)*100)\n # Delete the old representation of position error\n self.pos_err_graph.delete(self.pos_err_rect)\n # Draw the new position error box\n self.pos_err_rect = self.pos_err_graph.create_rectangle(2, 98-pos_err, 21, 100, fill=\"red\", outline=\"red\")\n\n\n# This class creates a Scan Window in the GUI\nclass ScanFrame:\n def __init__(self, master, parameters1, parameters2):\n self.master = master\n self.axis1 = parameters1\n self.axis2 = parameters2\n self.scanConfig = IntVar()\n self.scanType = IntVar()\n self.scan_setup = [\"0.0\", \"20.0\", \"0.0\", \"10.0\", \"1.0\"]\n self.scan_state = 0\n\n scan_frame = Frame(master, relief=SUNKEN, border=2)\n left_frame = Frame(scan_frame)\n right_frame = Frame(scan_frame)\n bottom_frame = Frame(master, relief=RAISED, border=2)\n scan_frame.pack(fill=X, padx=10)\n\n left_frame.grid(column=0, row=0, sticky=W)\n right_frame.grid(column=1, row=0, padx=20)\n bottom_frame.pack(fill=X, padx=10)\n\n # Labels and Axis Names\n self.label_0 = Label(left_frame, text=\"SCAN WINDOW\", font=(\"Helvetica\", 14))\n self.axis_label_1 = Label(left_frame, text=self.axis1.axisName, font=(\"Helvetica\", 12))\n self.axis_label_2 = Label(left_frame, text=self.axis2.axisName, font=(\"Helvetica\", 12))\n\n # Start, Stop, Pause, and Resume Buttons\n self.start = Button(bottom_frame, text=\"START\", font=\"Helvetica, 12 bold\", fg=\"Gray\", bg=\"Light Green\", height=2,\n width=10, command=lambda: self.start_scan(), state=\"disabled\")\n self.stop = Button(bottom_frame, text=\"STOP\", font=\"Helvetica, 12 bold\", fg=\"Gray\", bg=\"Light Coral\", height=2,\n width=10, command=lambda: self.stop_scan(), state=\"disabled\")\n self.pause = Button(bottom_frame, text=\"PAUSE\", font=\"Helvetica, 10 bold\", fg=\"Gray\", bg=\"Light Yellow\", height=1, width=10,\n command=lambda: self.pause_scan(), state=\"disabled\")\n self.resume = Button(bottom_frame, text=\"RESUME\", font=\"Helvetica, 10 bold\", fg=\"Gray\", bg=\"Light Blue\", height=1, width=10,\n command=lambda: self.resume_scan(), state=\"disabled\")\n\n # Speed slider bars\n self.vel1 = Scale(left_frame, from_=self.axis1.speedMin, to=self.axis1.speedMax, orient=HORIZONTAL, length=100,\n label=\"Speed \" + self.axis1.axisUnits + \"/sec\", font=(\"Helvetica\", 10),\n resolution=self.axis1.speedRes)\n self.vel1.set((self.axis1.speedMax - self.axis1.speedMin) * 0.5)\n self.vel2 = Scale(left_frame, from_=self.axis2.speedMin, to=self.axis2.speedMax, orient=HORIZONTAL, length=100,\n label=\"Speed \" + self.axis2.axisUnits + \"/sec\", font=(\"Helvetica\", 10),\n resolution=self.axis2.speedRes)\n self.vel2.set((self.axis2.speedMax - self.axis2.speedMin) * 0.5)\n\n # Radio buttons to select scan configuration\n self.axis1_radio_0 = Radiobutton(left_frame, text=\"Scan\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=0)\n self.axis1_radio_1 = Radiobutton(left_frame, text=\"Index\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=1)\n self.axis2_radio_0 = Radiobutton(left_frame, text=\"Scan\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=1)\n self.axis2_radio_1 
= Radiobutton(left_frame, text=\"Index\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=0)\n self.bidirectional_radio = Radiobutton(right_frame, text=\"Bidirectional\", font=(\"Helvetica\", 10),\n variable=self.scanType, value=1)\n self.unidirectional_radio = Radiobutton(right_frame, text=\"Unidirectional\", font=(\"Helvetica\", 10),\n variable=self.scanType, value=0)\n\n # Scan Entry Boxes\n self.label_1 = Label(right_frame, text=\"Scan Start\", font=(\"Helvetica\", 10))\n self.label_2 = Label(right_frame, text=\"Scan Stop\", font=(\"Helvetica\", 10))\n self.label_3 = Label(right_frame, text=\"Index Start\", font=(\"Helvetica\", 10))\n self.label_4 = Label(right_frame, text=\"Index Stop\", font=(\"Helvetica\", 10))\n self.label_5 = Label(right_frame, text=\"Index Size\", font=(\"Helvetica\", 10))\n self.label_6 = Label(right_frame, text=\"Remaining Time\", font=(\"Helvetica\", 10))\n self.e_scanStart = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_scanStart.insert(0, self.scan_setup[0])\n self.e_scanStop = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_scanStop.insert(0, self.scan_setup[1])\n self.e_indexStart = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_indexStart.insert(0, self.scan_setup[2])\n self.e_indexStop = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_indexStop.insert(0, self.scan_setup[3])\n self.e_indexSize = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_indexSize.insert(0, self.scan_setup[4])\n self.label_rem_time = Label(right_frame, text=\"00:00:00\", font=(\"Helvetica\", 12))\n\n # Place widgets in frames\n self.label_0.grid(column=0, row=0, columnspan=2, sticky=W)\n self.axis_label_1.grid(column=0, row=2, rowspan=2, sticky=E, pady=5)\n self.axis_label_2.grid(column=0, row=4, rowspan=2, sticky=E, pady=5)\n self.vel1.grid(column=1, row=2, rowspan=2, columnspan=2, pady=5)\n self.vel2.grid(column=1, row=4, rowspan=2, columnspan=2, pady=5)\n\n self.axis1_radio_0.grid(column=3, row=2, padx=5, sticky=S)\n self.axis1_radio_1.grid(column=3, row=3, padx=5, sticky=N)\n self.axis2_radio_0.grid(column=3, row=4, padx=5, sticky=S)\n self.axis2_radio_1.grid(column=3, row=5, padx=5, sticky=N)\n\n self.bidirectional_radio.grid(column=0, row=0, columnspan=2, padx=20, sticky=W)\n self.unidirectional_radio.grid(column=0, row=1, columnspan=2, padx=20, sticky=W)\n self.label_1.grid(column=2, row=0, sticky=E)\n self.label_2.grid(column=2, row=1, sticky=E)\n self.label_3.grid(column=2, row=2, sticky=E)\n self.label_4.grid(column=2, row=3, sticky=E)\n self.label_5.grid(column=2, row=4, sticky=E)\n self.label_6.grid(column=0, row=5, sticky=E)\n self.e_scanStart.grid(column=3, row=0, padx=5, pady=2)\n self.e_scanStop.grid(column=3, row=1, padx=5, pady=2)\n self.e_indexStart.grid(column=3, row=2, padx=5, pady=2)\n self.e_indexStop.grid(column=3, row=3, padx=5, pady=2)\n self.e_indexSize.grid(column=3, row=4, padx=5, pady=2)\n self.label_rem_time.grid(column=1, row=5, columnspan=2, pady=5, padx=5, sticky=W)\n self.start.pack(side=LEFT, padx=22, pady=5)\n self.stop.pack(side=LEFT, padx=22, pady=5)\n self.resume.pack(side=RIGHT, padx=22, pady=5)\n self.pause.pack(side=RIGHT, padx=22, pady=5)\n\n # Create a scan thread which will command movements to each scan point in self.scan_points\n self.process_scan = ScanThread(self.scan_setup)\n\n def start_scan(self):\n print(\"Start Scan\")\n # Get values from Entry 
Boxes\n self.scan_setup[0] = self.e_scanStart.get()\n self.scan_setup[1] = self.e_scanStop.get()\n self.scan_setup[2] = self.e_indexStart.get()\n self.scan_setup[3] = self.e_indexStop.get()\n self.scan_setup[4] = self.e_indexSize.get()\n\n # Deactivate Start Scan Button and Axis Buttons During Scan\n self.start.config(state=\"disabled\", fg=\"Gray\")\n TIMC.axis1.deactivate_axis_btns()\n TIMC.axis2.deactivate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"disabled\")\n TIMC.axis2.enableButton.config(state=\"disabled\")\n\n # Activate Stop and Pause Buttons\n self.stop.config(state=\"normal\", fg=\"Black\", bg=\"Indian Red\")\n self.pause.config(state=\"normal\", fg=\"Black\", bg=\"Gold\")\n\n \"\"\"\n Check values here to make sure scan is okay to proceed\n \"\"\"\n # Is Start Scan Less Than End Scan\n if self.scan_setup[0] >= self.scan_setup[1] or self.scan_setup[2] >= self.scan_setup[3]:\n messagebox.showinfo(\"Bad Scan Inputs\", \"Start/Stop Values are Same or In Wrong Direction\")\n self.stop_scan()\n else:\n self.scan_state = 1\n self.process_scan = ScanThread(self.scan_setup)\n self.process_scan.start()\n\n def stop_scan(self):\n self.process_scan.stop()\n self.scan_state = 0\n\n # Activate Start Scan Button and Axis Buttons\n self.start.config(state=\"normal\", fg=\"Black\")\n TIMC.axis1.activate_axis_btns()\n TIMC.axis2.activate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"normal\")\n TIMC.axis2.enableButton.config(state=\"normal\")\n\n # Deactivate Stop, Pause and Resume\n self.stop.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Coral\")\n self.pause.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Yellow\")\n self.resume.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Blue\")\n\n def pause_scan(self):\n self.process_scan.pause()\n\n # Activate Axis Buttons and Resume Button\n TIMC.axis1.activate_axis_btns()\n TIMC.axis2.activate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"normal\")\n TIMC.axis2.enableButton.config(state=\"normal\")\n self.resume.config(state=\"normal\", fg=\"Black\", bg=\"Dodger Blue\")\n self.pause.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Yellow\")\n\n def resume_scan(self):\n self.process_scan.resume()\n\n # Deactivate Axis Buttons and Resume Button\n TIMC.axis1.deactivate_axis_btns()\n TIMC.axis2.deactivate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"disabled\")\n TIMC.axis2.enableButton.config(state=\"disabled\")\n self.resume.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Blue\")\n self.pause.config(state=\"normal\", fg=\"Black\", bg=\"Gold\")\n\n def activate_scan_btns(self):\n self.start.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n\n def deactivate_scan_btns(self):\n self.start.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Green\")\n self.stop.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Coral\")\n self.pause.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Yellow\")\n self.resume.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Blue\")\n\n\n# This class creates a Fault Frame in the GUI\nclass FaultFrame:\n def __init__(self, master):\n self.frame = Frame(master, borderwidth=2, relief=SUNKEN)\n self.canvas = Canvas(self.frame, highlightthickness=0)\n self.canvas.pack(fill=X)\n self.frame.pack(fill=X, padx=10, pady=5)\n self.frame.pack()\n self.status_text = StringVar()\n\n self.label_0 = Label(self.canvas, text=\"FAULT STATUS\", height=1, font=(\"Helvetica\", 14))\n self.button = Button(self.canvas, text=\"FAULT\\nRESET\", font=\"Helvetica, 12\", fg=\"black\", 
bg=\"#d3d3d3\", height=2,\n width=6, command=lambda: self.fault_ack())\n self.entry = Entry(self.canvas, width=50, textvariable=self.status_text, font=\"Helvetica, 12\", justify=\"center\")\n self.label_0.grid(row=0, column=0, columnspan=2, sticky=W)\n self.entry.grid(row=1, column=0, columnspan=2, padx=30)\n self.button.grid(row=0, column=2, rowspan=2, pady=10, padx=5)\n\n # Method to display the fault text and change the background color to red\n def fault_status(self, text):\n self.canvas.config(bg=\"red\")\n self.label_0.config(bg=\"red\")\n self.status_text.set(text)\n\n # Method to display information text and keep the background color default\n def update_status(self, text):\n self.canvas.config(bg=\"SystemButtonFace\")\n self.label_0.config(bg=\"SystemButtonFace\")\n self.entry.config(bg=\"Yellow\")\n self.status_text.set(text)\n\n # Method to reset the fault and change background color back to default\n def fault_ack(self):\n if TIMC.online:\n TIMC.acmd(\"CTRL\", \"ACKNOWLEDGEALL\")\n self.canvas.config(bg=\"SystemButtonFace\")\n self.label_0.config(bg=\"SystemButtonFace\")\n self.entry.config(bg=\"White\")\n self.status_text.set(\"\")\n\n\n# This class starts a thread that opens communication and puts queued commands to the Ensemble\nclass SerialThread(threading.Thread):\n def __init__(self, baud, qControl_read, qControl_write, qScan_read, qScan_write, qFBK_read, qFBK_write):\n threading.Thread.__init__(self)\n self.qControl_read = qControl_read\n self.qControl_write = qControl_write\n self.qScan_read = qScan_read\n self.qScan_write = qScan_write\n self.qFBK_read = qFBK_read\n self.qFBK_write = qFBK_write\n\n self._is_running = 1\n self.port_open = 0\n self.baud = baud\n\n def run(self):\n # Open the serial port\n ports = ['COM%s' % (i + 1) for i in range(100)]\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n if len(result) == 1:\n self.s = serial.Serial(result[0], self.baud, timeout=0.05)\n\n # Send a command to check if communication has been established\n self.s.write(\"ACKNOWLEDGEALL\".encode('ascii') + b' \\n')\n data = self.s.readline().decode('ascii')\n if '%' in data:\n self.port_open = 1\n self.s.write(\"WAIT MODE NOWAIT\".encode('ascii') + b' \\n')\n # Throw away second response\n data = self.s.readline().decode('ascii')\n elif len(result) > 1:\n self.port_open = 0\n self._is_running = 0\n else:\n self._is_running = 0\n\n # Serial Thread Main Loop - Check Queue for Commands to Send to Aerotech Drive\n while self._is_running:\n time.sleep(.0001)\n # Check if control queue has commands in the queue to send to the Aerotech drive\n if self.qControl_write.qsize():\n command = self.qControl_write.get().encode('ascii') + b' \\n'\n self.s.write(command)\n data = self.s.readline().decode('ascii')\n self.qControl_read.put(data)\n # Check if scan queue has commands in the queue to send to the Aerotech drive\n elif self.qScan_write.qsize():\n command = self.qScan_write.get().encode('ascii') + b' \\n'\n self.s.write(command)\n data = self.s.readline().decode('ascii')\n self.qScan_read.put(data)\n # Check if feedback queue has commands in the queue. 
This is the least priority\n elif self.qFBK_write.qsize():\n command = self.qFBK_write.get().encode('ascii') + b' \\n'\n self.s.write(command)\n data = self.s.readline().decode('ascii')\n self.qFBK_read.put(data)\n\n # Stop the thread from running\n def stop(self):\n self._is_running = 0\n try:\n self.s.close()\n print(\"Serial Port Closed\")\n except:\n print(\"No Serial Port to Close\")\n\n\n# This class starts a thread for automated scanning\nclass ScanThread(threading.Thread):\n def __init__(self, scan_setup):\n threading.Thread.__init__(self)\n self._is_running = 1\n self._is_paused = 0\n self.scan_setup = scan_setup\n self.setDaemon(True)\n\n \"\"\"\n Calculate scan points\n \"\"\"\n\n def run(self):\n print(\"Running Scan\")\n while self._is_running:\n time.sleep(.25)\n if self._is_paused != 1:\n print(\"Scanning...\")\n\n \"\"\"\n Scan profile\n \"\"\"\n\n def stop(self):\n self._is_running = 0\n self._is_paused = 1\n print(\"Stopping Scan\")\n\n def pause(self):\n self._is_paused = 1\n print(\"Pause Scan\")\n\n def resume(self):\n self._is_paused = 0\n print(\"Resume Scan\")\n\n\n# Thread to update the feedback on the GUI. The thread always loads commands into the feedback write queue\n# and the feedback read queue is synced to the write queue to update the appropriate feedback variable on the GUI\nclass UpdateFeedback(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self._is_running = 1\n self.write_index = 0 # Variable to sync read and write queue\n self.read_index = 0 # Variable to sync read and write queue\n\n # Array of text which are ASCII commands compatible with the Aerotech drive\n self.write_cmd = [\"PFBKPROG(\" + TIMC.axis1.axisName + \")\", \"IFBK(\" + TIMC.axis1.axisName + \")\",\n \"VFBK(\" + TIMC.axis1.axisName + \")\", \"PERR(\" + TIMC.axis1.axisName + \")\",\n \"PFBKPROG(\" + TIMC.axis2.axisName + \")\", \"IFBK(\" + TIMC.axis2.axisName + \")\",\n \"VFBK(\" + TIMC.axis2.axisName + \")\", \"PERR(\" + TIMC.axis2.axisName + \")\"]\n\n def run(self):\n while self._is_running:\n time.sleep(.02)\n # If there is something in the read queue, update the correct variable\n if TIMC.qFBK_read.qsize():\n data = TIMC.qFBK_read.get()\n data = data.replace(\"%\", \"\")\n if self.read_index == 0:\n pos = round(float(data), 2)\n TIMC.axis1.position_box.config(text=(\"%.2f\" % pos))\n self.read_index += 1\n elif self.read_index == 1:\n cur = float(data)\n TIMC.axis1.updateCurrent(cur)\n self.read_index += 1\n elif self.read_index == 2:\n vel = round(float(data), 1)\n TIMC.axis1.velocity_box.config(text=(\"%.2f\" % vel))\n self.read_index += 1\n elif self.read_index == 3:\n pos_err = float(data)\n TIMC.axis1.updatePosErr(pos_err)\n self.read_index += 1\n elif self.read_index == 4:\n pos = round(float(data), 2)\n TIMC.axis2.position_box.config(text=(\"%.2f\" % pos))\n self.read_index += 1\n elif self.read_index == 5:\n cur = float(data)\n TIMC.axis2.updateCurrent(cur)\n self.read_index += 1\n elif self.read_index == 6:\n vel = round(float(data), 1)\n TIMC.axis2.velocity_box.config(text=(\"%.2f\" % vel))\n self.read_index += 1\n elif self.read_index == 7:\n pos_err = float(data)\n TIMC.axis2.updatePosErr(pos_err)\n self.read_index = 0\n\n # Auto-populate the feedback write queue with commands so the queue is never empty\n if TIMC.qFBK_write.qsize() == 0:\n TIMC.qFBK_write.put(self.write_cmd[self.write_index])\n if self.write_index < len(self.write_cmd) - 1:\n self.write_index += 1\n else:\n self.write_index = 0\n\n def stop(self):\n 
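# clearing this flag lets run() fall out of its feedback-polling loop\n 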
self._is_running = 0\n\n\n# This class starts the Main GUI Window and opens communication with the controller\nclass Main:\n    def __init__(self, master):\n        self.master = master\n        master.geometry(\"650x750\")\n        master.title(\"R0: TIMC - Piping\")\n        master.resizable(width=False, height=False)\n\n        # Open Setup File\n        self.params = str()\n        self.filename = \"TIMC-P-SETUP.txt\"\n        try:\n            f = open(self.filename, \"r+\")\n            self.params = f.readlines()\n            f.close()\n            print(\"Setup File Parameters \" + str(self.params))\n        except FileNotFoundError:\n            print(\"Cannot find file: \" + self.filename)\n            print(\"Creating Default \" + self.filename)\n            f = open(self.filename, \"w+\")\n            self.params = [\"NOVA\\n\", \"0\\n\", \"20\\n\"] # Default TIMC Parameters\n            f.writelines(self.params)\n            f.close()\n            print(self.params)\n\n        self.baud = 115200\n        self.online = 0 # If communication is successful with the controller this value will be set to 1\n        self.qControl_read = queue.Queue() # Results of the qControl_write commands recorded here\n        self.qControl_write = queue.Queue() # Jog, GoTo, Index, and Set button press commands are sent to this queue\n        self.qScan_read = queue.Queue() # Results of the qScan_write commands recorded here\n        self.qScan_write = queue.Queue() # Commands from the scan thread are written to this queue\n        self.qFBK_read = queue.Queue() # Results of the qFBK_write commands recorded here\n        self.qFBK_write = queue.Queue() # Commands to update the feedback are sent to this queue\n        self.write_queue = queue.Queue()\n        self.read_queue = queue.Queue()\n\n        # Start serial thread\n        self.process_serial = SerialThread(self.baud,\n                                           self.qControl_read, self.qControl_write,\n                                           self.qScan_read, self.qScan_write,\n                                           self.qFBK_read, self.qFBK_write)\n        self.process_serial.start()\n\n        # Wait for serial thread to establish communication\n        time.sleep(1.0)\n\n        # Setup GUI Frames\n        self.tool = ToolFrame(self.master, self.params)\n        self.axis1 = AxisFrame(self.master, SetupAxis1Frame())\n        self.axis2 = AxisFrame(self.master, SetupAxis2Frame())\n        self.scan = ScanFrame(self.master, self.axis1, self.axis2)\n        self.fault = FaultFrame(self.master)\n\n        # Determine if GUI should be started offline\n        self.is_offline()\n\n    # Main method for sending commands to TIMC, command syntax specified by Aerotech: ASCII Commands\n    def acmd(self, queue_name, text):\n        if queue_name == \"CTRL\":\n            self.write_queue = self.qControl_write\n            self.read_queue = self.qControl_read\n        elif queue_name == \"SCAN\":\n            self.write_queue = self.qScan_write\n            self.read_queue = self.qScan_read\n        elif queue_name == \"STATUS\": # Not ready yet\n            print(\"Status\")\n            # self.write_queue = self.qStatus_write\n            # self.read_queue = self.qStatus_read\n        elif queue_name == \"FBK\": # Doesn't ever get called\n            print(\"FBK\")\n            # self.write_queue = self.qFBK_write\n            # self.read_queue = self.qFBK_read\n\n        # Put command on the queue, process_serial sends the command and returns the result in the read queue\n        print(text) # For now, print all Aerotech commands\n        self.write_queue.put(text)\n        data = self.read_queue.get()\n\n        # Aerotech drive sends back special characters in response to the command given\n        if \"!\" in data:\n            print(\"(!) 
Bad Execution, Queue: \" + queue_name + \" CMD: \" + text)\n            return 0\n        elif \"#\" in data:\n            print(\"(#) ACK but cannot execute, Queue:\", queue_name, \"CMD:\", text)\n            return 0\n        elif \"$\" in data:\n            print(\"($) CMD timeout, Queue:\", queue_name, \"CMD:\", text)\n            return 0\n        elif data == \"\":\n            print(\"No data returned, check serial connection, Queue:\", queue_name, \"CMD:\", text)\n            return 0\n        elif \"%\" in data:\n            data = data.replace(\"%\", \"\")\n            return data\n        else:\n            print(\"Error\")\n\n    # if communication is not successful then use Offline Mode\n    def is_offline(self):\n        if self.process_serial.port_open == 0: # OFFLINE\n            self.fault.update_status(\"OFFLINE MODE\")\n            self.process_serial.stop()\n            self.online = 0\n        elif self.process_serial.port_open == 1: # ONLINE\n            self.online = 1\n\n\ndef on_closing():\n    print(\"Closing...\")\n    exception_flag = 0\n\n    if TIMC.online:\n        print(\"Disconnecting...\")\n        try:\n            TIMC.axis1.disable_axis() # Disable Axis 1\n        except:\n            exception_flag = 1\n        try:\n            TIMC.axis2.disable_axis() # Disable Axis 2\n        except:\n            exception_flag = 1\n        try:\n            process_feedback.stop()\n        except:\n            exception_flag = 1\n        try:\n            time.sleep(0.5)\n            TIMC.process_serial.stop() # Close Serial Port Communication\n        except:\n            exception_flag = 1\n\n    if exception_flag == 1:\n        print(\"ERROR CLOSING A THREAD\")\n\n    # Overwrite setup file with parameters to include any changes during program execution\n    try:\n        new_f = open(TIMC.filename, \"r+\")\n        new_f.close()\n        print(\"Setup File Parameters \" + str(TIMC.params))\n        new_f = open(TIMC.filename, \"w+\")\n        new_f.writelines(TIMC.params)\n        new_f.close()\n    except FileNotFoundError:\n        print(\"No File to Overwrite\")\n\n    root.destroy()\n\n\ndef checkIsDigit(text):\n    if \"-\" in text:\n        text = text.replace(\"-\", \"0\")\n    if \".\" in text:\n        text = text.replace(\".\", \"0\")\n    if text.isdigit():\n        return True\n    else:\n        messagebox.showinfo(\"Bad Input\", \"Value is not a number\")\n        return False\n\n\nroot = Tk()\nTIMC = Main(root)\n\nif TIMC.online:\n    # Start thread to update position, current and error feedback for each axis\n    process_feedback = UpdateFeedback()\n    process_feedback.start()\n\n    # Start thread to monitor for ESTOP and faults etc.\n\n\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\nroot.mainloop()\n","sub_path":"Development/Scantest3.py","file_name":"Scantest3.py","file_ext":"py","file_size_in_byte":41602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"250836410","text":"from agent.TradingAgent import TradingAgent\nimport pandas as pd\nimport numpy as np\n\n\nclass MeanReversionAgent(TradingAgent):\n\n    \"\"\"\n    Simple Trading Agent that compares the \"n\" past mid-price observations with\n    the \"m\" past observations and places a buy limit order if the\n    \"n\" mid-price average <= \"m\" mid-price average minus margin, or a sell\n    limit order if the \"n\" mid-price average >= \"m\" mid-price average plus\n    margin\n    \n    THIS HAS TO BE A MARKET ORDER, NOT A LIMIT ORDER\n    \"\"\"\n\n    def __init__(self, id, name, type, symbol='IBM', starting_cash=100000,\n                 min_size=50, max_size=100, lambda_a=0.05,\n                 log_orders=False, random_state=None, short_duration=20,\n                 long_duration=40, margin=0):\n\n        super().__init__(id, name, type, starting_cash=starting_cash,\n                         log_orders=log_orders, random_state=random_state)\n\n        # received information\n        self.symbol = symbol\n        self.min_size = min_size # Minimum order size\n        self.max_size = max_size # Maximum order size\n        self.short_duration = short_duration\n        
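        # Note: short_duration and long_duration are the "n" and "m" window
        # lengths from the class docstring; receiveMessage() later compares
        # ma(mid_list, n=short_duration) against ma(mid_list, n=long_duration)
        # plus/minus margin to pick the trade direction.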
self.long_duration = long_duration\n self.margin = margin\n self.lambda_a = lambda_a\n self.log_orders = log_orders\n\n # initialise setup\n self.order_size = self.random_state.randint(self.min_size, self.max_size)\n self.mid_list = []\n self.ma_short_list = []\n self.ma_long_list = []\n self.state = \"AWAITING_WAKEUP\"\n\n def kernelStarting(self, startTime):\n super().kernelStarting(startTime)\n\n def kernelStopping(self):\n # Always call parent method to be safe.\n super().kernelStopping()\n\n def wakeup(self, currentTime):\n\n \"\"\" Agent wakeup is determined by self.wake_up_freq \"\"\"\n\n can_trade = super().wakeup(currentTime)\n\n if not can_trade:\n return\n\n self.getCurrentSpread(self.symbol)\n self.state = 'AWAITING_SPREAD'\n\n\n def receiveMessage(self, currentTime, msg):\n\n \"\"\"\n Mean reversion agent actions are determined after obtaining the best\n bid and ask in the LOB\n \"\"\"\n\n super().receiveMessage(currentTime, msg)\n if (self.state == 'AWAITING_SPREAD' and\n msg.body['msg'] == 'QUERY_SPREAD'):\n\n # query bid/ask price\n bid, bidvolume, ask, askvolume = self.getKnownBidAsk(self.symbol)\n self.bidvol = bidvolume\n self.askvol = askvolume\n\n if bid and ask:\n \n mid = (bid + ask) / 2\n self.mid_list.append(mid)\n\n # determine mid-price\n\n if len(self.mid_list) > self.long_duration:\n\n self.mid_list.pop(0)\n\n\n # Determine Moving Average \"n\" after n datapoints\n self.ma_short = MeanReversionAgent.ma(self.mid_list, n=self.short_duration)[-1].round(0)\n self.ma_long = MeanReversionAgent.ma(self.mid_list, n=self.long_duration)[-1].round(0)\n\n # Only start comparing once both MAs become available\n if self.ma_short and self.ma_long:\n \n # 20210513 Chris Cho: Query new order size\n buyorder = np.round(self.askvol**0.35)\n sellorder = np.round(self.bidvol**0.35)\n # 20200928 Chris Cho: Added the margin function\n if (self.ma_short > self.ma_long - self.margin):\n\n self.placeMarketOrder(self.symbol, quantity=sellorder,\n is_buy_order=False)\n\n\n elif (self.ma_short < self.ma_long + self.margin):\n\n self.placeMarketOrder(self.symbol, quantity=buyorder,\n is_buy_order=True)\n \n\n # set wakeup time\n self.setWakeup(currentTime + self.getWakeFrequency())\n self.state = 'AWAITING_WAKEUP'\n\n def getWakeFrequency(self):\n\n delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)\n return pd.Timedelta('{}ns'.format(int(round(delta_time))))\n\n # 20201026 Chris Cho: function to query order size\n def getOrderSize(self):\n\n # round up the order size to prevent orders of size 0\n order_size = np.ceil(70/np.random.power(3.5))\n\n # select random number\n i = self.random_state.rand()\n\n # with a chance, submit order as it is\n if i < 0.8:\n self.order_size = order_size\n\n # otherwise, round to nearest 10 orders\n else:\n\n # quick hack to prevent orders rounding to 0\n if order_size < 5:\n order_size += 5\n\n # round to nearest 10\n self.order_size = np.round(order_size, -1)\n\n return None\n\n @staticmethod\n def ma(a, n=20):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n","sub_path":"agent/MeanReversionAgent.py","file_name":"MeanReversionAgent.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"151684745","text":"from enum import Enum\n\nfrom rv.controller import Controller\nfrom rv.modules import Behavior as B, Module\n\n\nclass Reverb(Module):\n\n name = mtype = \"Reverb\"\n mgroup = \"Effect\"\n flags = 
0x000051\n\n behaviors = {B.receives_audio, B.sends_audio}\n\n class Mode(Enum):\n hq = 0\n hq_mono = 1\n lq = 2\n lq_mono = 3\n\n dry = Controller((0, 256), 256)\n wet = Controller((0, 256), 64)\n feedback = Controller((0, 256), 256)\n damp = Controller((0, 256), 128)\n stereo_width = Controller((0, 256), 256)\n freeze = Controller(bool, False)\n mode = Controller(Mode, Mode.hq)\n all_pass_filter = Controller(bool, True)\n room_size = Controller((0, 128), 16)\n random_seed = Controller((0, 32768), 0)\n","sub_path":"rv/modules/reverb.py","file_name":"reverb.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"517500026","text":"#! /usr/bin/python\n# -*- coding: latin1 -*-\n\n\"\"\"\nConverts a GRADS readable dataset to the data model.\n\nModule for reading a GRADS compatible raster file and exporting it to the data model\n(or netCDF-file by using GRADS functions) that is consisting of the following files:\nfiles numpy data array,coordinate metadata xml file and NCML NetCDF XML file.\nData is considered as grid, therefore the shape of the output numpy array is:\n(variable, time, z, lat, lon). This program was particularly written to convert\nGRAPES GRIB raster files. Find more information in the documentation.\n\"\"\"\n\n__author__= \"Nicolai Holzer\"\n__author_email__ = \"first-name dot last-name @ mailbox.tu-dresden.de\"\n__date__ =\"2011-03-28\"\n__version__ = \"v0.1.3\" #MajorVersion(backward_incompatible).MinorVersion(backward_compatible).Patch(Bug_fixes)\n\n\n#Changelog\n#-------------------------------------------------------------------------------\n#2011-01-14: v0.1.3 logging implemented, functionalities changed\n#2010-12-14: v0.1.2 parser added, functionalities changed\n#2010-11-24: v0.1.1 comments and docstrings added\n#2010-11-15: v0.1.0 first version\n\n\n#Imported libraries\n#-------------------------------------------------------------------------------\n#standard libraries\nimport sys\nimport time\nfrom optparse import OptionParser #Parser\nimport logging\n\n#related libraries\nimport numpy\n\n#Importing GRADS\n#Extends the GrADS client class GaCore, providing methods for exchanging\n#n-dimensional NumPy array data between Python and GrADS.\nimport grads.ganum as ganum\n\n#This module extends the GrADS client class by providing methods for\n#exchanging n-dimensional NumPy array data between Python and GrADS\n#import grads.numtypes as numtypes\n\n#A simple container class to collect output for query() operations.\n#import grads.gahandle as gahandle\n\n\n#local applications / library specific import\nfrom interface_Settings import *\nfrom interface_ProcessingTools import *\nfrom etc.progressBar import * #needs empty '__init__.py' file in directory\n\n#===============================================================================\n\n#Module constants (Parser)\n#-------------------------------------------------------------------------------\nUSAGE = \"%prog [options] operation data\\\n \\n[options]:\\\n \\n type '--help' for more information\\\n \\n\\\n \\noperation:\\\n \\n - grads2Model Convert GRADS raster image file (here GRAPES GRIB data) to data model\\\n \\n - printGrads Read GRADS file and print it on screen\\\n \\n - testGrads Test GRADS functionalities\\\n \\n\\\n \\ndata:\\\n \\n Raster data file that is readable by GRADS library\"\n\nDESCRIPTION= \"Conversion tool of CEOP-AEGIS data model for GRADS readable raster data\"\nEPILOG = \"Author: \"+__author__+\" (E-mail: 
\"+__author_email__+\")\"\n\nVERSION = \"%prog version \"+__version__+\" from \"+__date__\n\n\n#Module default values / constants, may be overwritten by OptionParser\n#-------------------------------------------------------------------------------\nNUMPYDATA_DTYPE = 'float32' #Default data type of output numpy array\nNODATA = 0 #Default nodata value of output numpy array\n\n#Multiplicator for each time value, should be same unit as of reference time\n#Value can't yet be extracted of Grib Metadata automatically. See Grib Metadata file for finding this value\nDATATIMESTEP = 0.5 \n\nMODULE_LOGGER_ROOT = 'grads' #Logger root name\n\n#_______________________________________________________________________________\n\nclass ControlModelGrads:\n \"\"\"Control class for model 'ModelGradsRead'. This class is providing all available functions for reading data\"\"\"\n\n def __init__(self, infile_, option_):\n \"\"\"\n Constructor for new control instance of specific file.\n\n INPUT_PARAMETERS:\n infile - name of data file with filename extension (string)\n option - Parser.options arguments\n\n COMMENTS:\n Suffixes will be automatically assigned and must respect the declarations\n in the module 'interface_Settings'.\n \"\"\"\n \n infile = str(infile_).rsplit('__',1)\n self.inputFile = infile[0]\n self.pModelGradsRead = ModelGradsRead(self.inputFile)\n\n self.pParserOptions = option_\n self.pLogger = logging.getLogger(MODULE_LOGGER_ROOT+\".\"+__name__+\".\"+self.__class__.__name__)\n self.pLogger.info(\"Open project '\" + self.inputFile + \"':\")\n \n\n #def __del__(self):\n #\"\"\"Desctructor\"\"\"\n \n\n def writeGradsNumpyData(self):\n \"\"\"Read GRADS file and save data as numpy data array according to the specifications\n of the data interface\"\"\"\n\n #Make a copy of the GRADS-file as numpy file\n pGradsData = self.pModelGradsRead.readGradsFile(self.pParserOptions.dataType)\n \n #Optional to select specific data from time stamp\n if not self.pParserOptions.specificData is None: #specificData is choosen\n pGradsData = self.pModelGradsRead.choseSpecificData(pGradsData, self.pParserOptions.specificData)\n\n #Export data as new numpy file\n self.pModelGradsRead.writeNumpyData(pGradsData)\n return\n\n\n def writeGradsMetadata(self):\n \"\"\"Get metadata from a GRADS readable file and write metadata to coordinate metadata file and\n NCML XML file according to the specifications of the data interface\"\"\"\n\n self.pModelGradsRead.writeMetadataNcml(self.pParserOptions.nodataValue)\n self.pModelGradsRead.writeMetadataNumpymeta(self.pParserOptions.specificData)\n return\n\n\n #optional\n def completeDataModelManually(self):\n \"\"\"Complete missing data and metadata manually\"\"\"\n\n self.pModelGradsRead.completeDataVariables()\n self.pModelGradsRead.completeMetadataNcml()\n self.pModelGradsRead.completeMetadataNumpymeta() #not implemented\n return\n\n\n #optional\n def printGradsMetadata(self):\n \"\"\"Read GRADS readable file and print metadata on screen\"\"\"\n\n self.pModelGradsRead.printGradsMetadata()\n return\n\n\n #optional\n def testGradsFunctionality(self):\n \"\"\"Test GRADS functionality by testing its functions and creating a NetCDF\n file automatically\"\"\"\n\n self.pModelGradsRead.grib2NetCdf_gradsTest()\n return\n\n\n#_______________________________________________________________________________\n\nclass ModelGradsRead:\n \"\"\"This class contains functions to handle read operations on GRADS data and is controlled by\n the class 'ControlModelGrads'.\n This class was in particularly 
written to handle GRAPES GRIB data.\"\"\"\n\n\n def __init__(self, infile_):\n \"\"\"\n Constructor.\n\n INPUT_PARAMETERS:\n infile - name of GRADS file name with filename extension (string)\n \"\"\"\n self.pDefaultSettings = DefaultSettings()\n \n self.gradsFileName = infile_ #With file name extension\n\n #infile = self.gradsFileName.rsplit('.',1) #without file name extension\n self.numpyDataName = infile_+FILENAME_SUFFIX_NUMPYDATA\n self.ncmlName = infile_+FILENAME_SUFFIX_NCML\n self.numpymetaName = infile_+FILENAME_SUFFIX_NUMPYXML\n\n #Use Processing Tools\n self.pProcessingTool = ProcessingTool()\n self.pProcessNcml = ProcessNcml(self.ncmlName)\n self.pProcessNumpymeta = ProcessNumpymeta(self.numpymetaName)\n\n self.pLogger = logging.getLogger(MODULE_LOGGER_ROOT+\".\"+__name__+\".\"+self.__class__.__name__)\n\n #Read GRADS file\n #Start the GRADS application, creating new instance\n #Depending on GRADS version, 'Bin' is telling which GRADS executable to start\n #For 2.0a7 this is 'grads' and 'gradsdap'\n try:\n self.pGa = ganum.GaNum(Bin='grads', Echo=False, Window=False)\n self.pGa.open(self.gradsFileName)\n except:\n raise Exception (\"Opening of file '\" + str(self.gradsFileName) + \"' failed. Check if it exists and if filename suffix is set.\")\n \n\n def __del__(self):\n \"\"\"Destructor\"\"\"\n #Close GRADS instance\n del self.pGa\n\n\n def readGradsFile(self, dataType_):\n \"\"\"Reads a GRADS file and returns GRADS data as numpy array.\n Argument 'dataType' defines the data type of the resulting numpy array.\"\"\"\n\n pGa = self.pGa\n \n #Get file information via GRADS\n #-------------------------------------------------------------------------------\n # Query dataset information, command available for \"file\" and \"dims\"\n pGa_queryFile = pGa.query(\"file\")\n pGa_queryDims = pGa.query(\"dims\")\n\n #Get dimension values and set dimensions\n dimX = pGa_queryFile.nx #number of longitude points\n dimY = pGa_queryFile.ny #number of latitude points\n #dimZ = pGa_queryFile.nz #z-dimension not used in GRAPES data, level = 1\n dimT = pGa_queryFile.nt #number of time values in file\n dimVar = pGa_queryFile.nvars #numbers of variables in file\n\n varsNames = pGa_queryFile.vars #names of variables in file\n\n pGa(\"set z 1\") #GRADS command to set dimensions\n pGa(\"set t 1 last\") #Get all time values; define timestamp later in python\n\n #Define progress bar settings\n widgetsBar = ['Import status: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=dimVar).start()\n\n #Print Dataset information on the screen\n #print \"\\nCoordinate information: \\n\", pGa.coords()\n #print \"\\nFile information: \\n\", pGa_queryFile\n #print \"\\nDimension information: \\n\", pGa_queryDims\n\n\n #Writing numpy file\n #-------------------------------------------------------------------------------\n pDataType = self.pProcessingTool.dataType_2Numpy(dataType_)\n pGradsData = numpy.zeros((dimVar,dimT,dimY,dimX), dtype = pDataType)# All data\n \n #Reading all variables in GRADS file\n for i_var in range(0,dimVar,1): # otherwise returns list of ints from >= start and < end: 0 .. 
10\n\n self.pLogger.info(\"Reading GRADS variable ID '\" + str(i_var) + \"' with name '\" + str(varsNames[i_var]) + \"'...\")\n pDataArray = pGa.expr(varsNames[i_var]) #Export GRADS field of specific variable as numpy-like array\n \n #Create a numpy file per file, so for all variables per file\n pGradsData[i_var,:,:,:] = numpy.asarray(pDataArray.astype(pDataType))\n\n progressBar.update(i_var+1)# Progress bar\n\n\n #Change dimensions of numpy array so that it gets conform with the data model specifications\n #-------------------------------------------------------------------------------\n dimVar = pGradsData.shape[0] #Number of variables in array\n dimT = pGradsData.shape[1] #Time Dimension\n dimZ = int(1) #Level Dimensions\n dimY = pGradsData.shape[2] #Last but one axis top to bottom: lat -> row\n dimX = pGradsData.shape[3] #Last axis left to right: lon -> col\n\n pBuffer = numpy.zeros((dimY,dimX), dtype = pGradsData.dtype) #Buffer for calculation\n pGradsDataNorm = numpy.zeros((dimVar,dimT,dimZ,dimY, dimX), dtype = pGradsData.dtype) #Normed numpy data array\n\n #Change dimension order that is \"var,time,y,x' and is to be 'var,time,level,y,x'.\n #This is neccessary so that the time variables dimension can be set to unlimited (only possible for first variable).\n #Define progress bar settings\n widgetsBar = ['Making data conform for data model: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=dimVar).start()\n\n for i_var in range(0,dimVar,1):\n for i_time in range(0, dimT, 1):\n pBuffer[:,:] = pGradsData[i_var,i_time,:,:] #Extract data to buffer\n pGradsDataNorm[i_var,i_time,0,:,:] = pBuffer[:,:] #Write Data in output numpy array\n\n progressBar.update(i_var+1)# Progress bar\n\n return pGradsDataNorm\n\n\n def choseSpecificData(self, pGradsData_, dataTime_):\n \"\"\"Optional: Extract those datasets that fall within the wanted timestamp\n\n Define time stamp in list dataTime. dataTime[0] is start value, dataTime[1]\n end value, as time units since reference time. \n Example: nt = 97 values; first (1st) value first day 0h00, half hour steps,\n 96th value: second day 23h30, 97th value third day 0h00\n Time intervall has for example to consist of 24 hours, so 47 values!\n position numbers (start value = 1, not 0!!!), not index numbers of arrays; needed for dimension setting\n DATASTART = 25 #12h00 first day\n DATASTOP = 72 #11h30 second day\n \"\"\"\n\n self.pLogger.info(\"Extract specific data as implemented in function 'choseSpecificData'...\")\n\n pGradsData = pGradsData_\n dataStart = int(dataTime_[0])\n dataStop = int(dataTime_[1])\n\n #Define progress bar settings\n widgetsBar = ['Extracting specific data: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=(dataStop-(dataStart-1))).start()\n\n\n #Get all datasets from wanted time intervall. Index of time value is defined in\n #global variables. Export data as numpy file\n\n #Number of time values for wanted time stamp\n grapesTimeStamp = dataStop - dataStart + 1#dimension value, not index value for array!!! E.g. (48-1)+1=48\n\n #Only GRADS data of wanted time stamp\n pGradsDataTS = numpy.zeros((pGradsData.shape[0],grapesTimeStamp,pGradsData.shape[2],\\\n pGradsData.shape[3], pGradsData.shape[4]), dtype = pGradsData.dtype)\n\n #!Range: Last value not taken for iteration! 
So don't use DATASTOP-1, but DATASTOP!\n i = 0\n for j in range(dataStart-1, dataStop, 1): #array index numbers, not position numbers\n pGradsDataTS[:,i,:,:,:] = pGradsData[:,j,:,:,:]\n i = i+1\n\n progressBar.update(j-(dataStart-1)+1)# Progress bar\n \n return pGradsDataTS\n\n\n def writeNumpyData(self, pNumpyData_):\n \"\"\"Export numpy data array to file\"\"\"\n\n self.pLogger.info(\"Numpy output will be file saved as '\"+ str(self.numpyDataName) + \"'...\")\n numpy.save(str(self.numpyDataName), pNumpyData_) #Better as 'tofile'. Also possible: 'dump'\n self.pLogger.info(\"Done. Shape of resulting numpy file: '\" + str(pNumpyData_.shape) + \"'; Data type: '\" + str(pNumpyData_.dtype) + \"'.\")\n\n return\n\n\n def writeMetadataNcml(self, nodata_):\n \"\"\"Create new NCML XML file according to the specifications of the data model and\n complete this file by the metadata that can be extracted out of the GRADS file\"\"\"\n\n #Get metadata information from file \n #-------------------------------------------------------------------------------\n pGa = self.pGa\n pGa_queryFile = pGa.query(\"file\") # Query dataset information, command available for \"file\" and \"dims\"\n\n pNumpyData = numpy.load(self.numpyDataName)\n\n dimVar = pNumpyData.shape[0] #Number of variables in array\n varsNames = pGa_queryFile.vars #names of variables on file\n varsTitles = pGa_queryFile.var_titles #var_titles are equivalent to long_name\n\n #Define progress bar settings\n widgetsBar = ['Creating Ncml metadata file: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=dimVar).start()\n \n #Write metadata NCML file\n #-------------------------------------------------------------------------------\n self.pProcessNcml.createMacroNcmlFile()\n self.pProcessNcml.fillNcmlMacroWithNumpy(pNumpyData) \n\n #Correct and complete entries\n self.pProcessNcml.changeGlobalAttribute('title', 'value', pGa_queryFile.title)\n\n for i_var in range(0,dimVar,1): # otherwise returns list of ints from >= start and < end: 0 .. 
10\n varName = 'variable #'+str(i_var)\n varsDescriptions = varsTitles[i_var].rsplit('0 ') #To get rid of weird values at beginning\n \n self.pProcessNcml.changeVariable(varName, 'name', varsNames[i_var])\n self.pProcessNcml.changeLocalAttribute(varsNames[i_var], 'long_name', 'value', varsDescriptions[1])\n self.pProcessNcml.changeLocalAttribute(varsNames[i_var], '_FillValue', 'value', str(nodata_))\n\n progressBar.update(i_var+1)# Progress bar\n\n return\n\n\n def writeMetadataNumpymeta(self, dataTime_):\n \"\"\"Create new metadata coordinate XML file according to the specifications of the data model and\n complete this file by the metadata that can be extracted out of the GRADS file\"\"\"\n\n pGa = self.pGa\n\n #Get metadata information from file by the use of GRADS\n #-------------------------------------------------------------------------------\n #Query dataset information, command available for \"file\" and \"dims\"\n pGa_queryDims = pGa.query(\"dims\")\n pGa_queryFile = pGa.query(\"file\")\n\n #Get latitude / longitude values\n latMin = pGa_queryDims.lat[0]#ymin\n latMax = pGa_queryDims.lat[1]#ymax\n lonMin = pGa_queryDims.lon[0]#xmin\n lonMax = pGa_queryDims.lon[1]#xmax\n\n #Get time values\n #Number of time values for wanted time stamp, otherwise DimT\n if not dataTime_ is None: #specificData is choosen\n dataStart = int(dataTime_[0])\n dataStop = int(dataTime_[1])\n else:\n dataStart = 1\n dataStop = pGa_queryFile.nt #dimT, number of time values in file\n grapesTimeStamp = dataStop - dataStart + 1 #Dimension value, not index value for array!!! E.g. (48-1)+1=48\n\n referenceTimeGrib = pGa_queryDims.time[0] #Reference time of data in grib metadata format\n referenceTimeNetCdf = self.__timeUnitGrib2NetCdf(referenceTimeGrib, dataStart) #Reference time of data translated to NetCDF metadata format\n pTimes = self.pProcessingTool.createTimeValuesNumpy(referenceTimeNetCdf, grapesTimeStamp, DATATIMESTEP) #Calculate time values\n\n\n #Write coordinate metadata file\n #-------------------------------------------------------------------------------\n self.pProcessNumpymeta.createMacroNumpymetaFile()\n\n self.pProcessNumpymeta.writeNumpyMetadataValues(pTimes, 'time') #Either time values or min/max\n\n self.pProcessNumpymeta.setAttribute('numpymeta', 'latitude', 'min', str(latMin))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'latitude', 'max', str(latMax))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'longitude', 'min', str(lonMin))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'longitude', 'max', str(lonMax))\n\n self.pProcessNumpymeta.setAttribute('numpymeta', 'height', 'values', str(1))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'height', 'separator', str(','))\n\n return\n\n\n #Other functions\n #-------------------------------------------------------------------------------\n\n def printGradsMetadata(self):\n \"\"\"Read GRADS file and print metadata on screen\"\"\"\n\n infile = self.gradsFileName\n\n ga = ganum.GaNum(Bin='grads', Echo=False, Window=True)#Starts the GRADS application\n #Depending on GRADS version, bin is telling which GRADS executable to start\n #For 2.0a7 this is 'grads' and 'gradsdap'\n try:\n fh = ga.open(infile)\n except:\n raise Exception(\"Error: GRADS file does not exist: '\" + str(infile) + \"'.\")\n exit()\n\n #Query metadata information\n qh_file = ga.query(\"file\")\n qh_dims = ga.query(\"dims\")\n\n #Print metadata information on screen\n self.pLogger.info(\"---------------------------------------------------------------------\")\n 
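        # Note: qh_file carries grid sizes and the variable list (nx, ny, nt,
        # nvars), while qh_dims carries the current lat/lon/time ranges; both
        # query handles are printed verbatim below.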
self.pLogger.info(\"File information:\")\n self.pLogger.info(qh_file)\n self.pLogger.info(\"Dimension information:\")\n self.pLogger.info(qh_dims)\n self.pLogger.info(\"---------------------------------------------------------------------\")\n\n return\n\n\n def __timeUnitGrib2NetCdf(self, timeGribStart_, dataStart_):\n \"\"\"Transforms reference time from Grib metadata format to time format of\n NetCDF time units attribute. timeGribStart is reference time, dataStart\n is offset time.\"\"\"\n\n #Get time values from infofile and adapt data\n #-------------------------------------------------------------------------------\n fileTimeStart = timeGribStart_ #Start value of data\n\n if len(fileTimeStart) == 12: #Data format for example 00Z11JAN2008\n timestart_time = fileTimeStart[0:2]+':00:0.0'\n timestart_day = fileTimeStart[3:5]\n timestart_month = fileTimeStart[5:8]\n timestart_year = fileTimeStart[8:12]\n #print timestart_year, timestart_month, timestart_day, timestart_time\n elif len(fileTimeStart) == 15: #Data format for example 00:30Z11JAN2008\n timestart_time = fileTimeStart[0:5]+':0.0'\n timestart_day = fileTimeStart[6:8]\n timestart_month = fileTimeStart[8:11]\n timestart_year = fileTimeStart[11:15]\n #print timestart_year, timestart_month, timestart_day, timestart_time\n else:\n raise Exception(\"Error in function 'timeUnitGrib2NetCdf': Time specification in infofile can't be read, process aborted...\")\n \n #Change month from word statement to number\n if timestart_month == 'JAN':\n timestart_month_nr = '01'\n elif timestart_month == 'FEB':\n timestart_month_nr = '02'\n elif timestart_month == 'MAR':\n timestart_month_nr = '03'\n elif timestart_month == 'APR':\n timestart_month_nr = '04'\n elif timestart_month == 'MAI':\n timestart_month_nr = '05'\n elif timestart_month == 'JUN':\n timestart_month_nr = '06'\n elif timestart_month == 'JUL':\n timestart_month_nr = '07'\n elif timestart_month == 'AUG':\n timestart_month_nr = '08'\n elif timestart_month == 'SEP':\n timestart_month_nr = '09'\n elif timestart_month == 'OCT':\n timestart_month_nr = '10'\n elif timestart_month == 'NOV':\n timestart_month_nr = '11'\n elif timestart_month == 'DEC':\n timestart_month_nr = '12'\n else:\n raise Exception(\"Error in function 'timeUnitGrib2NetCdf': Month specification in infofile corrupt, process aborted...\")\n \n########### Hack to change timeUnitNetCdf in case that not all GRAPES data is used (like here the timestamp)\n offsetTime = (dataStart_ - 1) * DATATIMESTEP #(25-1)*0.5=12h00\n if (offsetTime < 24 and offsetTime % 2 == 0): #must be full hours\n timestart_time_hours = int(timestart_time[0:1])+int(offsetTime)\n timestart_time = str(timestart_time_hours)+str(timestart_time[2:9])\n else:\n raise Exception (\"Error in function 'timeUnitGrib2NetCdf': time offset >= 24 is not implemented yet! DATATIMESTEP unless full hours is not implemented yet!\")\n\n\n #Set NetCDF time unit, e.g. 
\"hours since 2008-01-11 00:00:0.0\"\n #-------------------------------------------------------------------------------\n timeUnitNetCdf = 'hours since '+ str(timestart_year)+ '-'+ str(timestart_month_nr)+\\\n '-'+ str(timestart_day)+' '+ str(timestart_time)\n\n return timeUnitNetCdf\n\n\n def grib2NetCdf_gradsTest(self):\n \"\"\"Test GRADS functionality by testing functions and creating a NetCdf file\"\"\"\n\n #Open file\n infile = self.gradsFileName\n\n ga = ganum.GaNum(Bin='grads', Echo=False, Window=True)#Starts the grads application\n #Depending on Grads version, bin is telling which grads executable to start\n #For 2.0a7 this is 'grads' and 'gradsdap'\n try:\n fh = ga.open(infile)\n except:\n raise Exception(\"Error: GRADS file does not exist: '\" + str(infile) + \"'.\")\n exit()\n\n #Printing metadata on screen\n qh_file = ga.query(\"file\")\n qh_dims = ga.query(\"dims\")\n\n self.pLogger.info(\"---------------------------------------------------------------------\")\n self.pLogger.info(\"File information:\")\n self.pLogger.info(qh_file)\n self.pLogger.info(\"Dimension information:\")\n self.pLogger.info(qh_dims)\n self.pLogger.info(\"---------------------------------------------------------------------\")\n\n\n #Create one netCDF-file of specific variable by using GRADS commands\n ga(\"set z 1\")\n ga(\"set t 1 last\")\n\n ga(\"display gsw\")\n ga(\"define out = gsw\")\n ga(\"set sdfwrite output_file_GRADS_gsw.nc\")\n ga(\"sdfwrite out\")\n\n raw_input(\"Press Enter to terminate.\") #Wait\n\n del ga\n\n return\n\n\n #Data specific functions\n #-------------------------------------------------------------------------------\n\n def completeDataVariables(self):\n \"\"\"Complete missing data variable value modification manually\n\n Example: Scale data values in case that units prefix have to be changed\n (e.g. from hPa to Pa) due to defined unit in standard_name entry.\"\"\"\n\n pGradsData = numpy.load(self.numpyDataName)\n\n #Scale of data. Here: data is in hPa, must be in Pa\n pGradsData = self.pProcessingTool.scaleNumpyDataVariable(pGradsData, 5, 100.0) #p_pbl\n pGradsData = self.pProcessingTool.scaleNumpyDataVariable(pGradsData, 7, 100.0) #ps\n pGradsData = self.pProcessingTool.scaleNumpyDataVariable(pGradsData, 8, 100.0) #psl\n\n numpy.save(self.numpyDataName, pGradsData) #Better then 'tofile'. 
Also possible: 'dump'\n\n return\n\n\n def completeMetadataNcml(self):\n \"Complete missing data in NCML XML file manually\"\n\n self.pProcessNcml.changeGlobalAttribute('source', 'value', 'No information available')\n self.pProcessNcml.changeGlobalAttribute('references', 'value', 'No information available')\n self.pProcessNcml.changeGlobalAttribute('comment', 'value', 'No information available')\n\n self.pProcessNcml.changeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'units', 'value', '1') #'Level' is not conform to udunits!\n self.pProcessNcml.changeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'long_name', 'value', 'level')\n###############Define Standard Name!\n #self.pProcessNcml.changeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'standard_name', 'value', '???')\n self.pProcessNcml.removeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'standard_name')\n\n self.pProcessNcml.changeLocalAttribute('pblh', 'units', 'value', 'm')\n self.pProcessNcml.changeLocalAttribute('pblh', 'standard_name', 'value', 'atmosphere_boundary_layer_thickness')\n \n self.pProcessNcml.changeLocalAttribute('tpbl', 'units', 'value', 'K')\n self.pProcessNcml.changeLocalAttribute('tpbl', 'standard_name', 'value', 'tropopause_air_temperature')\n \n self.pProcessNcml.changeLocalAttribute('qpbl', 'units', 'value', 'kg kg-1')\n self.pProcessNcml.changeLocalAttribute('qpbl', 'standard_name', 'value', 'specific_humidity')\n \n self.pProcessNcml.changeLocalAttribute('upbl', 'units', 'value', 'm s-1')\n self.pProcessNcml.changeLocalAttribute('upbl', 'standard_name', 'value', 'x_wind')\n \n self.pProcessNcml.changeLocalAttribute('vpbl', 'units', 'value', 'm s-1')\n self.pProcessNcml.changeLocalAttribute('vpbl', 'standard_name', 'value', 'y_wind')\n \n self.pProcessNcml.changeLocalAttribute('p_pbl', 'units', 'value', 'Pa')\n self.pProcessNcml.changeLocalAttribute('p_pbl', 'standard_name', 'value', 'tropopause_air_pressure')\n \n self.pProcessNcml.changeLocalAttribute('q2', 'units', 'value', 'kg kg-1')\n self.pProcessNcml.changeLocalAttribute('q2', 'standard_name', 'value', 'surface_specific_humidity')\n \n self.pProcessNcml.changeLocalAttribute('ps', 'units', 'value', 'Pa')\n self.pProcessNcml.changeLocalAttribute('ps', 'standard_name', 'value', 'surface_air_pressure')\n \n self.pProcessNcml.changeLocalAttribute('psl', 'units', 'value', 'Pa')\n self.pProcessNcml.changeLocalAttribute('psl', 'standard_name', 'value', 'air_pressure_at_sea_level')\n \n self.pProcessNcml.changeLocalAttribute('glw', 'units', 'value', 'W m-2')\n self.pProcessNcml.changeLocalAttribute('glw', 'standard_name', 'value', 'atmosphere_net_rate_of_absorption_of_longwave_energy')\n \n self.pProcessNcml.changeLocalAttribute('gsw', 'units', 'value', 'W m-2')\n self.pProcessNcml.changeLocalAttribute('gsw', 'standard_name', 'value', 'atmosphere_net_rate_of_absorption_of_shortwave_energy')\n \n return\n\n\n def completeMetadataNumpymeta(self):\n \"Complete missing data in metadata coordinate XML file manually\"\n #--> Nothing to complete at the moment\n return\n\n\n\n#_______________________________________________________________________________\n\ndef main():\n \"\"\"\n Main function.\n\n This function represents the user interface and is called when the\n program is executed. 
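    Typical invocations look like the following (the GRIB file name is an
    illustrative placeholder, not a file shipped with this module):

        grads_2Interface.py grads2Model mydata.grib
        grads_2Interface.py -s 25 72 grads2Model mydata.grib
        grads_2Interface.py printGrads mydata.grib
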
Start the program by executing it with the following\n statement in your shell: grads_2Interface.py --help\n \"\"\"\n\n startTime = time.time()\n pDefaultSettings = DefaultSettings()\n\n #Parser definition\n #-------------------------------------------------------------------------------\n pParser = OptionParser(usage=USAGE, version = VERSION, description = DESCRIPTION, epilog = EPILOG)\n\n pParser.set_defaults(completeModel = False)\n pParser.set_defaults(isDoc = False)\n pParser.set_defaults(logLevel = pDefaultSettings.loggerLevelConsole)\n pParser.set_defaults(nodataValue = NODATA)\n pParser.set_defaults(dataPath = pDefaultSettings.dataDirectory) \n pParser.set_defaults(dataType = NUMPYDATA_DTYPE)\n\n \n pParser.add_option(\"-c\", \"--complModel\", action=\"store_true\", dest='completeModel', help=\"Complete data model by functions particularly written for specific data (default = %default)\")\n pParser.add_option(\"-d\", \"--doc\", action=\"store_true\", dest='isDoc', help=\"Give more information by printing docstrings (default = %default)\")\n pParser.add_option('-l', '--log', action = 'store', dest='logLevel', choices = ['debug','info','warning','error','critical'], nargs = 1, help=\"Minimum level for printing information to the console (default = %default)\")\n pParser.add_option('-n', '--nodata', action = 'store', dest='nodataValue', nargs = 1, help=\"Set nodata value (default = %default)\")\n pParser.add_option('-p', '--path', action = 'store', type ='string', dest='dataPath', nargs = 1, help=\"Directory for input / output files (default = %default)\")\n pParser.add_option('-s', '--specData', action = 'store', dest='specificData', nargs = 2, help=\"Only extract specific data as implemented in function 'choseSpecificData' \\\n between DATASTART (arg1) and DATASTOP (arg2)\") #(default = %default)\")\n pParser.add_option('-t', '--dtype', action = 'store', dest='dataType', choices = [''] + NUMPY_DTYPES, nargs = 1, help=\"Define output data type of numpy array (default = %default)\")\n \n (options, args) = pParser.parse_args()\n\n\n #Initialize logger\n #-------------------------------------------------------------------------------\n pLog = LoggingInterface(MODULE_LOGGER_ROOT, options.logLevel, pDefaultSettings.loggerLevelFile) #Instance is necessary although if not used.\n pLogger = logging.getLogger(MODULE_LOGGER_ROOT+\".\"+__name__)\n pLogger.info(\"_____________________________________________________________________________________________\")\n pLogger.info(\"Starting program 'GRADS2INTERFACE' version '\" + str(__version__) + \"' from '\" + str(__date__) + \"':\")\n\n\n try:\n\n #Parse command line arguments and options\n #-------------------------------------------------------------------------------\n if len(args) != 2:\n pLogger.error(\"Parser error occured. See error messages on the screen.\")\n pParser.error(\"Incorrect number of arguments. Two arguments 'operation' and 'data' are nedded. \" \\\n +str(len(args))+\" arguments are given. 
Execute '%prog --help' for more information\")\n        else:\n            #args = sys.argv[1:]#sys.argv[0] is name of program being executed\n            operation_ = args[0]\n            infile_ = args[1]\n\n\n            #Process parser options\n            #-------------------------------------------------------------------------------\n            if options.isDoc:\n                pLogger.info(__doc__)\n                sys.exit(0)\n\n            dataPath = options.dataPath\n            if not dataPath.endswith('/') and dataPath != '': #Append '/' to the path if it is missing\n                dataPath = dataPath+'/'\n            infileName = dataPath+infile_ #Add path of data directory to filename\n\n\n            #Run program\n            #-------------------------------------------------------------------------------\n            pControlModelGrads = ControlModelGrads(infileName, options)\n\n            if operation_ == 'grads2Model':\n                pLogger.info(\"Operation: Convert GRADS to data model\")\n                pControlModelGrads.writeGradsNumpyData() #Write numpy data array\n                pControlModelGrads.writeGradsMetadata() #Write metadata\n\n                if options.completeModel:#optional\n                    pControlModelGrads.completeDataModelManually() #Complete data model manually\n\n            elif operation_ == 'printGrads':\n                pLogger.info(\"Operation: Print GRADS data on the screen\")\n                pControlModelGrads.printGradsMetadata()\n\n            elif operation_ == 'testGrads':\n                pLogger.info(\"Operation: Test GRADS functionalities\")\n                pControlModelGrads.testGradsFunctionality()\n\n            else:\n                pLogger.error(\"Parser error: Operation '\" + str(operation_) + \"' is unknown.\")\n                pParser.error(\"Operation '\" + str(operation_) + \"' is unknown.\") #System exit code 2\n\n\n    except Exception: #If an exception occurred in this module or in connected sub-modules\n        pLogger.exception('Exception Error occurred: ')\n        raise\n\n    finally:\n        pLogger.info(\"Finished. Total processing time [s]: '\" + str(time.time() - startTime) + \"'.\")\n        pLogger.info(\"_____________________________________________________________________________________________\")\n        pLog.__del__()\n        \n    #pControlModelGrads.__del__()\n\n    \nif __name__ == \"__main__\":\n    main()\n","sub_path":"samples/grads_2Interface.py","file_name":"grads_2Interface.py","file_ext":"py","file_size_in_byte":35013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"356548249","text":"\"\"\"\n* user: VR432075\n* fname: BUSATTO\n* lname: ALESSANDRO\n* task: biancaneve\n* score: 2.0\n* date: 2019-06-25 10:43:37.634220\n\"\"\"\nfrom __future__ import print_function\nimport sys\nif sys.version_info<(3,0):\n    input=raw_input\n\n\ndef scambia(nani,p1,p2):\n    x=nani[p1-1]\n    nani[p1-1]=nani[p2-1]\n    nani[p2-1]=x\n\ndef check(nani, h1, h2):\n    num_nani=h2-h1+1\n    total=0\n    y=len(nani)+1\n    prefix_sum=[0]*y\n    \n    for i in range(0,y-1):\n        prefix_sum[i+1]=prefix_sum[i]+nani[i]\n\n    for i in range(h1,h2+1):\n        total += i\n    i=len(prefix_sum)-1\n    while i-num_nani >= 0 and prefix_sum[i]>=total:\n        if(prefix_sum[i]-prefix_sum[i-num_nani]==total):\n            return 1\n        i=i-1\n    return 0\n\n\ndef main():\n    #r1=input()\n    #split=r1.split()\n    #n=int(split[0])\n    #m=int(split[1])\n    \n    #disp_nani=input()\n    #nani=int(disp_nani.split())\n    n, m = map(int, input().split()) \n    nani = list(map(int, input().split()))\n    for i in range(0,m):\n        t, p1, p2 = map(int, input().split())\n        #r=input()\n        #r_split=r.split()\n        #t=int(r_split[0])\n        #p1=int(r_split[1])\n        #p2=int(r_split[2])\n        if t==1:\n            scambia(nani,p1,p2)\n        else:\n            res=check(nani,p1,p2)\n            if res==1:\n                print(\"YES\")\n            else:\n                print(\"NO\")\n\nif __name__ == '__main__':\n    
main()","sub_path":"Algoritmi/2019-06-25/all-CMS-submissions/2019-06-25.10:43:37.634220.VR432075.biancaneve.py","file_name":"2019-06-25.10:43:37.634220.VR432075.biancaneve.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"337441097","text":"import random\ndef di_demo(myfile):\n print(myfile)\n fin = open(myfile,\"r\")\n mydi = dict()\n for fi in fin:\n mydi[fi] = random.randint(0,999999)\n return mydi\n\nx = input(\"Enter file name to create dictionary:\")\nprint(di_demo(x))\n","sub_path":"LAB3TASK9.py","file_name":"LAB3TASK9.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"114195257","text":"# credit to: http://zxi.mytechroad.com/blog/dynamic-programming/688-knight-probability-in-chessboard/\nclass Solution:\n def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n dp0 = [[0] * N for _ in range(N)]\n dp0[r][c] = 1\n for k in range(1, K+1):\n dp1 = [[0] * N for _ in range(N)]\n for i in range(N):\n for j in range(N):\n sum_ = 0\n for di, dj in zip([-2,-1,1,2,2,1,-1,-2], [-1,-2,-2,-1,1,2,2,1]):\n new_i = di + i\n new_j = dj + j\n if new_i in range(N) and new_j in range(N):\n dp1[i][j] += dp0[new_j][new_i]\n dp1, dp0 = dp0, dp1\n\n total = 0\n for i in range(N):\n for j in range(N):\n total += dp0[i][j]\n return total / 8 ** K\n \nclass Solution(object):\n def knightProbability(self, N, K, r, c):\n \"\"\"\n :type N: int\n :type K: int\n :type r: int\n :type c: int\n :rtype: float\n \"\"\"\n \n dp = [[[0] * N for _ in range(N)] for _ in range(K+1)]\n \n for row in range(N):\n for col in range(N):\n dp[0][row][col] = 1\n \n for k in range(1, K+1):\n for row in range(N):\n for col in range(N):\n \n for dr, dc in zip([-2, -1, 1, 2, 2, 1, -1, -2], [-1, -2, -2, -1, 1, 2, 2, 1]):\n nrow = row + dr\n ncol = col + dc\n if nrow in range(0, N) and ncol in range(0, N):\n dp[k][row][col] += dp[k-1][nrow][ncol]\n\n return float(dp[K][r][c]) / 8 ** K\n \n \n","sub_path":"python/688 Knight Probability in Chessboard.py","file_name":"688 Knight Probability in Chessboard.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"26384254","text":"\nclass Stack:\n def __init__(self):\n self.elements = []\n\n def push(self, ele):\n self.elements.append(ele)\n\n def pop(self):\n if self.empty():\n return None\n\n return self.elements.pop(len(self.elements) - 1)\n\n def empty(self):\n return len(self.elements) == 0\n\n\n# Processed vertex uses 0 indexed scheme\nclass Vertex:\n def __init__(self, id, adjVertices):\n self.id = id\n self.adjVertices = adjVertices\n\n def __str__(self):\n vertexStr = str(self.id) + \":\"\n for vert in self.adjVertices:\n vertexStr += \" \" + str(vert)\n return vertexStr\n\n\n# Raw vertex data uses 1 indexed scheme\ndef readGraphData(fileName, numVertices):\n graphData = [Vertex(i, []) for i in range(numVertices)]\n revGraphData = [Vertex(i, []) for i in range(numVertices)]\n with open(fileName) as f:\n for line in f:\n edge = line.split()\n start = int(edge[0]) - 1\n end = int(edge[1]) - 1\n # Ignore self connected edges\n if start == end:\n continue\n\n graphData[start].adjVertices.append(end)\n revGraphData[end].adjVertices.append(start)\n\n f.close()\n\n return graphData, revGraphData\n\n\ndef findStronglyConnectedGraph(graphData, revGraphData):\n numVertices = len(graphData)\n 
visited = {}\n for i in range(numVertices):\n visited[i] = False\n\n # First pass of DFS to do a topological sort to figure out\n # meta-graph info, which contains post order of each vertex\n # and topological ordering of each SCC.\n subDFSOrderStack = Stack()\n for i in range(numVertices):\n subDFSOrder = dfsI(revGraphData, i, visited)\n if len(subDFSOrder) > 0:\n subDFSOrderStack.push(subDFSOrder)\n\n # Reconstruct first DFS order of the first pass DFS\n insertionId = 0\n firstDFSOrder = [None] * numVertices\n while not subDFSOrderStack.empty():\n subDFSOrder = subDFSOrderStack.pop()\n newInsertionId = insertionId + len(subDFSOrder)\n firstDFSOrder[insertionId:newInsertionId] = subDFSOrder\n insertionId = newInsertionId\n\n # Reset visited record\n for i in range(numVertices):\n visited[i] = False\n\n sccSizes = []\n # Second pass of DFS to find out all SCCs\n for vertId in firstDFSOrder:\n size = dfsII(graphData, vertId, visited)\n if size > 0:\n sccSizes.append(size)\n\n return sccSizes\n\n\ndef dfsI(revGraphData, initialVertId, visited):\n if visited[initialVertId]:\n return []\n\n path = []\n stack = Stack()\n stack.push(initialVertId)\n while not stack.empty():\n vertId = stack.pop()\n # Important!!! It is possible traversing a cyclic graph\n # may result in pushing duplicated nodes into the stack/queue.\n if visited[vertId]:\n continue\n\n path.append(vertId)\n visited[vertId] = True\n vertex = revGraphData[vertId]\n for adjVertId in vertex.adjVertices:\n if not visited[adjVertId]:\n stack.push(adjVertId)\n\n return path\n\n\ndef dfsII(graphData, initialVertId, visited):\n if visited[initialVertId]:\n return 0\n\n nodeCounter = 0\n stack = Stack()\n stack.push(initialVertId)\n while not stack.empty():\n vertId = stack.pop()\n # Important!!! 
It is possible traversing a cyclic graph\n # may result in pushing duplicated nodes into the stack/queue.\n if visited[vertId]:\n continue\n\n visited[vertId] = True\n vertex = graphData[vertId]\n for adjVertId in vertex.adjVertices:\n if not visited[adjVertId]:\n stack.push(adjVertId)\n\n nodeCounter += 1\n\n return nodeCounter\n\n\ngraphData, revGraphData = readGraphData(\"./SCC2\", 9)\nsizes = findStronglyConnectedGraph(graphData, revGraphData)\nsizes.sort(reverse=True)\nprint(sizes[:5])\n","sub_path":"Py_solution/StrongConnectedGraph.py","file_name":"StrongConnectedGraph.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"463483317","text":"import datetime\n\nfrom django.contrib.auth.models import Group\n\nfrom api.models import *\nfrom rest_framework import serializers\nfrom django.template.defaultfilters import slugify\n\n\nclass SkillSerializer(serializers.ModelSerializer):\n class Meta:\n model = Skill\n fields = ('id', 'label')\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = '__all__'\n\nclass PortfolioSerializer(serializers.ModelSerializer):\n skills = serializers.PrimaryKeyRelatedField(queryset=Skill.objects.all(), many=True)\n class Meta:\n model = Portfolio\n fields = ('user', 'about', 'education', 'experience', 'phone', 'skills')\n extra_kwargs = {'user': {'required': False}}\n\n def create(self, validated_data):\n user = self.context['request'].user\n validated_data['user'] = user\n return super(PortfolioSerializer, self).create(validated_data)\n\n# this should work (but i'm not sure)\nclass ProjectSerializer(serializers.ModelSerializer):\n creator = UserSerializer(read_only=True)\n class Meta:\n model = Project\n fields = ('id', 'name', 'description', 'stage',\n 'chatRoom', 'creator', 'endDate', 'startDate')\n extra_kwargs = {\n 'chatRoom': {\n 'read_only': True\n }\n }\n def create(self, validated_data):\n return Project.objects.create(\n creator=self.context['request'].user, **validated_data,\n chatRoom=slugify(str(self.context['request'].user)+'-'+validated_data['name'])\n )\n\nclass TaskSerializer(serializers.ModelSerializer):\n developers = serializers.PrimaryKeyRelatedField(many=True, queryset=User.objects.all())\n class Meta:\n model = Task\n fields = ('id', 'description', 'deadline', 'stage', 'project', 'developers')\n\nclass CommentSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n class Meta:\n model = Comment\n fields = '__all__'\n def create(self, validated_data):\n return Comment.objects.create(\n user = self.context['request'].user, **validated_data\n )\n\nclass ApplicationSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n class Meta:\n model = Application\n fields = '__all__'\n\n def create(self, validated_data):\n return Application.objects.create(\n user=self.context['request'].user, **validated_data\n )\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = (\"password\", \"email\", \"first_name\", \"last_name\")\n extra_kwargs = {'password': {'write_only': True}, }\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n user = UserSerializer()\n\n class Meta:\n model= Profile\n fields = '__all__'\n\n def create(self, validated_data):\n user_data = validated_data.pop('user')\n username = user_data[\"email\"]\n role = validated_data.pop('role')\n if role not in Profile.RoleValues.values:\n raise 
str(\"Unsupported role exeption. Only %s supported. You set: '%s'.\", Profile.RoleValues.values, role)\n user = User(username=username, **user_data)\n user.set_password(user_data[\"password\"])\n user.save()\n profile = Profile.objects.create(user=user, role=role)\n group, created = Group.objects.get_or_create(name=role)\n user.groups.add(group)\n return profile\n\n\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"489238266","text":"# !/usr/bin/python\r\n\r\nimport re, os\r\n\r\ndef words_between():\r\n file_path = os.path.join(os.getcwd(),\"dataset.sentences\")\r\n labelsf = open('dataset.labels', 'r+')\r\n \r\n outputf = open('dataset.words_between.csv', 'w')\r\n outputf.write(\"words\\tlabel\\n\")\r\n \r\n with open(file_path,'r+') as sentencesf:\r\n for line in sentencesf:\r\n # Find all the words between PROTX1 and PROTX2 and vice versa\r\n result12=re.findall(\"(?<=PROTX1).*(?=PROTX2)\", line)\r\n result21=re.findall(\"(?<=PROTX2).*(?=PROTX1)\", line)\r\n # Read the label of the sentence\r\n label=labelsf.readline()\r\n # Write the words in between and the label separated by a tab\r\n if result12:\r\n outputf.write(\"\\\"\"+result12[0]+\"\\\"\\t\"+label)\r\n else:\r\n if result21:\r\n outputf.write(\"\\\"\"+result21[0]+\"\\\"\\t\"+label)\r\n else:\r\n outputf.write(\"\\\"\\\"\\t\"+label)\r\n \r\n sentencesf.close()\r\n labelsf.close()\r\n outputf.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n words_between()","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"91399915","text":"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Interactive prompt to run advanced commands and sub-processes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\ntry:\n unicode\nexcept NameError:\n unicode = str\n unichr = chr\n\nimport os\nimport re\nimport subprocess\n\nimport app.controller\n\n\ndef functionTestEq(a, b):\n assert a == b, u\"%r != %r\" % (a, b)\n\n\nif 1:\n # Break up a command line, separate by |.\n kRePipeChain = re.compile(\n #r'''\\|\\|?|&&|((?:\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|[^\\s|&]+)+)''')\n r'''((?:\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|\\|\\||[^|]+)+)''')\n functionTestEq(\n kRePipeChain.findall(''' date \"a b\" 'c d ' | sort '''),\n [\"\"\" date \"a b\" 'c d ' \"\"\", ' sort '])\n functionTestEq(kRePipeChain.findall('date'), ['date'])\n functionTestEq(kRePipeChain.findall('d-a.te'), ['d-a.te'])\n functionTestEq(kRePipeChain.findall('date | wc'), ['date ', ' wc'])\n functionTestEq(kRePipeChain.findall('date|wc'), ['date', 'wc'])\n functionTestEq(kRePipeChain.findall('date && sort'), ['date && sort'])\n functionTestEq(kRePipeChain.findall('date || sort'), ['date || 
sort'])\n functionTestEq(\n kRePipeChain.findall('''date \"a b\" 'c d ' || sort'''),\n [\"\"\"date \"a b\" 'c d ' || sort\"\"\"])\n\n# Break up a command line, separate by &&.\nkReLogicChain = re.compile(\n r'''\\s*(\\|\\|?|&&|\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|[^\\s|&]+)''')\nfunctionTestEq(kReLogicChain.findall('date'), ['date'])\nfunctionTestEq(kReLogicChain.findall('d-a.te'), ['d-a.te'])\nfunctionTestEq(kReLogicChain.findall('date | wc'), ['date', '|', 'wc'])\nfunctionTestEq(kReLogicChain.findall('date|wc'), ['date', '|', 'wc'])\nfunctionTestEq(kReLogicChain.findall('date && sort'), ['date', '&&', 'sort'])\nfunctionTestEq(kReLogicChain.findall('date || sort'), ['date', '||', 'sort'])\nfunctionTestEq(\n kReLogicChain.findall(''' date \"a\\\\\" b\" 'c d ' || sort '''),\n ['date', '\"a\\\\\" b\"', \"'c d '\", '||', 'sort'])\n\n# Break up a command line, separate by \\\\s.\nkReArgChain = re.compile(r'''\\s*(\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|[^\\s]+)''')\nfunctionTestEq(kReArgChain.findall('date'), ['date'])\nfunctionTestEq(kReArgChain.findall('d-a.te'), ['d-a.te'])\nfunctionTestEq(\n kReArgChain.findall(''' date \"a b\" 'c d ' \"a\\\\\" b\" 'c\\\\' d ' '''),\n ['date', '\"a b\"', \"'c d '\", '\"a\\\\\" b\"', \"'c\\\\' d '\"])\nfunctionTestEq(kReArgChain.findall('''bm +'''), ['bm', '+'])\n\n# Break up a command line, separate by \\w (non-word chars will be separated).\nkReSplitCmdLine = re.compile(\n r\"\"\"\\s*(\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|\\w+|[^\\s]+)\\s*\"\"\")\nfunctionTestEq(kReSplitCmdLine.findall('''bm ab'''), ['bm', 'ab'])\nfunctionTestEq(kReSplitCmdLine.findall('''bm+'''), ['bm', '+'])\nfunctionTestEq(kReSplitCmdLine.findall('''bm \"one two\"'''), ['bm', '\"one two\"'])\nfunctionTestEq(\n kReSplitCmdLine.findall('''bm \"o\\\\\"ne two\"'''), ['bm', '\"o\\\\\"ne two\"'])\n\n# Unquote text.\nkReUnquote = re.compile(r'''([\"'])([^\\1]*)\\1''')\nfunctionTestEq(kReUnquote.sub('\\\\2', 'date'), 'date')\nfunctionTestEq(kReUnquote.sub('\\\\2', '\"date\"'), 'date')\nfunctionTestEq(kReUnquote.sub('\\\\2', \"'date'\"), 'date')\nfunctionTestEq(kReUnquote.sub('\\\\2', \"'da\\\\'te'\"), \"da\\\\'te\")\nfunctionTestEq(kReUnquote.sub('\\\\2', '\"da\\\\\"te\"'), 'da\\\\\"te')\n\n\nclass InteractivePrompt(app.controller.Controller):\n \"\"\"Extended commands prompt.\"\"\"\n\n def __init__(self, view):\n app.controller.Controller.__init__(self, view, u\"prompt\")\n\n def setTextBuffer(self, textBuffer):\n app.controller.Controller.setTextBuffer(self, textBuffer)\n self.textBuffer = textBuffer\n self.commands = {\n u'bm': self.bookmarkCommand,\n u'build': self.buildCommand,\n u'cua': self.changeToCuaMode,\n u'emacs': self.changeToEmacsMode,\n u'make': self.makeCommand,\n u'open': self.openCommand,\n #u'split': self.splitCommand, # Experimental wip.\n u'vim': self.changeToVimNormalMode,\n }\n self.filters = {\n u'format': self.formatCommand,\n u'lower': self.lowerSelectedLines,\n u'numEnum': self.assignIndexToSelectedLines,\n u's': self.substituteText,\n u'sort': self.sortSelectedLines,\n u'sub': self.substituteText,\n u'upper': self.upperSelectedLines,\n u'wrap': self.wrapSelectedLines,\n }\n self.subExecute = {\n u'!': self.shellExecute,\n u'|': self.pipeExecute,\n }\n\n def bookmarkCommand(self, cmdLine, view):\n args = kReSplitCmdLine.findall(cmdLine)\n if len(args) > 1 and args[1][0] == u'-':\n if self.view.host.textBuffer.bookmarkRemove():\n return {}, u'Removed bookmark'\n else:\n return {}, u'No bookmarks to remove'\n else:\n self.view.host.textBuffer.bookmarkAdd()\n 
return {}, u'Added bookmark'\n\n
    def buildCommand(self, cmdLine, view):\n        return {}, u'building things'\n\n
    def changeToCuaMode(self, cmdLine, view):\n        return {}, u'CUA mode'\n\n
    def changeToEmacsMode(self, cmdLine, view):\n        return {}, u'Emacs mode'\n\n
    def changeToVimNormalMode(self, cmdLine, view):\n        return {}, u'Vim normal mode'\n\n
    def focus(self):\n        app.log.info(u'InteractivePrompt.focus')\n        self.textBuffer.selectionAll()\n\n
    def formatCommand(self, cmdLine, lines):\n        formatter = {\n            #\".js\": app.format_javascript.format\n            #\".py\": app.format_python.format\n            #\".html\": app.format_html.format,\n        }\n\n
        def noOp(data):\n            return data\n\n
        fileName, ext = os.path.splitext(self.view.host.textBuffer.fullPath)\n
        app.log.info(fileName, ext)\n
        data = formatter.get(ext,\n                             noOp)(self.view.host.textBuffer.parser.data)\n
        lines = data.split(u\"\\n\")\n\n        return lines, u'Changed %d lines' % (len(lines),)\n\n
    def makeCommand(self, cmdLine, view):\n        return {}, u'making stuff'\n\n
    def openCommand(self, cmdLine, view):\n        \"\"\"\n        Opens the file under cursor.\n        \"\"\"\n
        args = kReArgChain.findall(cmdLine)\n        app.log.info(args)\n
        if len(args) == 1:\n            # If no args are provided, look for a path at the cursor position.\n
            view.textBuffer.openFileAtCursor()\n            return {}, view.textBuffer.message[0]\n
        # Try the raw path.\n        path = args[1]\n
        if os.access(path, os.R_OK):\n            return self.openFile(path, view)\n
        # Look in the same directory as the current file.\n
        path = os.path.join(os.path.dirname(view.textBuffer.fullPath), args[1])\n
        if os.access(path, os.R_OK):\n            return self.openFile(path, view)\n
        return {}, u\"Unable to open \" + args[1]\n\n
    def openFile(self, path, view):\n        textBuffer = view.program.bufferManager.loadTextBuffer(path)\n
        inputWindow = self.currentInputWindow()\n        inputWindow.setTextBuffer(textBuffer)\n
        self.changeTo(inputWindow)\n        message = u'Opened file {}'.format(path)\n
        inputWindow.setMessage(message)\n
        # Return the (data, message) pair that execute() expects from commands.\n
        return {}, message\n\n
    def splitCommand(self, cmdLine, view):\n        view.splitWindow()\n        return {}, u'Split window'\n\n
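    # Command-line dispatch, per the tables set up in setTextBuffer: a leading\n
    # '!' or '|' pipes the current selection through a subprocess; otherwise the\n
    # first word selects a filter (e.g. 'sort', 's/foo/bar/') that rewrites the\n
    # selection, or a command (e.g. 'open', 'bm'); anything else reaches\n
    # unknownCommand.\n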
    def execute(self):\n        try:\n            cmdLine = self.textBuffer.parser.data\n
            if not len(cmdLine):\n                self.changeToHostWindow()\n                return\n
            tb = self.view.host.textBuffer\n            lines = list(tb.getSelectedText())\n
            if cmdLine[0] in self.subExecute:\n
                data = self.view.host.textBuffer.parser.data.encode('utf-8')\n
                output, message = self.subExecute.get(cmdLine[0])(cmdLine[1:],\n
                                                                  data)\n
                if app.config.strict_debug:\n                    assert isinstance(output, bytes)\n
                    assert isinstance(message, unicode)\n
                tb.editPasteLines(tuple(output.decode('utf-8').split(u\"\\n\")))\n
                tb.setMessage(message)\n            else:\n
                cmd = re.split(u'\\\\W', cmdLine)[0]\n                dataFilter = self.filters.get(cmd)\n
                if dataFilter:\n                    if not len(lines):\n                        tb.setMessage(\n
                            u'The %s filter needs a selection.' % (cmd,))\n
                    else:\n                        lines, message = dataFilter(cmdLine, lines)\n
                        tb.setMessage(message)\n                        if not len(lines):\n
                            lines.append(u'')\n                        tb.editPasteLines(tuple(lines))\n
                else:\n                    command = self.commands.get(cmd, self.unknownCommand)\n
                    message = command(cmdLine, self.view.host)[1]\n                    tb.setMessage(message)\n
        except Exception as e:\n            app.log.exception(e)\n
            tb.setMessage(u'Execution threw an error.')\n        self.changeToHostWindow()\n\n
    def shellExecute(self, commands, cmdInput):\n        \"\"\"\n        cmdInput is in bytes (not unicode).\n
        return tuple: output as bytes (not unicode), message as unicode.\n        \"\"\"\n
        if app.config.strict_debug:\n            assert isinstance(commands, unicode), type(commands)\n
            assert isinstance(cmdInput, bytes), type(cmdInput)\n        try:\n
            process = subprocess.Popen(\n                commands,\n                stdin=subprocess.PIPE,\n
                stdout=subprocess.PIPE,\n                stderr=subprocess.STDOUT,\n
                shell=True)\n            return process.communicate(cmdInput)[0], u''\n
        except Exception as e:\n
            return b'', u'Error running shell command\\n' + unicode(e)\n\n
    def pipeExecute(self, commands, cmdInput):\n        \"\"\"\n        cmdInput is in bytes (not unicode).\n
        return tuple: output as bytes (not unicode), message as unicode.\n        \"\"\"\n
        if app.config.strict_debug:\n            assert isinstance(commands, unicode), type(commands)\n
            assert isinstance(cmdInput, bytes), type(cmdInput)\n
        chain = kRePipeChain.findall(commands)\n        try:\n
            process = subprocess.Popen(\n                kReArgChain.findall(chain[-1]),\n
                stdin=subprocess.PIPE,\n                stdout=subprocess.PIPE,\n
                stderr=subprocess.STDOUT)\n            if len(chain) == 1:\n
                return process.communicate(cmdInput)[0], u''\n            else:\n
                chain.reverse()\n                prior = process\n                for i in chain[1:]:\n
                    prior = subprocess.Popen(\n                        kReArgChain.findall(i),\n
                        stdin=subprocess.PIPE,\n                        stdout=prior.stdin,\n
                        stderr=subprocess.STDOUT)\n                prior.communicate(cmdInput)\n
                return process.communicate()[0], u''\n        except Exception as e:\n
            app.log.exception(e)\n            return b'', u'Error running shell command\\n' + unicode(e)\n\n
    def info(self):\n        app.log.info(u'InteractivePrompt command set')\n\n
    def lowerSelectedLines(self, cmdLine, lines):\n        lines = [line.lower() for line in lines]\n
        return lines, u'Changed %d lines' % (len(lines),)\n\n
    def assignIndexToSelectedLines(self, cmdLine, lines):\n        output = []\n
        for i, line in enumerate(lines):\n            output.append(u\"%s = %d\" % (line, i))\n
        return output, u'Changed %d lines' % (len(output),)\n\n
    def sortSelectedLines(self, cmdLine, lines):\n        lines.sort()\n
        return lines, u'Changed %d lines' % (len(lines),)\n\n
    def substituteText(self, cmdLine, lines):\n        if len(cmdLine) < 2:\n
            return (lines, u'''tip: %s/foo/bar/ to replace 'foo' with 'bar'.'''\n
                    % (cmdLine,))\n        if not lines:\n
            return lines, u'No text was selected.'\n        sre = re.match(r'\\w+(\\W)', cmdLine)\n
        if not sre:\n            return (lines, u'''Separator punctuation missing, example:'''\n
                    u''' %s/foo/bar/''' % (cmdLine,))\n        separator = sre.groups()[0]\n
        try:\n            _, find, replace, flags = cmdLine.split(separator, 3)\n
        except ValueError:\n
            return (lines, u'''Separator punctuation missing, there should be'''\n
                    u''' three '%s'.''' % (separator,))\n
        data = self.view.host.textBuffer.parser.data\n
        output = self.view.host.textBuffer.findReplaceText(\n
            find, replace, flags, data)\n        lines = output.split(u\"\\n\")\n
        return lines, u'Changed %d lines' % (len(lines),)\n\n
    def upperSelectedLines(self, cmdLine, lines):\n        lines = [line.upper() for line in lines]\n
        return lines, u'Changed %d lines' % (len(lines),)\n\n
    def unknownCommand(self, cmdLine, view):\n
        self.view.host.textBuffer.setMessage(u'Unknown command')\n        return {}, u'Unknown 
command %s' % (cmdLine,)\n\n def wrapSelectedLines(self, cmdLine, lines):\n tokens = cmdLine.split()\n app.log.info(\"tokens\", tokens)\n width = 80 if len(tokens) == 1 else int(tokens[1])\n indent = len(lines[0]) - len(lines[0].lstrip())\n width -= indent\n lines = app.curses_util.wrapLines(lines, u\" \" * indent, width)\n return lines, u'Changed %d lines' % (len(lines),)\n","sub_path":"app/interactive_prompt.py","file_name":"interactive_prompt.py","file_ext":"py","file_size_in_byte":13830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"90869773","text":"import igraph as ig\nimport numpy as np\nimport pandas as pd\nimport time\nimport sys\nfrom multiprocessing import Pool, cpu_count\n\nimport random\nimport math\n\nfrom typing import NewType, Set\nfrom pathlib import Path\n\nfrom tqdm import tqdm\nfrom HighGraphPreprocessing import HighGraphPreprocessing\nfrom NeighborManager import NeighborManager\nfrom LayerManager import LayerManager\nfrom ComponentCollector import ComponentCollector\nfrom ReachabilityEstimator import ReachabilityEstimator\nfrom SizeEstimation import SizeEstimation\nfrom Statistics import Statistics\nfrom GraphUtils import GraphUtils as gu\n#from utils.metrics import *\n\nfrom globals import *\n\nGraph = NewType(\"Graph\", ig.GraphBase)\nNode = NewType(\"Node\", ig.Vertex)\nNodesSet = NewType(\"NodesSet\", Set[ig.Vertex])\n\nclass Sampler:\n def init(self, dataset_name=\"Epinions\", loaded_graph=None):\n dataset = None\n for ds in datasets:\n if dataset_name == ds.title:\n dataset = ds\n break\n if dataset is None:\n raise ValueError(\"No dataset with this name\")\n\n self.title = dataset_name\n self.dataset = dataset\n path, sep, directed = dataset.path, dataset.sep, dataset.directed\n print(\"Reading graph: \", self.title, \"from\", path)\n self.layer_num = 2\n\n if loaded_graph is None:\n g, in_degrees = gu.load_graph(path, sep, directed=directed)\n else:\n g, in_degrees = loaded_graph\n g.to_undirected()\n print(\"Taking giant component...\")\n g = g.components().giant()\n # the actual size will not be used at any point during the algorithm\n # (used only for evaluation).\n self.actual_graph_size = len(g.vs)\n print(\"Number of nodes:\", self.actual_graph_size)\n self.graph = g\n self.initialized = True\n self.total_num_samples = 0\n self.containment_probs = None\n\n def generate_L0(self, L0_method: str = \"greedy\", L0_size: int = -1):\n assert self.initialized\n self.high_subgraph = HighGraphPreprocessing(self.graph)\n self.high_subgraph.set_method(L0_method)\n if L0_size <= 0:\n L0_size = nums_L0[self.title]\n self.high_subgraph.get_high_nodes(L0_size, np.random.choice(self.graph.vs))\n self.L0_generated = True\n self.L0_size = L0_size\n self.L1_size = len(self.high_subgraph.L1_set)\n self.L0_L1_size = self.L0_size + self.L1_size\n # note the the actual L2_size will not be used anywhere in the algorithm (it\n # is only used for evaluation purposes)\n self.actual_L2_size = self.actual_graph_size - self.L0_L1_size\n self.neighbor_manager = NeighborManager(self.high_subgraph)\n print(\"Built L0. 
Total queries:\", self.neighbor_manager.query_counter)\n\n self.layer_manager = LayerManager(self.graph,\n self.high_subgraph,\n self.neighbor_manager)\n self.layers = self.layer_manager.get_layers()\n self.component_collector = ComponentCollector(self.graph,\n self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.layer_num,\n with_in_layer_edges=False)\n self.reachability_estimator = ReachabilityEstimator(self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.component_collector,\n self.layer_num)\n self.size_estimator = SizeEstimation(self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.component_collector,\n self.reachability_estimator)\n self.statistics = Statistics(self.graph,\n self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.reachability_estimator,\n self.component_collector)\n\n def L2_reach_step(self, with_updating=True, preprocessing=False):\n update_flag = with_updating and not self.frozen\n next_node_lst = self.component_collector.sample_component_nodes_no_rejection(1, False)\n self.reached_nodes += next_node_lst\n if update_flag:\n self.size_estimator.update_down(next_node_lst)\n node_reachability = self.reachability_estimator.get_reachability(next_node_lst[0],\n layer_num=self.layer_num)\n self.reachabilities.append(node_reachability)\n if update_flag:\n self.reachability_estimator.update_observed_reachabilities([node_reachability])\n node_prob = random.random()\n self.node_probs.append(node_prob)\n self.query_counters.append(self.neighbor_manager.query_counter)\n\n if preprocessing:\n self.is_accepted.append(None)\n else:\n acceptance = self.is_reached_node_accepted(node_prob, node_reachability)\n self.is_accepted.append(acceptance)\n if acceptance:\n return next_node_lst[0]\n else:\n return None\n\n def is_reached_node_accepted(self, node_prob, node_reachability):\n return node_prob < self.baseline_reach / node_reachability\n\n\n def preprocess_L2(self, L1_num_samples: int = 100, L2_num_reaches: int = 10, allowed_error=0.01):\n assert self.L0_generated\n\n self.frozen = False\n\n nodes_from_L1 = random.sample(self.high_subgraph.L1_list, L1_num_samples)\n self.up_nodes = set(nodes_from_L1)\n self.size_estimator.update_up(nodes_from_L1)\n self.initial_query_counter = self.neighbor_manager.query_counter\n\n print(\"Sampled\", L1_num_samples, \"nodes from L1. Total queries:\",\n self.initial_query_counter)\n\n self.reached_nodes = []\n self.node_probs = []\n self.reachabilities = []\n self.query_counters = []\n self.is_accepted = []\n\n for _ in range(L2_num_reaches):\n self.L2_reach_step(preprocessing=True)\n\n self.update_estimates(allowed_error=allowed_error)\n for i in range(len(self.reached_nodes)):\n self.is_accepted[i] = self.is_reached_node_accepted(self.node_probs[i], self.reachabilities[i])\n\n print(\"Sampled\", L2_num_reaches, \"nodes from L2+ without rejection. 
Total queries:\",\n self.neighbor_manager.query_counter)\n\n self.L2_preprocessed = True\n\n def freeze(self):\n self.frozen = True\n self.neighbor_manager.stop_recording()\n\n def unfreeze(self):\n self.frozen= False\n self.neighbor_manager.resume_recording()\n\n\n def update_estimates(self, allowed_error=0.01):\n if not self.frozen:\n self.L2_size_estimation = self.size_estimator.estimate_size(self.L1_size)\n self.graph_size_estimation = self.L2_size_estimation + self.L0_L1_size\n self.estimated_fractions = np.array([self.L0_size, self.L1_size, self.L2_size_estimation],\n dtype=np.float) / self.graph_size_estimation\n self.reach_quantile = allowed_error / self.estimated_fractions[2]\n self.baseline_reach = self.reachability_estimator.estimate_baseline_reachability(self.reach_quantile)\n\n def sample_from_components(self):\n node = None\n while node is None:\n node = self.L2_reach_step()\n return node\n\n def sample_v2(self,\n num_samples: int = 1,\n num_additional_L1_samples: int = 5,\n allowed_error: float = 0.01,\n with_tqdm: bool = False):\n assert self.L2_preprocessed\n\n samples = []\n query_counters = []\n layer_numbers = range(3)\n layer_choices = np.random.choice(layer_numbers, num_samples, p=self.estimated_fractions)\n\n rng = range(num_samples)\n if with_tqdm:\n rng = tqdm(rng)\n for i in rng:\n if layer_choices[i] == 0:\n samples.append(random.choice(self.high_subgraph.L0_list))\n elif layer_choices[i] == 1:\n samples.append(random.choice(self.high_subgraph.L1_list))\n else:\n samples.append(self.sample_from_components())\n query_counters.append(self.neighbor_manager.query_counter)\n\n if not self.frozen:\n new_L1_samples = [self.sample_new_L1_node() for _ in range(num_additional_L1_samples)]\n new_L1_samples = [samp for samp in new_L1_samples if samp is not None]\n if len(new_L1_samples) > 0:\n self.size_estimator.update_up(new_L1_samples)\n\n self.update_estimates(allowed_error=allowed_error)\n\n if num_samples is 1:\n return samples[0], query_counters[0]\n else:\n return samples, query_counters\n\n def sample_new_L1_node(self):\n if len(self.up_nodes) >= self.L1_size:\n return None\n while True:\n node = random.choice(self.high_subgraph.L1_list)\n if node not in self.up_nodes:\n self.up_nodes.add(node)\n return node\n\n\n\n def sample_old(self, num_samples: int,\n allowed_error = 0.01,\n use_standard_mult_factor=False,\n standard_mult_factor = 0.05,\n standard_additive_factor = 100,\n use_extra_mult_factor=False,\n extra_mult_factor: float = 0.5,\n extra_additive_factor = 100,\n with_updating=True,\n with_printing=False):\n\n new_probs = np.array(np.random.random(num_samples), dtype=np.float)\n if self.containment_probs is None:\n self.containment_probs = new_probs\n else:\n self.containment_probs = np.concatenate([self.containment_probs, new_probs])\n\n self.total_num_samples += num_samples\n\n curr_stop = len(self.reached_nodes)\n while True:\n if use_standard_mult_factor:\n next_stop = int(curr_stop + math.ceil(curr_stop * standard_mult_factor))\n else: # use additive factor\n next_stop = curr_stop + standard_additive_factor\n\n curr_L2_size = self.size_estimator.estimate_size(self.L1_size)\n curr_graph_size = curr_L2_size + self.L0_L1_size\n if with_printing:\n print(\"Estimated L2+ size:\", curr_L2_size)\n curr_L2_fraction = float(curr_L2_size) / curr_graph_size\n num_required_L2_samples = int(np.count_nonzero(self.containment_probs < curr_L2_fraction))\n\n reach_quantile = allowed_error / curr_L2_fraction\n baseline_reach = 
self.reachability_estimator.estimate_baseline_reachability(reach_quantile)\n if with_printing:\n print(\"Estimated baseline reachability:\", baseline_reach)\n\n num_actual_L2_samples = np.count_nonzero(np.array(self.node_probs)\n < baseline_reach / np.array(self.reachabilities))\n if num_actual_L2_samples >= num_required_L2_samples:\n if with_printing:\n print(\"Achieved the L2+ initial goal. Now continuing some more steps for stabilization.\")\n break # At this point, we roughly made all required queries from L2+\n\n for counter in range(curr_stop, next_stop):\n self.L2_reach_step(with_updating=with_updating)\n\n curr_stop = next_stop\n\n finished = False\n if with_printing:\n print(\"Stabilization stage...\")\n while not finished:\n if use_extra_mult_factor:\n num_steps = int(extra_mult_factor * curr_stop)\n else:\n num_steps = extra_additive_factor\n for _ in range(num_steps):\n self.L2_reach_step(with_updating=with_updating)\n curr_L2_size = self.size_estimator.estimate_size(self.L1_size)\n curr_graph_size = curr_L2_size + self.L0_L1_size\n curr_L2_fraction = float(curr_L2_size) / curr_graph_size\n num_required_L2_samples = int(np.sum(self.containment_probs < curr_L2_fraction))\n reach_quantile = allowed_error / curr_L2_fraction\n baseline_reach = self.reachability_estimator.estimate_baseline_reachability(reach_quantile)\n L2_possible_samples = np.nonzero(np.array(self.node_probs)\n < baseline_reach / np.array(self.reachabilities))[0]\n if len(L2_possible_samples) >= num_required_L2_samples:\n finished = True\n L2_sample_indices = np.sort(np.random.choice(L2_possible_samples, num_required_L2_samples, replace=False))\n\n if with_printing:\n print(\"Finished!!!\\nEstimated size:\", curr_L2_size)\n print(\"Actual L2+ size:\", sum([len(layer) for layer in self.layers[2:]]))\n print(\"Estimated baseline reachability:\", baseline_reach)\n print(\"Sampled\", len(L2_sample_indices), \"nodes from L2+ out of\", len(self.reached_nodes), \"reaches\")\n print(\"Total number of queries:\", self.neighbor_manager.query_counter)\n print(\"Number of queries per sample:\", self.neighbor_manager.query_counter / self.total_num_samples)\n L2_samples = [self.reached_nodes[int(i)] for i in L2_sample_indices]\n L2_query_counters = [self.query_counters[int(i)] for i in L2_sample_indices]\n\n all_samples = [None for _ in range(self.total_num_samples)]\n all_counters = [self.initial_query_counter for _ in range(self.total_num_samples)]\n L2_locations_in_all_samples = list(np.sort(np.random.choice(range(self.total_num_samples), len(L2_samples), replace=False)))\n for i in range(len(L2_samples)):\n all_samples[L2_locations_in_all_samples[i]] = L2_samples[i]\n all_counters[L2_locations_in_all_samples[i]] = L2_query_counters[i]\n\n # setting up query counters when queries have not been made.\n for i in range(L2_locations_in_all_samples[0]+1, len(all_samples)):\n if all_counters[i] == self.initial_query_counter:\n all_counters[i] = all_counters[i-1]\n\n # deciding for leftover samples whether they were from L0 or L1.\n L0_relative_size = float(self.L0_size) / self.L0_L1_size\n for j in range(self.total_num_samples):\n if all_samples[j] != None:\n pass\n elif random.random() < L0_relative_size:\n all_samples[j] = random.choice(self.high_subgraph.L0_list)\n else:\n all_samples[j] = random.choice(self.high_subgraph.L1_list)\n\n return all_samples, all_counters\n\n\n\n\n\n\n\n# def run_uniformity_experiment(ds_title,\n# L0_size,\n# min_num_samples_L2,\n# max_num_samples_L2,\n# jump_L2,\n# L0_method=\"greedy\",\n# 
L1_num_samples=3000,\n#                               L2_num_preprocessing_reaches=200):\n
#     sampler = Sampler()\n#     sampler.init(ds_title)\n
#     sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n
#     sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n
#     samples, query_counts = sampler.sample(min_num_samples_L2)\n
#     frozen_samplers = [copy.deepcopy(sampler)]\n#\n
#     num_samples_list = len(range(min_num_samples_L2, max_num_samples_L2, jump_L2))\n#\n
#     for i in range(len(num_samples_list)-1):\n
#         sampler.sample(num_samples_list[i+1] - num_samples_list[i])\n
#         frozen_samplers.append(copy.deepcopy(sampler))\n#\n
#     samples, query_counts = sampler.sample(3000, with_updating=False)\n
#     return np.histogram(samples)\n\n\n\n
def run_sampling_experiment(dataset,\n                            L0_size,\n
                            sample_size_range,\n                            results_fnames,\n
                            L0_method=\"greedy\",\n                            L1_num_samples=3000,\n
                            L2_num_preprocessing_reaches=500):\n    sampler = Sampler()\n
    sampler.init(dataset.title)\n    sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n
    sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n
    print(f\"{dataset.title}: {sample_size_range[0]}\")\n
    samples, query_counters = sampler.sample_old(sample_size_range[0])\n
    df = pd.DataFrame(list(zip([sample.index for sample in samples], query_counters)),\n
                      columns=[\"node\", \"query_count\"])\n    df.to_json(results_fnames[0])\n
    for i in range(len(sample_size_range) - 1):\n
        print(f\"{dataset.title}: {sample_size_range[i+1]}\")\n
        samples, query_counters = sampler.sample_old(sample_size_range[i+1] - sample_size_range[i])\n
        df = pd.DataFrame(list(zip([sample.index for sample in samples], query_counters)),\n
                          columns=[\"node\", \"query_count\"])\n        df.to_json(results_fnames[i+1])\n\n\n
def run_sampling_experiment_v2(dataset,\n                               L0_size,\n
                               sample_size_range,\n                               fname,\n
                               L0_method=\"greedy\",\n
                               L2_num_preprocessing_reaches=200):\n    sampler = Sampler()\n
    sampler.init(dataset.title)\n    vertex_indices_range = 
(0,len(sampler.graph.vs))\n sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n print(f\"{dataset.title}: {sample_size_range[0]}\")\n for _ in range(sample_size_range[0]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n dataframes = [pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[0] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"])]\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t = time.time()\n for i in range(len(sample_size_range) - 1):\n print(f\"{dataset.title}: {sample_size_range[i+1]}\")\n sampler.unfreeze()\n for _ in range(sample_size_range[i+1] - sample_size_range[i]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n dataframes.append(pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[i+1] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"]))\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t2 = time.time()\n print(\"time:\", t2-t)\n t = t2\n all_data = pd.concat(dataframes, ignore_index=True)\n print(all_data)\n all_data.to_json(fname)\n return all_data\n\ndef run_sampling_experiment_with_freezing_v3(dataset,\n L0_size,\n sample_size_range,\n num_samples_per_instance,\n results_fnames,\n L0_method=\"greedy\",\n L1_num_samples=100,\n L2_num_preprocessing_reaches=10,\n additional_L1_samples_per=5):\n sampler = Sampler()\n sampler.init(dataset.title)\n vertex_indices_range = (0,len(sampler.graph.vs))\n sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n print(f\"{dataset.title}: {sample_size_range[0]}\")\n for _ in range(sample_size_range[0]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n df = pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[0] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"])\n df.to_json(results_fnames[0])\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t = time.time()\n for i in range(len(sample_size_range) - 1):\n print(f\"{dataset.title}: 
{sample_size_range[i+1]}\")\n sampler.unfreeze()\n for _ in range(sample_size_range[i + 1] - sample_size_range[i]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n df = pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[i+1] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"])\n df.to_json(results_fnames[i+1])\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t2 = time.time()\n print(\"time:\", t2-t)\n t = t2\n #all_data = pd.concat(dataframes, ignore_index=True)\n #print(all_data)\n #all_data.to_json(fname)\n #return all_data\n\ndef run_sampling_all_graphs():\n for exp_num in range(10):\n for dataset in datasets[0:2]:\n sample_size_range = range(100, 200001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1]-sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n for dataset in datasets[2:4]:\n sample_size_range = range(100, 50001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1] - sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n for exp_num in range(5):\n for dataset in datasets[4:6]:\n sample_size_range = range(100, 20001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1] - sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n for exp_num in range(5):\n for dataset in datasets[6:8]:\n sample_size_range = range(100, 20001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1] - sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n\n\ndef run_sampling_with_freezing_all_graphs(results_base=None, exp_num=0, dataset_idx=0):\n dataset = datasets[dataset_idx]\n sample_size_range = [10, 20, 40, 80, 150, 300, 500, 750, 1000]\n num_samples_per_instance = 100000\n print(\"dataset:\", dataset.title)\n fnames = [f\"{dataset.title}-sampling-freezing-{sample_size_range[i]}-num_samples{num_samples_per_instance}-exp{exp_num}\"\n for i in range(len(sample_size_range))]\n if results_base is None:\n results_base = Path(__file__).resolve().parent.parent/\"experiment_outs\"/\"results\"/\"sampling\"\n 
    results_fnames = [results_base / (fname + \".json\") for fname in fnames]\n
    L0_size = nums_L0[dataset.title]\n
    L1_num_samples = 100  # nums_L1_up[dataset.title]\n
    L2_num_preprocessing_reaches = 10\n
    run_sampling_experiment_with_freezing_v3(dataset,\n
                                             L0_size,\n
                                             sample_size_range,\n
                                             num_samples_per_instance,\n
                                             results_fnames,\n
                                             L1_num_samples=L1_num_samples,\n
                                             L2_num_preprocessing_reaches=L2_num_preprocessing_reaches\n
                                             )\n\n\n\n\n
if __name__ == \"__main__\":\n
    results_base = None  # Path(\"/vol/scratch/omrib/sampling\")\n
    num_exps = 5\n    dataset_idx = int(sys.argv[1])\n    min_exp = int(sys.argv[2])\n
    pool = Pool(num_exps)\n
    pool.starmap(run_sampling_with_freezing_all_graphs,\n
                 ((results_base, exp_num, dataset_idx)\n
                  for exp_num in range(min_exp, min_exp+num_exps)))\n\n
    #run_sampling_with_freezing_all_graphs(results_base=results_base,\n
    #                                      exp_num=int(sys.argv[1]),\n
    #                                      dataset_idx=int(sys.argv[2]))\n
    #distance_from_uniformity_multi_experiments(title=\"Epinions\", num_exps=3, sample_size_range=range(10, 51, 10), L0_size=1000)","sub_path":"code/Sampler.py","file_name":"Sampler.py","file_ext":"py","file_size_in_byte":29656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"415944230","text":"bad = 0\r\ngood = 0\r\nmiddle = 0\r\nerro = 0\r\n\r\n
with open(\"xiaohongshu_sentiments.txt\", encoding=\"utf-8\") as f:\r\n\tlines = f.readlines()\r\n\r\n
for i in lines:\r\n\ti = int(i.strip())\r\n\tif i == 0:\r\n\t\tbad += 1\r\n\telif i == 1:\r\n\t\tmiddle += 1\r\n\telif i == 2:\r\n\t\tgood += 1\r\n\telif i == -1:\r\n\t\terro += 1\r\n\telse:\r\n\t\tprint(i)\r\n\t\tprint(\"out of range\")\r\n\r\n
total = good + bad + middle\r\n\r\n
print(\"positive:\", good, \"\\t\", good/total)\r\n
print(\"neutral:\", middle, \"\\t\", middle/total)\r\n
print(\"negative:\", bad, \"\\t\", bad/total)\r\n
print(\"sentiment analysis errors:\", erro)\r\n","sub_path":"baiduAPI/get_percent.py","file_name":"get_percent.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"40213013","text":"import math\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n
import matplotlib.pyplot as plt\nfrom statsmodels import api as sm\n
from utils import stock_pgfunctions as pg\nimport plotly.graph_objects as go\n
import mplfinance as mpf\nfrom cycler import cycler\nimport matplotlib as mpl\n\n
pd.set_option('display.max_rows', 1000)\npd.set_option('display.max_columns', 20)\n\n
def query_dt(sql):\n    \"\"\"\n    Connect to the database, run the given SQL statement, and return the result\n    as a pandas DataFrame.\n    :param sql: str\n    :return: pandas DataFrame\n    \"\"\"\n
    conn = pg.connect()\n    dt = pd.read_sql(sql, conn)\n    return dt\n\n\n
def vwap_pic(symbol, from_t, to_t):\n    \"\"\"\n    Plot the VWAP with matplotlib.\n
    The volume-weighted average price (VWAP) is the average of trade prices\n    weighted by the volume of each trade.\n    \"\"\"\n
    sql = f\"select * from stock_candles_min where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}'\"\n
    dt = query_dt(sql)\n
    dt['avg_p'] = (dt['h'] + dt['l'] + dt['c']) / 3\n
    dt['v_cum'] = dt['v'].cumsum()\n
    dt['pv'] = dt['avg_p']*dt['v']\n
    dt['pv_cum'] = dt['pv'].cumsum()\n
    dt['VWAP'] = dt['pv_cum']/dt['v_cum']\n
    print(dt.head())\n
    # plot the daily closing price (green line) and the VWAP (orange line)\n
    plt.figure(figsize=(12, 8))\n
    plt.plot(dt['dt'], dt['c'], color='green', label=\"Close Price\")\n
    plt.plot(dt['dt'], dt['VWAP'], color='orange', label=\"VWAP\")\n
    plt.title('VWAP and close price of dt on 21Jan.2020', fontsize=20)\n
    plt.xlabel('Time', fontsize=16)\n
    plt.ylabel('Price', fontsize=16)\n
    plt.legend()\n
    plt.show()\n\n\n
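def vwap_series(dt):\n    \"\"\"A compact equivalent of the VWAP computation in vwap_pic above,\n    assuming the same h/l/c/v column layout returned by query_dt.\n    \"\"\"\n    avg_p = (dt['h'] + dt['l'] + dt['c']) / 3\n    return (avg_p * dt['v']).cumsum() / dt['v'].cumsum()\n\n\n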
def candle_stick_plot1(symbol, from_t, to_t):\n    \"\"\"\n    Draw a candlestick chart, method one:\n    with plotly.graph_objects.\n    \"\"\"\n
    sql = f\"select * from stock_candles_min where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}'\"\n
    dt = query_dt(sql)\n
    fig = go.Figure(data=[go.Candlestick(x=dt['dt'], open=dt['o'], high=dt['h'], low=dt['l'], close=dt['c'])])\n
    fig.show()\n\n\n
def candle_stick_plot2(symbol, from_t, to_t):\n    \"\"\"\n    Draw a candlestick chart, method two:\n    with mplfinance.\n    \"\"\"\n
    sql = f\"select * from stock_candles_min where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}'\"\n
    dt = query_dt(sql)\n
    dt = dt[['o', 'h', 'l', 'c', 'v', 'dt']]\n
    # gotcha: mplfinance requires these exact column names, hence the rename\n
    dt.rename(\n        columns={\n            'dt': 'Date', 'o': 'Open',\n            'h': 'High', 'l': 'Low',\n            'c': 'Close', 'v': 'Volume'},\n        inplace=True)\n
    dt.index = dt['Date']  # the index must be a proper datetime\n
    dt = dt.drop('Date', axis=1)  # only the five OHLCV columns are needed; keeping extras would not raise an error\n
    # print('-'*30)\n    # print(dt)\n
    symbol = 'dt'\n
    # Basic parameters\n
    # type: chart type -- candle, renko, ohlc, line, etc.;\n    #     a candlestick chart ('candle') is chosen here\n
    # mav (moving average): moving-average windows, here the 7-, 30- and 60-day lines\n
    # volume: bool, whether to show traded volume, default False\n
    # title: the chart title\n
    # ylabel: the main y-axis label\n
    # ylabel_lower: the label of the volume panel\n
    # figratio: the aspect ratio of the figure\n
    # figscale: the figure size (larger values give higher image quality)\n
    kwargs = dict(\n        type='candle',\n        mav=(7, 30, 60),\n        volume=True,\n        title='\\nUS_stock %s candle_line' % (symbol),\n        ylabel='OHLC Candles',\n        ylabel_lower='Shares\\nTraded Volume',\n        figratio=(15, 10),\n        figscale=5)\n
    # Market colors\n
    # up: candle color when the close is at or above the open\n
    # down: the opposite of up\n
    # edge: candle border color ('i' inherits from up/down, likewise below;\n    #     see the official docs)\n
    # wick: color of the wicks (upper and lower shadows)\n
    # volume: color of the volume bars\n
    # inherit: whether to inherit colors, optional\n
    mc = mpf.make_marketcolors(\n        up='green',\n        down='red',\n        edge='i',\n        wick='i',\n        volume='in',\n        inherit=True)\n
    # Chart style\n
    # gridaxis: where to draw grid lines\n
    # gridstyle: grid line style\n
    # y_on_right: whether the y-axis sits on the right\n
    s = mpf.make_mpf_style(\n        gridaxis='both',\n        gridstyle='-.',\n        y_on_right=False,\n        marketcolors=mc)\n
    # Moving-average line colors: darker colors that contrast with red and\n
    # green are recommended; seven colors are set here, the defaults also work\n
    mpl.rcParams['axes.prop_cycle'] = cycler(\n        color=['dodgerblue', 'deeppink',\n               'navy', 'teal', 'maroon', 'darkorange',\n               'indigo'])\n
    # line width\n
    mpl.rcParams['lines.linewidth'] = .5\n
    # Draw the chart\n
    # show_nontrading: whether to show non-trading days, default False\n
    # savefig: export the image under the given file name and extension\n
    mpf.plot(dt,\n             **kwargs,\n             style=s,\n             show_nontrading=False,\n             savefig='A_stock_%s_candle_min_line' % (symbol) + '.jpg')\n
    plt.show()\n
    print(\"A_stock_%s_candle_min_line\" % (symbol) + \".jpg\" + \" candlestick chart has been saved.\")\n\n
def daily_return(symbol, from_t, to_t):\n    \"\"\"\n    Compute the daily return series.\n
    The formula is: return = log(today's close / previous close)\n
    :param symbol: str\n    :param from_t: str\n    :param to_t: str\n    :return: picture\n    \"\"\"\n
    sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n
    dt = query_dt(sql)\n
    rt = []\n    rt.append(np.nan)\n
    for j in range(1, len(dt['c'])):\n
        r = (math.log((dt['c'][j])/(dt['c'][j - 1])))\n
        rt.append(r)\n
    dt['return'] = rt\n
    plt.plot(dt['dt'], dt['return'], '--')\n
    return plt.show()\n\n\n
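def daily_return_vectorized(dt):\n    \"\"\"A vectorized sketch equivalent to the loop in daily_return above,\n    assuming the same query_dt column layout: log(c_t / c_{t-1}).\n    \"\"\"\n    return np.log(dt['c'] / dt['c'].shift(1))\n\n\n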
def hyp_test_pic1(symbol, from_t, to_t):\n    \"\"\"\n    Plot a normality check, method one:\n
    compute the log returns, then draw their histogram.\n
    :param symbol: str\n    :param from_t: str\n    :param to_t: str\n    :return: picture\n    \"\"\"\n
    sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n
    dt = query_dt(sql)\n
    # compute the log returns\n    a = np.log(dt['c'].pct_change()+1)\n
    # draw the histogram\n    a.hist(bins=50, figsize=(10, 6))\n
    return plt.show()\n\n
def hyp_test_pic2(symbol, from_t, to_t):\n    \"\"\"\n    Plot a normality check, method two (Q-Q plot):\n
    theoretical quantiles on the x-axis, sample quantiles on the y-axis;\n
    points off a straight line mean the data is not normally distributed.\n
    :param symbol: str\n    :param from_t: str\n    :param to_t: str\n    :return: picture\n    \"\"\"\n
    sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n
    dt = query_dt(sql)\n
    # compute the log returns\n    a = np.log(dt['c'].pct_change()+1)\n
    fix, axes = plt.subplots(1, 1, figsize=(10, 12))\n
    sm.qqplot(a.dropna(), line='s', ax=axes)\n
    axes.set_title(\"hypothesis testing\")  # a Chinese title would not render correctly\n
    return plt.show()\n\n
def hyp_test_data(symbol, from_t, to_t):\n    \"\"\"\n    Compute the statistics of a normality test, method three.\n
    :param symbol: str\n    :param from_t: str\n    :param to_t: str\n    :return: str\n    \"\"\"\n
    sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n
    dt = query_dt(sql)\n
    a = np.log(dt['c'].pct_change()+1)  # compute the log returns\n
    b = a.drop(0, axis=0)\n
    u = b.mean()  # the mean\n
    std = b.std()  # the standard deviation\n
    \"\"\"\n    The arguments of kstest are: the data to test, the test distribution\n
    ('norm' here, the normal distribution), and its mean and standard deviation.\n
    It returns two values: statistic (the D value) and pvalue (the P value).\n
    A p-value above 0.05 means the data is consistent with a normal distribution.\n    \"\"\"\n
    c = b.values.tolist()\n
    result = stats.kstest(c, 'norm', (u, std))\n
    return result\n","sub_path":"Final_Project/zhangsongbin/assginment2/stock_assginment2_utils.py","file_name":"stock_assginment2_utils.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"284499139","text":"from flask import (\n    Blueprint,\n    request,\n    url_for,\n    render_template,\n    redirect,\n    session,\n)\n\n
import csrf_token\nfrom csrf_token import create_token\n
from controllers import current_user, login_required\n
from models.Board import Board\nfrom models.Reply import Reply\nfrom models.Topic import Topic\n
from utils import log\n\n
topic_bp = Blueprint('topic', __name__, url_prefix='/topic')\n\n\n
@topic_bp.route('/detail', methods=['GET'])\ndef detail():\n
    topic_id = request.args.get('id')\n    topic = Topic.find_by(id=topic_id)\n
    topic.views += 1\n    topic.update()\n
    board = Board.find_by(id=topic.board_id)\n    topic_user = topic.user()\n
    replies = Reply.find_all(topic_id=topic_id)\n\n
    u = current_user()\n    token = create_token()\n
    if u is not None:\n        csrf_token.set_value(token, u.id)\n\n
    return render_template(\n        'topic/detail.html',\n        topic=topic,\n        topic_user=topic_user,\n        board=board,\n        replies=replies,\n        token=token,\n    )\n\n\n
@topic_bp.route('/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n
    u = current_user()\n
    if request.method == 'POST':\n
        form = request.form.to_dict()\n        token = form.get('token')\n
        if csrf_token.get_value(token) == u.id:\n
            csrf_token.pop_key(token)\n
            topic = Topic.new(form, user_id=u.id)\n
            return redirect(url_for('.detail', id=topic.id))\n
    else:\n
        boards = Board.all()\n        token = create_token()\n
        csrf_token.set_value(token, u.id)\n
        return render_template('topic/create.html', boards=boards, token=token)\n\n\n
@topic_bp.route('/delete', methods=['GET'])\n@login_required\ndef delete():\n
    topic_id = request.args.get('id')\n    topic = Topic.find_by(id=topic_id)\n
    token = request.args.get('token')\n
    # the token belongs to current_user\n
    if csrf_token.get_value(token) == topic.user_id:\n
        csrf_token.pop_key(token)\n
        Topic.delete(id=topic_id)\n
        return redirect(url_for('static.index'))\n
    else:\n
        csrf_token.pop_key(token)\n
        return redirect(url_for('static.error'))\n\n\n
@topic_bp.route('/update', methods=['GET', 'POST'])\n@login_required\ndef update():\n
    u = current_user()\n
    topic_id = request.args.get('id')\n
    topic = Topic.find_by(id=topic_id)\n
    if request.method == 'POST':\n
        form = request.form.to_dict()\n        token = form.get('token')\n
        if csrf_token.get_value(token) == topic.user_id:\n
            csrf_token.pop_key(token)\n
            Topic.update_by_form(topic_id, form)\n
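            # the token is single-use: it was popped above, so replaying the same\n
            # form post fails the check and lands on the error redirect below\n
            return 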
redirect(url_for('.detail', id=topic_id))\n else:\n csrf_token.pop_key(token)\n return redirect(url_for('static.error'))\n else:\n token = create_token()\n csrf_token.set_value(token, u.id)\n topic_board = Board.find_by(id=topic.board_id)\n boards = Board.all()\n return render_template('topic/update.html', topic=topic, topic_board=topic_board, boards=boards, token=token)\n","sub_path":"controllers/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"503169030","text":"\"\"\"\nPix2Surf Single-View Version Evaluation in multi view protocol\n\"\"\"\n\nfrom .modelbase_v2 import ModelBase\nfrom .pix2surf_sv import Network as SV_Net\nfrom core.models.utils import *\n\nimport os\nimport torch\nfrom core.evaluation import eval_warp\n\n\nclass Model(ModelBase):\n def __init__(self, cfg):\n super(Model, self).__init__(cfg)\n self.name = 'pix2surf-sv'\n self.cfg = cfg\n # register key component\n self.network = Network()\n self.optimizer = torch.optim.Adam(params=self.network.parameters(), lr=self.lr,\n betas=(self.cfg.ADAM_BETA1, self.cfg.ADAM_BETA2))\n # initialize models\n self.resume = cfg.RESUME\n if self.resume:\n self.resume_id = cfg.RESUME_EPOCH_ID\n load_path = os.path.join(cfg.ROOT_DIR, 'log', cfg.LOG_DIR, 'model',\n 'epoch_%d' % cfg.RESUME_EPOCH_ID + '.model')\n self.load_model(loadpath=load_path, current_model_state='cpu', strict=False)\n elif cfg.MODEL_INIT_PATH != ['None']:\n self.load_model(loadpath=cfg.MODEL_INIT_PATH, strict=False)\n self.to_gpus()\n # config output meaning\n self.output_info_dict = {\n 'metric': ['batch-loss', 'reg-v-loss', 'reg-x-loss', 'mask-v-loss', 'mask-x-loss',\n 'sp-loss', 'crr-xyz-loss'],\n 'image': ['uni-rgb-v', 'nox-v-gt-uni', 'mask-v'] +\n ['unwrapped-chart', 'unwrapped-chart-uni', 'learned-chart', 'sp-image-uni'],\n 'xls': ['metric-report']\n }\n\n def _preprocess(self, in_batch):\n return load_multiview_batch(in_batch)\n\n def _postprocess(self, batch):\n # compute metric in multi thread\n batch = eval_warp(batch, method_name='pix2surf-sv', nox_gt_key='nox-v-gt', nox_pred_key='sp-image')\n # add crr_loss to xls report\n batch['metric-report']['consistency-error'] = [float(i) for i in\n batch['crr-xyz-loss-xls'].detach().cpu().numpy()]\n return batch\n\n\nclass Network(SV_Net):\n def __init__(self):\n super(Network, self).__init__()\n # visualization resolution\n self.vis_chart_res = 256\n self.vis_chart_container = torch.zeros(1, 3, self.vis_chart_res, self.vis_chart_res)\n # make eval config\n self.eval_image_res = (240, 320)\n self.eval_image_grid = make_grid(self.eval_image_res)\n\n def forward(self, pack, is_train=True):\n batch = dict()\n n_batch = pack['nox-v'][0].shape[0]\n n_view = len(pack['rgb-v'])\n\n # pred list is a list for each view\n pred_list, code_list = list(), list()\n for ii in range(n_view): # do for each view\n pred, fm = self.network_dict['seg-net'](pack['rgb-v'][ii], return_code=True)\n pred_list.append(pred)\n code_list.append(self.network_dict['global-code'](fm).reshape(n_batch, -1, 1).contiguous())\n\n # prepare gather container\n pred_nox_v_list, pred_nox_x_list, pred_mask_v_list, pred_mask_x_list = [], [], [], []\n pred_xyz_list, pred_uv_list = [], []\n learned_chart_list, unwrapped_chart_list = [], []\n reg_v_loss, reg_x_loss, mask_v_loss, mask_x_loss, sp_loss = 0, 0, 0, 0, 0\n eval_rendered_list = []\n\n for ii in range(n_view):\n mask_v = pack['mask-v'][ii]\n mask_x = pack['mask-x'][ii]\n\n # make 
cnn prediction\n pred = pred_list[ii]\n pred_nox_v = pred[:, :3, :, :]\n pred_nox_x = pred[:, 3:6, :, :]\n pred_score_v = pred[:, 6:8, :, :]\n pred_score_x = pred[:, 8:10, :, :]\n learned_uv = self.sgmd(pred[:, 10:12, :, :])\n\n # make NOCS-regression branch\n mask1c_v = mask_v[:, 0, :, :].unsqueeze(1).detach()\n mask_v_loss = mask_v_loss + self.cls_criterion(pred_score_v, mask1c_v.squeeze(1).long().detach()) / n_view\n pred_mask_v = torch.argmax(pred_score_v, dim=1, keepdim=True).float()\n mask1c_x = mask_x[:, 0, :, :].unsqueeze(1).detach()\n mask_x_loss = mask_x_loss + self.cls_criterion(pred_score_x, mask1c_x.squeeze(1).long().detach()) / n_view\n pred_mask_x = torch.argmax(pred_score_x, dim=1, keepdim=True).float()\n reg_v_loss = reg_v_loss + self.ml2_criterion(pred_nox_v, pack['nox-v'][ii], mask1c_v, True) / n_view\n reg_x_loss = reg_x_loss + self.ml2_criterion(pred_nox_x, pack['nox-x'][ii], mask1c_x, True) / n_view\n\n # make mlp prediction\n eachview_z = code_list[ii].squeeze(2)\n queried_uv = query_feature(learned_uv, pack['uv-v'][ii])\n pred_xyz = self.network_dict['mlp'](eachview_z, queried_uv, unique_code=True)\n pred_xyz = self.sgmd(pred_xyz)\n sp_loss = sp_loss + self.ml2_criterion(pred_xyz, pack['uv-xyz-v'][ii], pack['uv-mask-v'][ii]) / n_view\n\n # Do SP evaluation\n _eval_rendered_list = list()\n for bid in range(n_batch):\n # select mask\n _mask = pred_mask_v[bid, ...].reshape(-1) # H*W\n _learned_uv = learned_uv[bid, ...].reshape(1, 2, -1) # 1,2,H*W\n _learned_uv = _learned_uv[:, :, _mask > 0] # 1,2,S\n uv = self.eval_image_grid.cuda().reshape(1, 2, -1)[:, :, _mask > 0].unsqueeze(3) # 1,2,S,1\n # do Surface Parametrization\n eval_xyz_v = self.network_dict['mlp'](eachview_z[bid, ...].unsqueeze(0), _learned_uv.unsqueeze(3),\n unique_code=True)\n eval_xyz_v = self.sgmd(eval_xyz_v) # 1,3,S,1\n uv[:, 0, :, :] = uv[:, 0, :, :] * mask1c_v.shape[2]\n uv[:, 1, :, :] = uv[:, 1, :, :] * mask1c_v.shape[3]\n uv = uv.long()\n idx = uv[:, 0, :, :] * mask1c_v.shape[3] + uv[:, 1, :, :] # B,N,1\n idx = idx.permute(0, 2, 1) # B,1,N\n vis_eval = torch.ones_like(pack['rgb-v'][ii]).float()[bid, ...].unsqueeze(0)\n vis_eval = vis_eval.reshape(1, 3, -1) # B,3,R*R\n vis_eval = vis_eval.scatter(dim=2, index=idx.repeat(1, 3, 1), src=eval_xyz_v.squeeze(3))\n vis_eval = vis_eval.reshape(1, 3, mask1c_v.shape[2], mask1c_v.shape[3])\n _eval_rendered_list.append(vis_eval)\n eval_rendered = torch.cat(_eval_rendered_list, 0)\n eval_rendered_list.append(eval_rendered)\n\n # vis unwrapped chart\n unwrapped_chart = self.vis_chart_container.repeat(n_batch, 1, 1, 1).cuda()\n unwrapped_chart = spread_feature(unwrapped_chart, learned_uv, pack['rgb-v'][ii], pack['mask-v'][ii])\n\n # gather\n pred_nox_v_list.append(pred_nox_v)\n pred_nox_x_list.append(pred_nox_x)\n pred_mask_v_list.append(pred_mask_v)\n pred_mask_x_list.append(pred_mask_x)\n\n pred_xyz_list.append(pred_xyz)\n pred_uv_list.append(queried_uv)\n unwrapped_chart_list.append(unwrapped_chart)\n learned_chart_list.append(learned_uv.repeat(1, 2, 1, 1)[:, :3, :, :] * pred_mask_x + (1.0 - pred_mask_v))\n\n # make naive multi-view constrain:\n _p1_list, _p2_list, _m_list = [], [], []\n _uv1_list, _uv2_list = [], []\n for base_view_id in range(len(pack['crr-idx-mtx'])):\n for query_view_id in range(len(pack['crr-idx-mtx'][base_view_id])):\n base_pc = pred_xyz_list[base_view_id]\n query_pc = pred_xyz_list[base_view_id + query_view_id + 1]\n base_uv = pred_uv_list[base_view_id]\n query_uv = pred_uv_list[base_view_id + query_view_id + 1]\n pair_idx = 
pack['crr-idx-mtx'][base_view_id][query_view_id].squeeze(3)\n paired_pc_from_base_to_query = torch.gather(base_pc.squeeze(3), dim=2,\n index=pair_idx.repeat(1, 3, 1)).unsqueeze(3)\n paired_uv_from_base_to_query = torch.gather(base_uv.squeeze(3), dim=2,\n index=pair_idx.repeat(1, 2, 1)).unsqueeze(3)\n _p1_list.append(paired_pc_from_base_to_query)\n _p2_list.append(query_pc)\n _uv1_list.append(paired_uv_from_base_to_query)\n _uv2_list.append(query_uv)\n _m_list.append(pack['crr-mask-mtx'][base_view_id][query_view_id])\n\n crr_xyz_loss_each = self.ml2_criterion(torch.cat(_p1_list, dim=2).contiguous(),\n torch.cat(_p2_list, dim=2).contiguous(),\n torch.cat(_m_list, dim=2).contiguous(),\n detach=False, reduce_batch=False)\n crr_xyz_loss = crr_xyz_loss_each.mean()\n\n crr_uv_loss_each = self.ml2_criterion(torch.cat(_uv1_list, dim=2).contiguous(),\n torch.cat(_uv2_list, dim=2).contiguous(),\n torch.cat(_m_list, dim=2).contiguous(),\n detach=False, reduce_batch=False)\n crr_uv_loss = crr_uv_loss_each.mean()\n\n # summary\n batch['batch-loss'] = (((reg_v_loss + reg_x_loss) * 0.3 + (mask_v_loss + mask_x_loss) * 0.7) * 0.1 + \\\n sp_loss * 0.9).unsqueeze(0)\n\n batch['reg-v-loss'] = reg_v_loss.detach().unsqueeze(0)\n batch['reg-x-loss'] = reg_x_loss.detach().unsqueeze(0)\n batch['mask-v-loss'] = mask_v_loss.detach().unsqueeze(0)\n batch['mask-x-loss'] = mask_x_loss.detach().unsqueeze(0)\n batch['sp-loss'] = sp_loss.detach().unsqueeze(0)\n batch['crr-xyz-loss'] = crr_xyz_loss.detach().unsqueeze(0)\n batch['crr-xyz-loss-xls'] = crr_xyz_loss_each.detach()\n\n batch['mask-v'] = torch.cat(pred_mask_v_list, 3)\n batch['mask-x'] = torch.cat(pred_mask_x_list, 3)\n batch['rgb-v'] = pack['rgb-v']\n batch['uni-rgb-v'] = torch.cat(pack['rgb-v'], 3)\n\n batch['nox-v-gt'] = [p * m + (1.0 - m) for p, m in zip(pack['nox-v'], pack['mask-v'])]\n batch['nox-x-gt'] = [p * m + (1.0 - m) for p, m in zip(pack['nox-x'], pack['mask-x'])]\n batch['nox-v-gt-uni'] = torch.cat([p * m + (1.0 - m) for p, m in zip(pack['nox-v'], pack['mask-v'])], 3)\n batch['nox-x-gt-uni'] = torch.cat([p * m + (1.0 - m) for p, m in zip(pack['nox-x'], pack['mask-x'])], 3)\n\n batch['sp-image'] = eval_rendered_list\n batch['sp-image-uni'] = torch.cat(eval_rendered_list, 3)\n\n batch['learned-chart'] = torch.cat(learned_chart_list, 3)\n batch['unwrapped-chart'] = torch.cat(unwrapped_chart_list, 3)\n vis_nsc_uni = unwrapped_chart_list[0]\n for new_scatter in unwrapped_chart_list:\n vis_nsc_uni = torch.max(new_scatter, vis_nsc_uni)\n batch['unwrapped-chart-uni'] = vis_nsc_uni\n\n return batch\n","sub_path":"core/models/pix2surf_sv_mveval.py","file_name":"pix2surf_sv_mveval.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"495007642","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef fetch_company_type(company_name, company_code):\n\troot_url = 'https://www.bseindia.com/SiteCache/1D/CompanyHeader.aspx?Type=EQ&text=' + company_code\n\tr = requests.get(root_url)\n\tsoup = BeautifulSoup(r.content, 'html.parser')\n\ttd_tags = soup.find_all('td')\n\t#hardcoded the below line. 
Can't find anything better right now.\n\tindustry_type = td_tags[-1]\n\tcompany_name = company_name.replace('+' , ' ')\n\twrite_string = company_name + ',' + company_code + ',' + industry_type.string + '\\n'\n\twith open('data/company_type.csv', 'a') as file:\n\t\tfile.write(write_string)\n\n\n\t","sub_path":"additional_attributes.py","file_name":"additional_attributes.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"527508100","text":"import numpy as np\nfrom optparse import OptionParser\nimport scipy.linalg as la\nimport scipy.stats as stats\nimport scipy.linalg.blas as blas\nimport pandas as pd\nimport csv\nimport time\nimport fastlmm.util.VertexCut as vc\nfrom pysnptools.snpreader.bed import Bed\nimport pysnptools.util as pstutil\nimport pysnptools.util.pheno as phenoUtils\nnp.set_printoptions(precision=3, linewidth=200)\n\n\n\ndef loadData(bfile, extractSim, phenoFile, missingPhenotype='-9', loadSNPs=False, standardize=True):\n\tbed = Bed(bfile, count_A1=True)\n\t\n\tif (extractSim is not None):\n\t\tf = open(extractSim)\n\t\tcsvReader = csv.reader(f)\n\t\textractSnpsSet = set([])\n\t\tfor l in csvReader: extractSnpsSet.add(l[0])\t\t\t\n\t\tf.close()\t\t\n\t\tkeepSnpsInds = [i for i in range(bed.sid.shape[0]) if bed.sid[i] in extractSnpsSet]\t\t\n\t\tbed = bed[:, keepSnpsInds]\n\t\t\n\tphe = None\n\tif (phenoFile is not None):\tbed, phe = loadPheno(bed, phenoFile, missingPhenotype)\n\t\n\tif (loadSNPs):\n\t\tbed = bed.read()\n\t\tif (standardize): bed = bed.standardize()\t\n\t\n\treturn bed, phe\n\t\n\t\ndef loadPheno(bed, phenoFile, missingPhenotype='-9', keepDict=False):\n\tpheno = phenoUtils.loadOnePhen(phenoFile, missing=missingPhenotype, vectorize=True)\n\tcheckIntersection(bed, pheno, 'phenotypes')\n\tbed, pheno = pstutil.intersect_apply([bed, pheno])\n\tif (not keepDict): pheno = pheno['vals']\n\treturn bed, pheno\n\t\n\t\ndef checkIntersection(bed, fileDict, fileStr, checkSuperSet=False):\n\tbedSet = set((b[0], b[1]) for b in bed.iid)\n\tfileSet = set((b[0], b[1]) for b in fileDict['iid'])\n\t\n\tif checkSuperSet:\n\t\tif (not fileSet.issuperset(bedSet)): raise Exception(fileStr + \" file does not include all individuals in the bfile\")\n\t\n\tintersectSet = bedSet.intersection(fileSet)\n\tif (len(intersectSet) != len (bedSet)):\n\t\tprint(len(intersectSet), 'individuals appear in both the plink file and the', fileStr, 'file')\n\n\t\ndef symmetrize(a):\n return a + a.T - np.diag(a.diagonal())\n\t\n\t\n\ndef loadRelatedFile(bed, relFile):\n\trelatedDict = phenoUtils.loadOnePhen(relFile, vectorize=True)\n\tcheckIntersection(bed, relatedDict, 'relatedness', checkSuperSet=True)\n\t_, relatedDict = pstutil.intersect_apply([bed, relatedDict])\n\trelated = relatedDict['vals']\n\tkeepArr = (related < 0.5)\n\tprint(np.sum(~keepArr), 'individuals will be removed due to high relatedness')\n\treturn keepArr\n\t\n\t\ndef findRelated(bed, cutoff, kinshipFile=None):\n\n\tif (kinshipFile is None):\n\t\tprint('Computing kinship matrix...')\n\t\tt0 = time.time()\t\n\t\tXXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1) / bed.val.shape[1])\n\t\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\n\telse:\n\t\tXXT = np.loadtxt(kinshipFile)\n\n\t#Find related individuals\n\tremoveSet = set(np.sort(vc.VertexCut().work(XXT, cutoff))) #These are the indexes of the IIDs to remove\t\t\n\tprint('Marking', len(removeSet), 'individuals to be removed due to high relatedness')\n\t\n\t#keepArr = np.array([(1 if 
iid in keepSet else 0) for iid in bed.iid], dtype=bool)\t\n\tkeepArr = np.ones(bed.iid.shape[0], dtype=bool)\n\tfor i in removeSet: keepArr[i] = False\t\n\treturn keepArr\n\t\n\t\n\t\ndef eigenDecompose(XXT, ignore_neig=False):\n\tt0 = time.time()\n\tprint('Computing eigendecomposition...')\n\ts,U = la.eigh(XXT)\n\tif (not ignore_neig and (np.min(s) < -1e-4)): raise Exception('Negative eigenvalues found')\n\ts[s<0]=0\t\n\tind = np.argsort(s)\n\tind = ind[s>1e-12]\n\tU = U[:, ind]\n\ts = s[ind]\n\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\n\treturn s,U\n\t\n\t\n\ndef loadCovars(bed, covarFile):\n\tcovarsDict = phenoUtils.loadPhen(covarFile)\n\tcheckIntersection(bed, covarsDict, 'covariates', checkSuperSet=True)\n\t_, covarsDict = pstutil.intersect_apply([bed, covarsDict])\n\tcovar = covarsDict['vals']\n\treturn covar\t\n\t\ndef getSNPCovarsMatrix(bed, resfile, pthresh, mindist):\n\tsnpNameToNumDict = dict([])\n\tfor i,s in enumerate(bed.sid): snpNameToNumDict[s] = i\t\n\n\tf = open(resfile)\n\tcsvReader = csv.reader(f, delimiter=\"\\t\")\n\tnext(csvReader)\t\n\tsignificantSNPs = []\n\tsignificantSNPNames = []\n\tlastPval = 0\n\tfeaturesPosList = []\n\tfor l in csvReader:\n\t\tsnpName, pVal = l[0], float(l[4])\n\t\tif (pVal < lastPval): raise Exception('P-values are not sorted in descending order: ' + str(pVal) + \">\" + str(lastPval))\n\t\tlastPval = pVal\n\t\tif (pVal > pthresh): break\t\t\n\t\tif (snpName not in snpNameToNumDict): continue\t\t\t\t\t\t\t\n\t\tsignificantSNPNames.append(snpName)\n\t\tif (mindist == 0):\n\t\t\tsignificantSNPs.append(snpNameToNumDict[snpName])\n\t\t\tprint('Using SNP', snpName, 'with p<%0.2e'%pVal, 'as a fixed effect')\n\t\telse:\n\t\t\tposArr = bed.pos[snpNameToNumDict[snpName]]\n\t\t\tchrom, pos = posArr[0], int(posArr[2])\t\t\t\t\n\t\t\taddSNP = True\n\t\t\tfor (c,p) in featuresPosList:\n\t\t\t\tif (chrom == c and abs(pos-p) < mindist):\n\t\t\t\t\taddSNP = False\n\t\t\t\t\tbreak\n\t\t\tif addSNP:\n\t\t\t\tsignificantSNPs.append(snpNameToNumDict[snpName])\n\t\t\t\tfeaturesPosList.append((chrom, pos))\n\t\t\t\tprint('Using SNP', snpName, '('+str(int(chrom))+':'+str(pos)+') with p<%0.2e'%pVal, 'as a fixed effect')\n\tf.close()\n\n\tsnpCovarsMat = bed.val[:, significantSNPs]\n\treturn snpCovarsMat\n\t\n\t\n\t\ndef getExcludedChromosome(bfile, chrom):\n\tbed = Bed(bfile, count_A1=True)\t\n\tindsToKeep = (bed.pos[:,0] != chrom)\n\tbed = bed[:, indsToKeep]\t\n\treturn bed.read().standardize()\n\t\ndef getChromosome(bfile, chrom):\n\tbed = Bed(bfile, count_A1=True)\n\tindsToKeep = (bed.pos[:,0] == chrom)\n\tbed = bed[:, indsToKeep]\t\n\treturn bed.read().standardize()\n\t\n\ndef _fixupBedAndPheno(bed, pheno, missingPhenotype='-9'):\n\tbed = _fixupBed(bed)\n\tbed, pheno = _fixup_pheno(pheno, bed, missingPhenotype)\n\treturn bed, pheno\n\t\ndef _fixupBed(bed):\n\tif isinstance(bed, str):\n\t\treturn Bed(bed, count_A1=True).read().standardize()\n\telse: return bed\n\ndef _fixup_pheno(pheno, bed=None, missingPhenotype='-9'):\n\tif (isinstance(pheno, str)):\n\t\tif (bed is not None):\n\t\t\tbed, pheno = loadPheno(bed, pheno, missingPhenotype, keepDict=True)\n\t\t\treturn bed, pheno\n\t\telse:\n\t\t\tphenoDict = phenoUtils.loadOnePhen(pheno, missing=missingPhenotype, vectorize=True)\n\t\t\treturn phenoDict\n\telse:\n\t\tif (bed is not None): return bed, pheno\t\t\t\n\t\telse: return pheno\n\ndef linreg(bed, pheno):\n\n\t#Extract snps and phenotype\n\tbed, pheno = _fixupBedAndPheno(bed, pheno)\t\n\tif isinstance(pheno, dict):\tphe = pheno['vals']\t\n\telse: phe 
= pheno\t\t\n\tif (len(phe.shape)==2):\n\t\tif (phe.shape[1]==1): phe=phe[:,0]\n\t\telse: raise Exception('More than one phenotype found')\t\n\n\t#Normalize y. We assume X is already normalized.\n\ty = phe - phe.mean(); y /= y.std()\n\n\t#Compute p-values\n\tXy = bed.val.T.dot(y) / y.shape[0]\n\tXy[Xy>1.0] = 1.0\n\tXy[Xy<-1.0] = -1.0\n\tdf = y.shape[0]-2\n\tTINY = 1.0e-20\n\tt = Xy * np.sqrt(df / ((1.0-Xy+TINY) * (1.0+Xy+TINY)))\n\tpValT = stats.t.sf(np.abs(t), df)*2\t\n\t\n\t#Create pandas data frame\n\titems = [\n\t\t('SNP', bed.sid),\n\t\t('Chr', bed.pos[:,0]), \n\t\t('GenDist', bed.pos[:,1]),\n\t\t('ChrPos', bed.pos[:,2]), \n\t\t('PValue', pValT), \n\t]\n\tframe = pd.DataFrame.from_items(items)\t\n\tframe.sort(\"PValue\", inplace=True)\n\tframe.index = np.arange(len(frame))\t\n\treturn frame\n\t\ndef powerPlot(df, causalSNPs, title=''):\n\timport pylab\n\tcausalSNPs = set(causalSNPs)\n\tcsnpPvals = df[df['SNP'].isin(causalSNPs)][\"PValue\"]\t\n\tpvalPoints = np.logspace(-6, -2, num=1000)\n\tpower = [np.mean(csnpPvals < p ) for p in list(pvalPoints)]\n\tpylab.plot(-np.log10(pvalPoints), power)\n\tpylab.xlabel(\"-log10(Significance Threshold)\")\n\tpylab.ylabel(\"Power\")\n\tpylab.title(title)\n\t\n\t\ndef computeCovar(bed, shrinkMethod, fitIndividuals):\n\teigen = dict([])\n\n\tif (shrinkMethod in ['lw', 'oas', 'l1', 'cv']):\n\t\timport sklearn.covariance as cov\n\t\tt0 = time.time()\n\t\tprint('Estimating shrunk covariance using', shrinkMethod, 'estimator...')\n\t\t\t\t\n\t\tif (shrinkMethod == 'lw'): covEstimator = cov.LedoitWolf(assume_centered=True, block_size = 5*bed.val.shape[0])\n\t\telif (shrinkMethod == 'oas'): covEstimator = cov.OAS(assume_centered=True)\n\t\telif (shrinkMethod == 'l1'): covEstimator = cov.GraphLassoCV(assume_centered=True, verbose=True)\n\t\telif (shrinkMethod == 'cv'):\n\t\t\tshrunkEstimator = cov.ShrunkCovariance(assume_centered=True)\n\t\t\tparam_grid = {'shrinkage': [0.01, 0.1, 0.3, 0.5, 0.7, 0.9, 0.99]}\t\t\t\n\t\t\tcovEstimator = sklearn.grid_search.GridSearchCV(shrunkEstimator, param_grid)\t\t\n\t\telse: raise Exception('unknown covariance regularizer')\n\t\t\n\t\tcovEstimator.fit(bed.val[fitIndividuals, :].T)\n\t\tif (shrinkMethod == 'l1'):\n\t\t\talpha = covEstimator.alpha_\n\t\t\tprint('l1 alpha chosen:', alpha)\n\t\t\tcovEstimator2 = cov.GraphLasso(alpha=alpha, assume_centered=True, verbose=True)\n\t\telse:\n\t\t\tif (shrinkMethod == 'cv'): shrinkEstimator = clf.best_params_['shrinkage']\n\t\t\telse: shrinkEstimator = covEstimator.shrinkage_\n\t\t\tprint('shrinkage estimator:', shrinkEstimator)\n\t\t\tcovEstimator2 = cov.ShrunkCovariance(shrinkage=shrinkEstimator, assume_centered=True)\n\t\tcovEstimator2.fit(bed.val.T)\n\t\tXXT = covEstimator2.covariance_ * bed.val.shape[1]\n\t\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\n\t\t\t\n\telse:\n\t\tprint('Computing kinship matrix...')\t\n\t\tt0 = time.time()\n\t\tXXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1))\n\t\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\t\t\n\t\ttry: shrinkParam = float(shrinkMethod)\n\t\texcept: shrinkParam = -1\n\t\tif (shrinkMethod == 'mylw'):\n\t\t\tXXT_fit = XXT[np.ix_(fitIndividuals, fitIndividuals)]\n\t\t\tsE2R = (np.sum(XXT_fit**2) - np.sum(np.diag(XXT_fit)**2)) / (bed.val.shape[1]**2)\n\t\t\t#temp = (bed.val**2).dot((bed.val.T)**2)\n\t\t\ttemp = symmetrize(blas.dsyrk(1.0, bed.val[fitIndividuals, :]**2, lower=1))\n\t\t\tsER2 = (temp.sum() - np.diag(temp).sum()) / bed.val.shape[1]\n\t\t\tshrinkParam = (sER2 - sE2R) / (sE2R * 
(bed.val.shape[1]-1))\t\t\n\t\tif (shrinkParam > 0):\n\t\t\tprint('shrinkage estimator:', 1-shrinkParam)\n\t\t\tXXT = (1-shrinkParam)*XXT + bed.val.shape[1]*shrinkParam*np.eye(XXT.shape[0])\n\t\n\treturn XXT\n\n\n\t\n\t\ndef standardize(X, method, optionsDict):\n\tfitIndividuals = np.ones(X.shape[0], dtype=np.bool)\n\tif (method == 'frq'):\n\t\tempMean = X.mean(axis=0) / 2.0\n\t\tX[:, empMean>0.5] = 2 - X[:, empMean>0.5]\t\n\t\tprint('regularizng SNPs according to frq file...')\n\t\tfrqFile = (optionsDict['bfilesim']+'.frq' if (optionsDict['frq'] is None) else optionsDict['frq'])\n\t\tmafs = np.loadtxt(frqFile, usecols=[1,2]).mean(axis=1)\n\t\tsnpsMean = 2*mafs\n\t\tsnpsStd = np.sqrt(2*mafs*(1-mafs))\t\n\telif (method == 'related'):\n\t\tif (optionsDict['related'] is None): raise Exception('related file not supplied')\n\t\tprint('regularizng SNPs according to non-related individuals...')\n\t\trelLines = np.loadtxt(optionsDict['related'], usecols=[2])\t\n\t\tkeepArr = (relLines != 1)\n\t\tprint('Excluding', np.sum(~keepArr), 'from the covariance matrix standardization')\n\t\tsnpsMean = X[keepArr, :].mean(axis=0)\n\t\tsnpsStd = X[keepArr, :].std(axis=0)\n\t\tfitIndividuals = keepArr\n\telif (method == 'controls'):\n\t\tphe = optionsDict['pheno']\n\t\tpheThreshold = phe.mean()\n\t\tcontrols = (phe int:\n import collections\n if not tasks: return 0\n dic = collections.Counter(tasks)\n max_task = sorted(dic.items(), reverse=True, key = lambda x:x[1])[0][0]\n res = (dic[max_task]-1) * (n+1) + 1\n for k, v in dic.items():\n if v == dic[max_task] and k != max_task:\n res += 1\n return len(tasks) if res < len(tasks) else res","sub_path":"Week_04/[621]任务调度器.py","file_name":"[621]任务调度器.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"157793042","text":"import numpy as np\n\ndef prim(G):\n mst = np.zeros(shape=G.adj.shape)\n T = np.array([0])\n W = np.array([i for i in range(1, len(G.adj))])\n sum_mst = 0\n\n while len(T) != len(G.adj):\n min = None\n for t in T:\n for w in W:\n if G.adj[t][w] != 0:\n if min == None or G.adj[t][w] < G.adj[min[0]][min[1]]:\n min = [t, w]\n \n mst[min[0]][min[1]] = mst[min[1]][min[0]] = G.adj[min[0]][min[1]]\n T = np.append(T, min[1])\n W = W[W != min[1]]\n sum_mst += G.adj[min[0]][min[1]]\n \n return [mst, sum_mst]","sub_path":"03_project/mst.py","file_name":"mst.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"637475622","text":"\nimport json\nimport requests\nfrom urllib.request import urljoin\nfrom collections import OrderedDict\n\nfrom btcmarkets.auth import build_headers\n\n\nclass BTCMarkets:\n\n base_url = 'https://api.btcmarkets.net'\n\n def __init__(self):\n self.session = requests.Session()\n\n def get_accounts(self):\n return self.request('GET', end_point='/account/balance')\n\n def get_order_book(self, instrument, currency):\n return self.request('GET', end_point='/market/%s/%s/orderbook' % (instrument, currency))\n\n def get_trades(self, instrument, currency, since=0):\n return self.request('GET', end_point='/market/%s/%s/trades?since=%s' % (instrument, currency, since))\n\n def get_open_orders(self, instrument, currency, limit=100, since=0):\n data = OrderedDict([\n ('currency', currency), ('instrument', instrument), ('limit', limit), ('since', since),\n ])\n return self.request('POST', '/order/open', data=data)\n\n def get_order_history(self, instrument, 
currency, limit=100, since=0):\n data = OrderedDict([\n ('currency', currency), ('instrument', instrument), ('limit', limit), ('since', since)\n ])\n return self.request('POST', '/order/history', data=data)\n\n def get_trade_history(self, instrument, currency, limit=100, since=0):\n data = OrderedDict([\n ('currency', currency), ('instrument', instrument), ('limit', limit), ('since', since)\n ])\n return self.request('POST', '/order/trade/history', data=data)\n\n def get_order_detail(self, order_ids):\n data = OrderedDict([('orderIds', order_ids)])\n return self.request('POST', end_point='/order/detail', data=data)\n\n def insert_order(self, instrument, currency, order_side, price, volume, order_type):\n \"\"\"\n :param instrument: {'BTC', 'ETH', 'LTC'}\n :param currency: {'BTC', 'AUD'}\n :param order_side: ('Bid', 'Ask')\n :param price: price for order. Must be * 100,000,000 as per https://github.com/BTCMarkets/API/wiki/Trading-API\n :param volume: volume for order. Must be * 100,000,000 as per https://github.com/BTCMarkets/API/wiki/Trading-API\n :param order_type: {'Limit', 'Market')\n :return:\n \"\"\"\n assert len(str(int(price))) > 5 and len(str(int(volume))) > 5\n data = OrderedDict([\n ('currency', currency),\n ('instrument', instrument),\n ('price', price),\n ('volume', volume),\n ('orderSide', order_side),\n ('ordertype', order_type),\n ('clientRequestId', '1'),\n ])\n return self.request('POST', end_point='/order/create', data=data)\n\n def delete_order(self, order_ids: list):\n data = OrderedDict([('orderIds', order_ids)])\n return self.request('POST', end_point='/order/cancel', data=data)\n\n def request(self, method, end_point, data=None):\n url = urljoin(self.base_url, end_point)\n if data is not None:\n data = json.dumps(data, separators=(',', ':'))\n headers = build_headers(end_point, data)\n resp = self.session.request(method, url=url, headers=headers, data=data)\n resp_json = resp.json()\n if 'success' in resp_json and not resp_json['success']:\n raise Exception('ErrorCode: %s Message: %s' % (resp_json['errorCode'], resp_json['errorMessage']))\n return resp_json\n","sub_path":"btcmarkets/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"208327613","text":"# Copyright (C) 2010-2011 Richard Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom CIM16.Element import Element\n\nclass CurveData(Element):\n \"\"\"Multi-purpose data points for defining a curve.Multi-purpose data points for defining a curve.\n \"\"\"\n\n def __init__(self, y3value=0.0, xvalue=0.0, y2value=0.0, y1value=0.0, Curve=None, *args, **kw_args):\n \"\"\"Initialises a new 'CurveData' instance.\n\n @param y3value: The data value of the third Y-axis variable (if present), depending on the Y-axis units \n @param xvalue: The data value of the X-axis variable, depending on the X-axis units \n @param y2value: The data value of the second Y-axis variable (if present), depending on the Y-axis units \n @param y1value: The data value of the first Y-axis variable, depending on the Y-axis units \n @param Curve: The Curve defined by this CurveData.\n \"\"\"\n #: The data value of the third Y-axis variable (if present), depending on the Y-axis units\n self.y3value = y3value\n\n #: The data value of the X-axis variable, depending on the X-axis units\n self.xvalue = xvalue\n\n #: The data value of the second Y-axis variable (if present), depending on the Y-axis units\n self.y2value = y2value\n\n #: The data value of the first Y-axis variable, depending on the Y-axis units\n self.y1value = y1value\n\n self._Curve = None\n self.Curve = Curve\n\n super(CurveData, self).__init__(*args, **kw_args)\n\n _attrs = [\"y3value\", \"xvalue\", \"y2value\", \"y1value\"]\n _attr_types = {\"y3value\": float, \"xvalue\": float, \"y2value\": float, \"y1value\": float}\n _defaults = {\"y3value\": 0.0, \"xvalue\": 0.0, \"y2value\": 0.0, \"y1value\": 0.0}\n _enums = {}\n _refs = [\"Curve\"]\n _many_refs = []\n\n def getCurve(self):\n \"\"\"The Curve defined by this CurveData.\n \"\"\"\n return self._Curve\n\n def setCurve(self, value):\n if self._Curve is not None:\n filtered = [x for x in self.Curve.CurveDatas if x != self]\n self._Curve._CurveDatas = filtered\n\n self._Curve = value\n if self._Curve is not None:\n if self not in self._Curve._CurveDatas:\n self._Curve._CurveDatas.append(self)\n\n Curve = property(getCurve, setCurve)\n\n","sub_path":"CIM16/IEC61970/Core/CurveData.py","file_name":"CurveData.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"89104676","text":"import json\n\n\nclass Generic(object):\n def __init__(self,object):\n self.__dict__.update(object)\n self.__original__=object\n\n def __repr__(self):\n rep='<'+self.__class__.__name__+' {'\n first=True\n for attrib in dir(self):\n if not attrib.startswith('__'):\n if first:\n first=False\n else:\n rep=rep+','\n rep=rep+attrib\n return rep+'}>'\n\n def pretty(self):\n return json.dumps(self.__original__,indent=3)\n\n def get(self,key):\n return self.__dict__.get(key)\n","sub_path":"dhis/types/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"135637227","text":"import pytest\n\nfrom vkbottle import ManySessionManager\nfrom vkbottle.tools.test_utils import MockedClient\n\n\n@pytest.mark.asyncio\nasync def test_client():\n client = MockedClient(\"some text\")\n text = await client.request_text(\"GET\", \"https://example.com\")\n await 
client.close()\n assert text == \"some text\"\n\n\n@pytest.mark.asyncio\nasync def test_session_manager():\n session_manager = ManySessionManager(lambda: MockedClient(\"some text\"))\n async with session_manager as session:\n assert await session.request_text(\"GET\", \"https://example.com\") == \"some text\"\n","sub_path":"tests/http_test.py","file_name":"http_test.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"329776988","text":"import requests\nimport json\nimport logging\nfrom datetime import datetime\nfrom urllib import parse\nfrom sys import exit\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\nf = open(\"token.txt\", \"r\")\nTOKEN = f.readline()\nif not TOKEN:\n logging.error(\"Error occurred, have you filled the token.txt file with your bot token?\")\n exit()\n\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\nf = open(\"master.txt\", \"r\")\nmaster = int(f.readline())\nif not master:\n logging.error(\"Error occurred, have you filled the master.txt file with your master id?\")\n exit()\n\n\nclass MessageHandler:\n\n def __init__(self):\n self.master = master\n self.allowed = [self.master]\n\n #\n def get_url(self, url):\n try:\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n except requests.exceptions.ConnectionError:\n logging.info(\"Max retries exceed\")\n content = \"\"\n return content\n\n #\n def get_json_from_url(self, url):\n try:\n content = self.get_url(url)\n js = json.loads(content)\n except AttributeError:\n event_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logging.error(\"\\nFailed to load json content at {}, content was {}\\n\".format(event_time, self.get_url(url)))\n js = []\n return js\n\n #\n def get_updates(self, offset=None):\n url = URL + \"getUpdates?timeout=1\"\n if offset:\n url += \"&offset={}\".format(offset)\n js = self.get_json_from_url(url)\n return js\n\n #\n def get_last_update_id(self, updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n #\n def send_message(self, text, chat_id, reply_markup=None):\n text = parse.quote_plus(text)\n url = URL + \"SendMessage?text={}&chat_id={}&parse_mode=Markdown\".format(text, chat_id)\n if reply_markup:\n url += \"&reply_markup={}\".format(reply_markup)\n self.get_url(url)\n\n #\n def get_text_and_chat(self, updates):\n len_updates = len(updates[\"result\"])\n last_update = len_updates - 1\n try:\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n except:\n text = \"no valid text\"\n logging.error(\"no valid text\")\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return text, chat_id\n\n #\n def get_name(self, updates):\n for update in updates[\"result\"]:\n chat = update[\"message\"][\"chat\"][\"id\"]\n try:\n name = update[\"message\"][\"chat\"][\"first_name\"]\n except:\n # write_log2(\"no_name\", time)\n name = \"n/a\"\n try:\n surname = update[\"message\"][\"chat\"][\"last_name\"]\n except:\n # write_log2(\"no_surname\", time)\n surname = \"n/a\"\n return name\n\n #\n def id_check(self, updates):\n for update in updates[\"result\"]:\n chat = update[\"message\"][\"chat\"][\"id\"]\n logging.info(\"chat: {}, allowed: {}\".format(chat, self.allowed))\n date = update[\"message\"][\"date\"]\n time = datetime.fromtimestamp(date)\n time = time.strftime('%Y-%m-%d at %H:%M:%S')\n try:\n name = 
update[\"message\"][\"chat\"][\"first_name\"]\n except:\n name = \"n/a\"\n try:\n surname = update[\"message\"][\"chat\"][\"last_name\"]\n except:\n surname = \"n/a\"\n try:\n username = update[\"message\"][\"chat\"][\"username\"]\n except:\n username = \"n/a\"\n\n if chat in self.allowed:\n #logging.info(\"\\nconnection from: {} ... \\nconnection successful\".format(chat))\n return 1\n else:\n self.send_message(\"Unknown user, access denied. Contact system admin\", chat)\n message = [name, \" \", surname, \"\\nUsername: \", username, \"\\nID: \", chat, \"\\nAt: \", str(time),\n \"Concedere i privilegi all'utente?\"]\n message = ''.join(map(str, message))\n keyboard = [[chat], [\"Home\"]]\n self.send_message(message, self.master, keyboard)\n return 0","sub_path":"messageHandler.py","file_name":"messageHandler.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"218396426","text":"from sentiment_classifier import SentimentClassifier\r\n\r\nclf = SentimentClassifier()\r\n\r\nprediction = clf.get_prediction_message('Ужасно слабый аккумулятор, это основной минус этого аппарата, разряжается '\r\n 'буквально за пару часов при включенном wifi и на макс подсветке, '\r\n 'например если играть или смотреть видео, следовательно использовать можно '\r\n 'только если есть постоянная возможность подзарядиться. Качества звука через '\r\n 'динамик далеко не на высоте.Наблюдаются незначительные тормоза в некоторых '\r\n 'приложениях и вообще в меню. Очень мало встроенной памяти, а приложения '\r\n 'устанавливаются именно туда, с этим связанны неудобства - нужно постоянно '\r\n 'переносить их на карту памяти.\\ Несколько неудобно что нету отдельной кнопки '\r\n 'для фото. 
Подумываю купить батарею большей емкость мб что нибудь измениться.')\r\n\r\nprint(prediction[0])\r\n","sub_path":"classifier_test.py","file_name":"classifier_test.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"52373397","text":"from typing import List\nfrom collections import deque\n\n\nclass Solution:\n def swimInWater(self, grid: List[List[int]]) -> int:\n n = len(grid)\n\n def bfs(t):\n queue = deque([(0, 0)])\n visited_node = set((0, 0))\n while queue:\n queue_length = len(queue)\n for _ in range(queue_length):\n i, j = queue.pop()\n if (i, j) == (n - 1, n - 1):\n return True\n for delta_i, delta_j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n new_i, new_j = i + delta_i, j + delta_j\n if (\n 0 <= new_i < n\n and 0 <= new_j < n\n and (new_i, new_j) not in visited_node\n and grid[new_i][new_j] <= t\n ):\n queue.appendleft((new_i, new_j))\n visited_node.add((new_i, new_j))\n return False\n\n start, end = grid[0][0], max((max(grid[i]) for i in range(n)))\n while start <= end:\n mid = (start + end) // 2\n if bfs(mid):\n end = mid - 1\n else:\n start = mid + 1\n\n return start\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n print(solution.swimInWater(grid=[[0]]))\n print(solution.swimInWater(grid=[[0, 2], [1, 3]]))\n print(\n solution.swimInWater(\n grid=[\n [0, 1, 2, 3, 4],\n [24, 23, 22, 21, 5],\n [12, 13, 14, 15, 16],\n [11, 17, 18, 19, 20],\n [10, 9, 8, 7, 6],\n ]\n )\n )\n","sub_path":"binary_search/778SwiminRisingWater.py","file_name":"778SwiminRisingWater.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"173477954","text":"# Napisać program, który zapyta użytkownika o kod waluty\n# a następnie pobierze z NBP kurs z ostatnich kilkunastu dni\n# i wygeneruje wykres w formacie SVG.\n\nimport json\nimport requests\n\nURL = \"http://api.nbp.pl/api/exchangerates/rates/A/%s/last/%d/?format=JSON\"\n\ndef rysuj_wykres(dane, nazwapliku):\n SZER = 800\n WYS = 600\n MARGIN = 20\n ODSTEP = 10\n f = open(nazwapliku, \"w\")\n f.write('')\n f.write(f'\\n')\n maxwartosc = max([x[1] for x in dane])\n minwartosc = min([x[1] for x in dane])*0.99\n x = MARGIN\n for etykieta, wartosc in dane:\n wysokosc_wzg = (wartosc-minwartosc)/(maxwartosc-minwartosc)\n wysokosc_max = WYS-2*MARGIN\n wysokosc = wysokosc_wzg * wysokosc_max\n szer = (SZER-2*MARGIN-(len(dane)-1)*ODSTEP )/len(dane)\n y = ODSTEP + wysokosc_max - wysokosc\n f.write(f'\\n')\n f.write(f'{wartosc}\\n')\n f.write(f'{etykieta}\\n')\n x+=szer+ODSTEP\n f.write('')\n f.close()\n\n#waluta = input(\"Podaj kod waluty: \")\nwaluta = \"EUR\"\nile = 12\n\nurl = URL % ( waluta, ile )\n\nkursy = [ (k['effectiveDate'], k['mid']) for k in json.loads(requests.get(url).text)['rates'] ]\n\nrysuj_wykres( kursy, \"zadanie04.svg\")","sub_path":"pliki/zadanie04.py","file_name":"zadanie04.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"375056437","text":"# -*-coding:utf-8 -*\nimport pygame, math, narro.directions, narro.tmxreader, numpy, os, sys\nfrom pygame.locals import *\nfrom narro import *\nimport pygame.surfarray as surfarray\nfrom collections import OrderedDict\nfrom .tile import *\nfrom .constantes import *\nfrom .observateur import *\nfrom .zonePensee import *\n\nif SESSION_DEBUG:\n import pdb\n\n\nclass Carte(Observateur):\n \"\"\"Classe représentant une 
carte au niveau des données\"\"\"\n def __init__(self, nomCarte, jeu): \n \"\"\"Initialise la carte à exécuter à partir des données issues de son fichier SQMAP.\n Cette méthode se charge surtout du transfert du format de carte .narromap à celui du Narro Engine (purement mémoriel).\"\"\"\n Observateur.__init__(self)\n self._carteTiled = tmxreader.TileMapParser().parse_decode(os.path.join(DOSSIER_RESSOURCES, nomCarte + \".tmx\"))\n self._nom, self._description = self._carteTiled.properties.get(\"nom\", nomCarte), self._carteTiled.properties.get(\"description\", \"\")\n self._musique = self._carteTiled.properties.get(\"musique\", \"\")\n self._longueur, self._largeur = self._carteTiled.width, self._carteTiled.height\n self._nombreCouches, self._hauteurTile = len(self._carteTiled.layers), self._carteTiled.tilewidth\n self._scrollingX, self._scrollingY = 0,0\n self._jeu, self._toutAChanger = jeu, True\n self._dicoSurfaces, self._tiles, self._blocsRef, self._pnj, i = dict(), list(), dict(), dict(), 0\n self._ecran = Rect(0, 0, self._longueur*32, self._largeur*32)\n self._scrollingPossible, self._etapeScrolling = False, 0\n self._surfaceZonePensee, self._positionZonePensee, self._besoinAffichageZonePensee = None, None, False\n self._emplacementScrollingX, self._emplacementScrollingY = int(int(FENETRE[\"longueurFenetre\"]/2) / 32)*32, int(int(FENETRE[\"largeurFenetre\"]/2)/32)*32\n self._ecranVisible = Rect(0, 0, FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetre\"])\n self._positionsDepart = dict()\n self._fenetre, self._blitFrame = self._jeu.fenetre, False\n self._transformationsGlobales, self._transformationsParties, self._parametresTransformations = list(), list(), dict()\n\n self._dicoGid = dict()\n for tileset in self._carteTiled.tile_sets:\n for image in tileset.images:\n self._ajouterSurface(False, image.source, False, tileset=tileset, mobile=False)\n \n self._tilesLayers = []\n i, x, y = 0, 0, 0\n while i < self._nombreCouches:\n x = 0\n self._tilesLayers.append(pygame.Surface((self._longueur * 32, self._largeur * 32), flags=SRCALPHA))\n self._pnj[i] = dict()\n while x < self._longueur:\n y = 0\n self._tiles.append(list())\n while y < self._largeur:\n self._tiles[x].append(Tile(self._nombreCouches))\n gid = self._carteTiled.layers[i].content2D[x][y]\n if gid != 0: #Bloc plein\n self._tiles[x][y].bloc.append(Bloc(infos=self._dicoGid[gid]))\n surfaceTileset, positionSource = self._dicoSurfaces[self._dicoGid[gid][0]][\"Source\"], self._dicoGid[gid][2]\n self._tilesLayers[i].blit(surfaceTileset, (x * self._hauteurTile, y * self._hauteurTile), area=positionSource) \n else: #Bloc vide\n self._tiles[x][y].bloc.append(Bloc(vide=True))\n if i == 0: #Sur la couche 0, il faut mettre du noir pour les blocs vides\n self._tilesLayers[i].fill((0,0,0), (x * self._hauteurTile, y * self._hauteurTile, self._hauteurTile, self._hauteurTile))\n if i == self._nombreCouches - 1:\n self._tiles[x][y].recalculerPraticabilites()\n y += 1\n x += 1\n i += 1\n del self._dicoGid\n\n def _completerDicoGids(self, nomTileset, tileset, longueur, largeur):\n \"\"\"Lors du chargement d'un tileset dans _ajouterSurface (quand une carte est créée), cette fonction se charge de faire correspondre à chaque tile du tileset les infos\n qui lui correspondent. 
1 Tile dans le tileset = 1 GID = 1 position source, 1 praticabilité, 1 nom de tileset\"\"\"\n gid, idTileset, x, y, tileWidth, tileHeight = int(tileset.firstgid), 0, 0, 0, int(tileset.tilewidth), int(tileset.tileheight)\n while y < largeur:\n x = 0\n while x < longueur:\n if len(tileset.tiles) > 0:\n praticabilite = tileset.tiles[idTileset].properties.get(\"Praticabilite\", False) == \"True\"\n else: #Tileset importé d'ailleurs, les praticabilités n'ont pas été indiquées\n praticabilite = True\n self._dicoGid[gid] = nomTileset, praticabilite, (x, y, tileWidth, tileHeight)\n gid, idTileset, x = gid + 1, idTileset + 1, x + tileWidth #increments\n y += tileHeight\n\n def _ajouterSurface(self, positionSource, cheminVersTileset,couleurTransparente, tileset=False, mobile=True):\n \"\"\"Ajoute la surface correspondant à un bloc dans le dico de surfaces, si elle n'y est pas déjà. \n Pour les tilesets, on ajoute la surface entière seulement. Pour les mobiles, on enregistre aussi la partie du tileset qui nous intéresse.\n Pour les tilesets, on complète le dico de GIDs (lors de la création de la carte).\"\"\"\n nomTileset = os.path.basename(cheminVersTileset)\n if nomTileset not in self._dicoSurfaces:\n self._dicoSurfaces[nomTileset] = dict()\n try:\n self._dicoSurfaces[nomTileset][\"Source\"] = pygame.image.load(os.path.join(DOSSIER_RESSOURCES,cheminVersTileset))\n if tileset is not False:\n self._completerDicoGids(nomTileset, tileset, self._dicoSurfaces[nomTileset][\"Source\"].get_width(), self._dicoSurfaces[nomTileset][\"Source\"].get_height())\n except pygame.error as erreur:\n print( MESSAGE_ERREUR_CHARGEMENT_TILESET.format(nomTileset), str(erreur) )\n if mobile is True and positionSource not in self._dicoSurfaces[nomTileset].keys(): #On ne conserve les sous-surfaces que des mobiles\n self._dicoSurfaces[nomTileset][positionSource] = pygame.Surface((positionSource[2],positionSource[3]), flags=SRCALPHA).convert_alpha()\n self._dicoSurfaces[nomTileset][positionSource].blit(self._dicoSurfaces[nomTileset][\"Source\"], (0,0), area=positionSource)\n elif mobile is False and positionSource is not False: #pour changerBloc : on retourne la sous-surface pour la blitter sur les tiles layers\n return self._dicoSurfaces[nomTileset][\"Source\"].subsurface(positionSource)\n\n def changerBloc(self, x, y, c, nomTileset, positionSource, couleurTransparente, praticabilite, vide=False):\n if self.tileExistant(x,y) is True and c < self.nombreCouches:\n bloc, jeu = self._tiles[x][y].bloc[c], self._jeu\n if vide is False:\n bloc = Bloc(nomTileset=nomTileset, positionSource=positionSource, couleurTransparente=couleurTransparente, praticabilite=praticabilite)\n self._tiles[x][y].bloc[c] = bloc\n surfaceBloc = self._ajouterSurface(positionSource, nomTileset, couleurTransparente, tileset=False, mobile=False)\n self._tilesLayers[c].blit(surfaceBloc, (x*self._hauteurTile, y*self._hauteurTile) )\n else:\n bloc, praticabilite = Bloc(jeu, vide=True), True\n self._tiles[x][y].bloc[c] = bloc\n absi, ordo, i, a = x*self._hauteurTile, y*self._hauteurTile, 0, 0\n couleurEntierementTransparente = Color(0,0,0,0)\n while i < self._hauteurTile: #On rend transparent les pixels du tile désormais vide\n a = 0\n while a < self._hauteurTile:\n self._tilesLayers[c].set_at((absi+i, ordo+a), couleurEntierementTransparente)\n a += 1\n i += 1\n self._tiles[x][y].modifierPraticabilite(c, praticabilite)\n self.mettreToutAChanger()\n\n\n def tileExistant(self,x,y):\n \"\"\"Retourne True si le tile de coordonnées , existe\"\"\"\n return x >= 0 and x 
< self._longueur and y >= 0 and y < self._largeur\n\n def tilePraticable(self, x, y, c):\n if x < len(self._tiles):\n if y < len(self._tiles[x]):\n if c < len(self._tiles[x][y].praticabilite):\n return self._tiles[x][y].praticabilite[c]\n else:\n return False\n else:\n return False\n else:\n return False\n\n def _determinerPresenceSurTiles(self, x, y, longueur, largeur):\n abscisses, ordonnees, x, y, longueur, largeur, i = [], [], (x / 32), (y/32), int(longueur/32), int(largeur/32), 0\n abscisses = list(range(math.floor(x), math.ceil(x) + longueur))\n ordonnees = list(range(math.floor(y), math.ceil(y) + largeur))\n listeTilesPresence = [(absa, ordo) for absa in abscisses for ordo in ordonnees]\n return listeTilesPresence\n\n def coordonneesAuTileSuivant(self, direction, x, y):\n \"\"\"Retourne les deux coordonnées au tile suivant en fonction de la direction.\"\"\"\n xReponse, yReponse = int(x/32), int(y/32)\n if direction is \"Gauche\" or direction is \"Droite\":\n xReponse = int( directions.ajusterCoordonneesLorsDeplacement(x, direction) / 32)\n elif direction is \"Haut\" or direction is \"Bas\":\n yReponse = int( directions.ajusterCoordonneesLorsDeplacement(y, direction) / 32)\n return (xReponse, yReponse)\n\n def deplacementPossible(self, positionCarte, c, nomPNJ):\n \"\"\"Indique si un déplacement en est possible. Retourne un 2-tuple avec :\n * si un PNJ peut être positionné en , sinon . Si , sont fournis, ne prend pas en compte le PNJ à cette position pour les collisions.\n * Le tile qui vient d'être quitté.\"\"\"\n deplacementPossible = True\n if self._ecran.contains(positionCarte) == 0: #Si la position d'arrivée existe dans la carte\n deplacementPossible = False\n pnjsEnCollision = [pnj for pnj in self._pnj[c].values() if pnj.nomPNJ != nomPNJ and (pnj.positionCarte.colliderect(positionCarte) == 1 and (pnj.positionCarteSuivante == positionCarte or pnj.positionCarteSuivante == False))]\n if len(pnjsEnCollision) > 0:\n deplacementPossible = False\n for (x,y) in self._determinerPresenceSurTiles(positionCarte.left, positionCarte.top, positionCarte.width, positionCarte.height):\n if self.tilePraticable(x, y, c) is False: #Si le tile est impraticable\n deplacementPossible = False\n return deplacementPossible\n\n def supprimerPNJ(self, nomPNJ, couche):\n \"\"\"Supprime un PNJ à l'écran.\"\"\"\n if nomPNJ in self._pnj[couche].keys():\n del self._pnj[couche][nomPNJ]\n self._toutAChanger = True\n\n def poserPNJ(self, positionCarte, c, positionSource, nomTileset, couleurTransparente, nomPNJ, positionCarteSuivante=False):\n \"\"\"Ordonne l'affichage à l'écran d'un PNJ à une nouvelle position et l'effacement du PNJ à sa position précedente\"\"\"\n hauteurTile = self._hauteurTile\n x,y = float(positionCarte.left), float(positionCarte.top)\n if nomPNJ not in self._pnj[c].keys():\n self._pnj[c][nomPNJ] = Bloc(self._jeu, pnj=True, nomPNJ=nomPNJ, nomTileset=nomTileset, positionCarte=positionCarte, positionCarteSuivante=positionCarteSuivante, positionSource=positionSource)\n pnj = self._pnj[c][nomPNJ]\n if pnj.positionSource != positionSource:\n pnj.positionSource = positionSource\n if pnj.nomTileset != nomTileset:\n pnj.nomTileset = nomTileset\n if pnj.couleurTransparente != couleurTransparente:\n pnj.couleurTransparente = couleurTransparente\n if pnj.positionCarte != positionCarte:\n pnj.positionCarte = positionCarte\n if pnj.positionCarteSuivante != positionCarteSuivante:\n pnj.positionCarteSuivante = positionCarteSuivante\n self._toutAChanger = True\n self._ajouterSurface( (positionSource.left, 
positionSource.top, positionSource.width, positionSource.height), nomTileset, couleurTransparente)\n \n def mettreToutAChanger(self):\n self._toutAChanger = True\n\n def _coordonneeScrollingPossible(self, coor, abs=False):\n \"\"\"Retourne si est dans un emplacement où le scrolling est possible. \n Paramètre : quand vaut , il s'agit non pas d'une ordonnée, mais d'une abscisse.\"\"\"\n if abs is False: #Ordonnée\n return coor == self._emplacementScrollingY\n else: #Abscisse\n return coor == self._emplacementScrollingX\n\n def verifierScrollingPossible(self, x, y, direction):\n \"\"\"Vérifie si le scrolling est possible pour faciliter le traitement dans gererScrolling\"\"\"\n self._scrollingPossible, scrollingDirection = False, True\n if direction == \"Bas\" and int(self._scrollingY / 32) + int(FENETRE[\"largeurFenetre\"]/32) >= self._largeur:\n scrollingDirection = False\n if direction == \"Haut\" and self._scrollingY == 0:\n scrollingDirection = False\n if direction == \"Droite\" and int(self._scrollingX / 32) + int(FENETRE[\"longueurFenetre\"]/32) >= self._longueur:\n scrollingDirection = False\n if direction == \"Gauche\" and self._scrollingX == 0:\n scrollingDirection = False\n if scrollingDirection is True:\n x, y = x - self._scrollingX, y - self._scrollingY\n scrollingPossibleX = self._coordonneeScrollingPossible(x, abs=True)\n scrollingPossibleY = self._coordonneeScrollingPossible(y, abs=False)\n if (direction == \"Haut\" or direction == \"Bas\") and scrollingPossibleY is True:\n self._scrollingPossible, self._directionScrolling = True, direction\n elif (direction == \"Gauche\" or direction == \"Droite\") and scrollingPossibleX is True:\n self._scrollingPossible, self._directionScrolling = True, direction\n \n def gererScrolling(self, changement, direction):\n \"\"\"Gère le scrolling\"\"\"\n if (direction == \"Droite\" or direction == \"Gauche\") and self._scrollingPossible is True:\n self._scrollingX += changement\n self.mettreToutAChanger()\n self._ecranVisible.move_ip(changement, 0)\n return True\n elif (direction == \"Bas\" or direction == \"Haut\") and self._scrollingPossible is True:\n self._scrollingY += changement\n self.mettreToutAChanger()\n self._ecranVisible.move_ip(0, changement)\n return True\n else:\n return False\n\n def initialiserScrolling(self, x, y):\n \"\"\"Après la création de la carte, initialise le scrolling à la position du joueur si nécessaire.\n est l'abscisse du joueur, son ordonnée. 
Ces coordonnées sont données en pixels.\"\"\"\n self._ecranVisible.top, self._ecranVisible.left = 0, 0\n scrollingAInitialiserX, scrollingAInitialiserY, x, y = True, True, x, y\n if FENETRE[\"largeurFenetre\"] >= self._largeur * 32: #Carte petite\n scrollingAInitialiserY = False\n if FENETRE[\"longueurFenetre\"] >= self._longueur * 32:\n scrollingAInitialiserX = False\n if x < self._emplacementScrollingX: #On est dans une partie de la carte où le scrolling est inutile\n scrollingAInitialiserX = False\n if y < self._emplacementScrollingY:\n scrollingAInitialiserY = False\n if scrollingAInitialiserX is True:\n self._scrollingX = x - self._emplacementScrollingX #A chaque instant, on a x - scrollingX = emplacementScrollingX, d'où cette relation\n if int(FENETRE[\"longueurFenetre\"]/32) + int(self._scrollingX/32) >= self._longueur: #Quand on est aux bords de la carte\n self._scrollingX = (self._longueur*32) - FENETRE[\"longueurFenetre\"]\n if scrollingAInitialiserY is True:\n self._scrollingY = y - self._emplacementScrollingY\n if int(FENETRE[\"largeurFenetre\"]/32) + int(self._scrollingY/32) >= self._largeur:\n self._scrollingY = (self._largeur*32) - FENETRE[\"largeurFenetre\"]\n self._ecranVisible = Rect(0, 0, FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetre\"])\n self._ecranVisible.move_ip(self._scrollingX, self._scrollingY)\n\n def obsOnNouvelleObservation(self, instance, nomAttribut, info):\n if isinstance(instance, ZonePensee) is True and nomAttribut == \"_surface\":\n self._surfaceZonePensee, self._besoinAffichageZonePensee = info.copy(), True\n elif isinstance(instance, ZonePensee) is True and nomAttribut == \"_positionSurface\":\n self._positionZonePensee = list(info)\n\n def _transformerPartie(self, surface, nomPnj, positionCarte, **p):\n \"\"\"Applique une transformation individuellement à chaque (mobile) lors de sa pose.\"\"\"\n for nomTransformation in self._transformationsParties:\n p = self._parametresTransformations[nomTransformation]\n if nomTransformation == \"AlphaFixe\":\n pixels = surfarray.pixels_alpha(surface)\n positionsNulles = numpy.where(pixels == 0)\n pixels[:,:] = p[\"alpha\"]\n pixels[positionsNulles] = 0\n elif nomTransformation == \"Action Joueur\" and nomPnj == \"Joueur\":\n centre = positionCarte.move(-self._scrollingX, -self._scrollingY).center\n pygame.draw.circle(self._fenetre, (255,255,255), centre, p[\"rayon\"], 1)\n\n def _appliquerTransformationGlobale(self, nomTransformation, **p):\n \"\"\"Applique la transformation globale avec le dico de paramètres
<p>
.\"\"\"\n if nomTransformation == \"Rouge\":\n pixels = surfarray.pixels3d(self._fenetre)[:FENETRE[\"longueurFenetre\"], :FENETRE[\"largeurFenetre\"]] #On exclut la zone de pensée\n pixels[:,:,1:] = 0\n elif nomTransformation == \"Noir\":\n pixels = surfarray.pixels3d(self._fenetre)[:FENETRE[\"longueurFenetre\"],:FENETRE[\"largeurFenetre\"]]\n pixels /= p[\"coef\"]\n if p[\"coef\"] >= 12:\n pixels[:] = (0,0,0)\n elif nomTransformation == \"NoirTotal\":\n pixels = surfarray.pixels3d(self._fenetre)[:FENETRE[\"longueurFenetre\"],:FENETRE[\"largeurFenetre\"]]\n pixels[:] = (0,0,0)\n elif nomTransformation == \"RemplirNoir\":\n self._fenetre.fill((0,0,0), rect=(0,0,FENETRE[\"longueurFenetre\"],FENETRE[\"largeurFenetre\"]))\n elif \"SplashText\" in nomTransformation:\n if \"couleurFond\" in p.keys():\n couleurFond=p[\"couleurFond\"]\n else:\n couleurFond=None\n surfaceTexte = self._jeu.zonePensee.polices[\"splashText\"].render(p[\"texte\"], p[\"antialias\"], p[\"couleurTexte\"], couleurFond)\n self._fenetre.blit(surfaceTexte, p[\"position\"])\n elif nomTransformation == \"Nuit\":\n self._fenetre.fill((0,0,0), rect=(0,0,FENETRE[\"longueurFenetre\"],FENETRE[\"largeurFenetre\"]))\n c = 0\n while c < self._nombreCouches: \n for nomPnj in self._pnj[c]: \n self._afficherBlocPnj(c, nomPnj)\n c += 1\n\n\n def _transformerSurfaceGlobalement(self, affichageComplet=False):\n \"\"\"A chaque frame, regarde s'il y a des transformations globales à appliquer, et les exécute lorsque c'est le cas.\n doit valoir si la fonction doit mettre à jour l'écran entier elle-même (car personne ne le fait après).\n Retourne quand la fonction s'est occupée de la mise à jour de l'écran (car on le lui a demandé ET qu'il y avait des transfos à traiter).\"\"\"\n if len(self._transformationsGlobales) > 0: #S'il y a des transformations à opérer\n longueurFenetre, largeurFenetre = FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetre\"]\n for nomTransformation in self._transformationsGlobales:\n self._appliquerTransformationGlobale(nomTransformation, **self._parametresTransformations[nomTransformation]) #On applique la transfo\n\n def _afficherZonePensee(self, affichageComplet=False):\n \"\"\"S'il y a quelque chose à afficher, réaffiche la zone de pensée. 
\n est un booléen qui vaut lorsque pygame.display.flip est appelée à la suite de l'appel de la fonction.\"\"\"\n positionZoneEntiere = (0, FENETRE[\"largeurFenetre\"], FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetreReelle\"] - FENETRE[\"largeurFenetre\"])\n self._fenetre.fill(COULEUR_FOND_ZONE_PENSEE,rect=positionZoneEntiere)\n if self._surfaceZonePensee is not None:\n self._fenetre.blit(self._surfaceZonePensee, self._positionZonePensee)\n if affichageComplet is False:\n pygame.display.update(positionZoneEntiere)\n self._besoinAffichageZonePensee = False\n\n def _afficherBlocPnj(self, c, nomPnj):\n \"\"\"Affiche un PNJ sur un bloc\"\"\"\n pnj = self._pnj[c][nomPnj]\n if self._ecranVisible.contains(pnj.positionCarte) or self._ecranVisible.colliderect(pnj.positionCarte):\n positionCollage = pnj.positionCarte.move(-self._scrollingX, -self._scrollingY)\n if len(self._transformationsParties) > 0:\n surfaceCollage = self._dicoSurfaces[pnj.nomTileset][(pnj.positionSource.left, pnj.positionSource.top, pnj.positionSource.width, pnj.positionSource.height)].copy()\n self._transformerPartie(surfaceCollage, nomPnj, pnj.positionCarte)\n else:\n surfaceCollage = self._dicoSurfaces[pnj.nomTileset][(pnj.positionSource.left, pnj.positionSource.top, pnj.positionSource.width, pnj.positionSource.height)]\n self._fenetre.blit(surfaceCollage, positionCollage)\n \n def afficher(self):\n \"\"\"Cette méthode gère l'affichage de la carte\"\"\"\n self._blitFrame = False\n if self._toutAChanger is True:\n coucheActuelle = 0\n self._fenetre.fill((0,0,0))\n while coucheActuelle < self._nombreCouches: \n self._fenetre.blit(self._tilesLayers[coucheActuelle], (0,0), area=self._ecranVisible)\n nomsPnjs = sorted(self._pnj[coucheActuelle], key=lambda nomPNJ: self._pnj[coucheActuelle][nomPNJ].positionCarte.top)\n #Tri des PNJs selon leur ordonnée (de manière croissante) : on affiche ceux en haut de l'écran avant ceux en bas, pour avoir une superposition\n for nomPnj in nomsPnjs: \n self._afficherBlocPnj(coucheActuelle, nomPnj)\n coucheActuelle += 1\n self._afficherZonePensee(affichageComplet=True)\n self._transformerSurfaceGlobalement()\n self._blitFrame = True\n \n if self._blitFrame is True:\n if LIMITER_FPS:\n self._jeu.horlogeFps.tick(NOMBRE_MAX_DE_FPS)\n else:\n self._jeu.horlogeFps.tick()\n pygame.display.flip()\n\n def _getNombreCouches(self):\n \"\"\"Retourne le nombre de couches défini sur la carte\"\"\"\n return self._nombreCouches\n\n def _getHauteurTile(self):\n \"\"\"Retourne la hauteur d'un tile sur la carte\"\"\"\n return self._hauteurTile\n\n def _getNom(self):\n return self._nom\n\n def _getLongueur(self):\n return self._longueur\n\n def _getLargeur(self):\n return self._largeur\n\n def _getTransformationsGlobales(self):\n return self._transformationsGlobales\n\n def _setTransformationsGlobales(self, val):\n self._transformationsGlobales = val\n\n def _getTransformationsParties(self):\n return self._transformationsParties\n\n def _setTransformationsParties(self, val):\n self._transformationsParties = val\n\n def _getParametresTransformations(self):\n return self._parametresTransformations\n\n def _getTiles(self):\n return self._tiles\n\n nombreCouches = property(_getNombreCouches)\n hauteurTile = property(_getHauteurTile)\n nom = property(_getNom)\n longueur = property(_getLongueur)\n largeur = property(_getLargeur)\n tiles = property(_getTiles)\n transformationsGlobales = property(_getTransformationsGlobales, _setTransformationsParties)\n transformationsParties = property(_getTransformationsParties, 
_setTransformationsParties)\n parametresTransformations = property(_getParametresTransformations)\n","sub_path":"Releases/0.2/narro/carte.py","file_name":"carte.py","file_ext":"py","file_size_in_byte":24827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"44342577","text":"def calculate_weight(x_data, y_data):\n\tn = x_data.__len__()\n\tm = (\n\t\t\t((n * sum_all(x_data, y_data, 0, n - 1, lambda x, y: x * y)) - (\n\t\t\t\tsum_all(x_data, y_data, 0, n - 1, lambda x, y: x)) * (\n\t\t\t\t sum_all(x_data, y_data, 0, n - 1, lambda x, y: y))) /\n\t\t\t((n * sum_all(x_data, y_data, 0, n - 1, lambda x, y: x ** 2)) - (\n\t\t\t\tsum_all(x_data, y_data, 0, n - 1, lambda x, y: x)) ** 2)\n\t)\n\treturn m\n\n\ndef sum_all(x_data, y_data, a, b, func):\n\ti = a\n\tsum_res = 0\n\twhile i < b:\n\t\tsum_res += func(x_data[i], y_data[i])\n\t\ti += 1\n\treturn sum_res\n\n\ndef convert_to_float(s):\n\ttry:\n\t\treturn float(s)\n\texcept ValueError:\n\t\tassert False, \"\\\"\" + s + \"\\\" can't be converted\"\n\n\ndef read_file(file_name):\n\tls = []\n\twith open(file_name, \"r\") as f:\n\t\twhile True:\n\t\t\ts = f.readline()\n\t\t\tif s == '':\n\t\t\t\tbreak\n\t\t\tls.append(convert_to_float(s.replace('\\n', '')))\n\treturn ls\n\n\nif __name__ == '__main__':\n\tfile_prefix = input(\"File prefix : \")\n\tx_file_name = file_prefix + \"_x\"\n\ty_file_name = file_prefix + \"_y\"\n\tx_raw_data = read_file(x_file_name)\n\ty_raw_data = read_file(y_file_name)\n\tassert x_raw_data.__len__() == y_raw_data.__len__(), \"Data does not has a same length\"\n\tassert x_raw_data.__len__() is not 0, \"No data acquired\"\n\tprint(\"Appropriate description=\" + str(calculate_weight(x_raw_data, y_raw_data)))\n","sub_path":"Assignment 3/findLineOfTheBestFit.py","file_name":"findLineOfTheBestFit.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"571628772","text":"\"\"\"\nFunctional test\n\nDeletion Abuse Epic\n\nStoryboard is defined within the comments of the program itself\n\"\"\"\n\nimport unittest\nfrom flask import url_for\nfrom biblib.tests.stubdata.stub_data import UserShop, LibraryShop\nfrom biblib.tests.base import TestCaseDatabase, MockEmailService\nfrom biblib.views.http_errors import NO_PERMISSION_ERROR\n\nclass TestDeletionAbuseEpic(TestCaseDatabase):\n \"\"\"\n Base class used to test the Deletion Abuse Epic\n \"\"\"\n\n def test_deletion_abuse_epic(self):\n \"\"\"\n Carries out the epic 'Deletion Abuse', where each type of permission\n for a library: None, Read, Write, Admin, try to delete a library and\n get permission denied. 
The owner then deletes the library, and it is\n successful.\n\n :return: no return\n \"\"\"\n\n # Load stub data\n stub_owner = UserShop(name='owner')\n stub_none = UserShop(name='none')\n stub_reader = UserShop(name='reader')\n stub_editor = UserShop(name='editor')\n stub_admin = UserShop(name='admin')\n stub_library = LibraryShop(public=False)\n\n # Makes the library\n url = url_for('userview')\n response = self.client.post(\n url,\n data=stub_library.user_view_post_data_json,\n headers=stub_owner.headers\n )\n library_id = response.json['id']\n self.assertEqual(response.status_code, 200, response)\n self.assertTrue('name' in response.json)\n self.assertTrue(response.json['name'] == stub_library.name)\n\n # Give the correct permissions to each user\n url = url_for('permissionview', library=library_id)\n for stub_user, permission in [[stub_reader, 'read'],\n [stub_editor, 'write'],\n [stub_admin, 'admin']]:\n with MockEmailService(stub_user):\n response = self.client.post(\n url,\n data=stub_user.permission_view_post_data_json(\n {permission: True}\n ),\n headers=stub_owner.headers\n )\n self.assertEqual(response.status_code, 200)\n\n # The following users try to the delete the library, and fail:\n # reader, editor, admin\n url = url_for('documentview', library=library_id)\n for stub_user in [stub_none, stub_reader, stub_editor, stub_admin]:\n response = self.client.delete(\n url,\n headers=stub_user.headers\n )\n self.assertEqual(response.status_code,\n NO_PERMISSION_ERROR['number'],\n 'User: {0}'.format(stub_user.name))\n self.assertEqual(response.json['error'],\n NO_PERMISSION_ERROR['body'])\n\n # Owner deletes the library, success\n url = url_for('documentview', library=library_id)\n response = self.client.delete(\n url,\n headers=stub_owner.headers\n )\n self.assertEqual(response.status_code, 200)\n\n # Checks that it is deleted\n url = url_for('userview')\n response = self.client.get(\n url,\n headers=stub_owner.headers\n )\n self.assertTrue(len(response.json['libraries']) == 0)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)","sub_path":"biblib/tests/functional_tests/test_deletion_abuse_epic.py","file_name":"test_deletion_abuse_epic.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"455833172","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn import preprocessing\r\nfrom sklearn.cluster import estimate_bandwidth, MeanShift\r\nimport matplotlib.pyplot as plt\r\n\r\ndef load_csv():\r\n '''\r\n 1. load 'iris_data.csv into a dataframe'\r\n '''\r\n df = pd.read_csv('resources/iris_data.csv')\r\n return df\r\n\r\ndef unique_labels():\r\n '''\r\n 2. get unique labels(Species column)\r\n\r\n ????\r\n '''\r\n df = load_csv()\r\n arr = np.unique(df['Species'].values)\r\n return df, arr\r\n\r\ndef encode():\r\n df,_ = unique_labels()\r\n label_enc = preprocessing.LabelEncoder()\r\n df['Species'] = label_enc.fit_transform(df['Species'].astype(str))\r\n return df\r\n\r\ndef scatter01():\r\n \"\"\"\r\n 3. plot with a scatter plot each iris flower sample colored by label(3 different colors)\r\n \"\"\" \r\n df = encode()\r\n fig, axes = plt.subplots(nrows=2)\r\n\r\n df.plot.scatter(ax=axes[0], x='Petal length', y='Petal width', c='Species', colormap='viridis')\r\n df.plot.scatter(ax=axes[1],x='Sepal length', y='Sepal width', c='Species', colormap='viridis')\r\n\r\n plt.show()\r\n\r\ndef cluster():\r\n \"\"\"\r\n 4. 
use: MeanShift and estimate_bandwidth from sklearn.cluster to first estimate bandwidth and then get the clusters \r\n (HINT: estimate_bandwidth() takes an argument: quantile set it to 0.2 for best result\r\n \"\"\" \r\n df = encode().replace(',','.',regex=True)\r\n bandwidth = estimate_bandwidth(df,quantile=0.2)\r\n analyzer = MeanShift(bandwidth=bandwidth)\r\n analyzer.fit(df)\r\n\r\n labels = analyzer.labels_\r\n centers = analyzer.cluster_centers_\r\n\r\n return bandwidth, labels, centers, df\r\n\r\ndef cluster_print():\r\n \"\"\"\r\n 5. print labels, cluster centers and number of clusters (as returned from the MeanShift function)\r\n \"\"\" \r\n\r\n bandwidth, labels, centers, _ = cluster()\r\n unique = np.unique(labels)\r\n\r\n print('\\n\\n#########\\n')\r\n print(f\"Bandwidth: {bandwidth}\\n\")\r\n print(f'Labels:\\n {labels}\\n')\r\n print(f'Unique labels: {unique}\\n')\r\n print(f'Centers: {centers}\\n')\r\n print(f'Cluster count: {len(centers)}\\n')\r\n print('########\\n')\r\n\r\ndef scatter02():\r\n \"\"\"\r\n 6. Create a new scatter plot where each flower is colored according to cluster label\r\n \"\"\" \r\n bandwidth, labels, centers, df = cluster()\r\n unique = np.unique(labels)\r\n\r\n fig, axes = plt.subplots(nrows=2)\r\n\r\n df.plot.scatter(ax=axes[0], x='Petal length', y='Petal width', c=labels, colormap='viridis')\r\n plt.scatter(centers[:,2], centers[:,3], marker='.')\r\n\r\n plt.show()\r\n\r\n","sub_path":"week10/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"55187802","text":"\"\"\"\r\nThis module illustrates how to compute Precision at k and Recall at k metrics.\r\n\"\"\"\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\nfrom collections import defaultdict\r\nimport time\r\nimport datetime\r\nimport random\r\n\r\nimport numpy as np\r\nimport six\r\nfrom tabulate import tabulate\r\n\r\nfrom surprise import Dataset\r\nfrom surprise.model_selection import cross_validate\r\nfrom surprise.model_selection import KFold\r\nfrom surprise import NormalPredictor\r\nfrom surprise import BaselineOnly\r\nfrom surprise import KNNBasic\r\nfrom surprise import KNNWithMeans\r\nfrom surprise import KNNBaseline\r\nfrom surprise import SVD\r\nfrom surprise import SVDpp\r\nfrom surprise import NMF\r\nfrom surprise import SlopeOne\r\nfrom surprise import CoClustering\r\nfrom surprise.model_selection import train_test_split\r\n\r\nclasses = (SVD, SVDpp, NMF, SlopeOne, KNNBasic, KNNWithMeans, KNNBaseline,\r\n CoClustering, BaselineOnly, NormalPredictor)\r\n\r\n# ugly dict to map algo names and datasets to their markdown links in the table\r\nstable = 'http://surprise.readthedocs.io/en/stable/'\r\nLINK = {'SVD': '[{}]({})'.format('SVD',\r\n stable +\r\n 'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVD'),\r\n 'SVDpp': '[{}]({})'.format('SVD++',\r\n stable +\r\n 'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVDpp'),\r\n 'NMF': '[{}]({})'.format('NMF',\r\n stable +\r\n 'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.NMF'),\r\n 'SlopeOne': '[{}]({})'.format('Slope One',\r\n stable +\r\n 'slope_one.html#surprise.prediction_algorithms.slope_one.SlopeOne'),\r\n 'KNNBasic': '[{}]({})'.format('k-NN',\r\n stable +\r\n 'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBasic'),\r\n 'KNNWithMeans': 
'[{}]({})'.format('Centered k-NN',\r\n stable +\r\n 'knn_inspired.html#surprise.prediction_algorithms.knns.KNNWithMeans'),\r\n 'KNNBaseline': '[{}]({})'.format('k-NN Baseline',\r\n stable +\r\n 'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBaseline'),\r\n 'CoClustering': '[{}]({})'.format('Co-Clustering',\r\n stable +\r\n 'co_clustering.html#surprise.prediction_algorithms.co_clustering.CoClustering'),\r\n 'BaselineOnly': '[{}]({})'.format('Baseline',\r\n stable +\r\n 'basic_algorithms.html#surprise.prediction_algorithms.baseline_only.BaselineOnly'),\r\n 'NormalPredictor': '[{}]({})'.format('Random',\r\n stable +\r\n 'basic_algorithms.html#surprise.prediction_algorithms.random_pred.NormalPredictor'),\r\n 'ml-100k': '[{}]({})'.format('Movielens 100k',\r\n 'http://grouplens.org/datasets/movielens/100k'),\r\n 'ml-1m': '[{}]({})'.format('Movielens 1M',\r\n 'http://grouplens.org/datasets/movielens/1m'),\r\n }\r\n\r\n\r\ndef precision_recall_at_k(predictions, k=10, threshold=3.5):\r\n '''Return precision and recall at k metrics for each user.'''\r\n\r\n # First map the predictions to each user.\r\n user_est_true = defaultdict(list)\r\n for uid, _, true_r, est, _ in predictions:\r\n user_est_true[uid].append((est, true_r))\r\n\r\n precisions = dict()\r\n recalls = dict()\r\n for uid, user_ratings in user_est_true.items():\r\n\r\n # Sort user ratings by estimated value\r\n user_ratings.sort(key=lambda x: x[0], reverse=True)\r\n\r\n # Number of relevant items\r\n n_rel = sum((true_r >= threshold) for (_, true_r) in user_ratings)\r\n\r\n # Number of recommended items in top k\r\n n_rec_k = sum((est >= threshold) for (est, _) in user_ratings[:k])\r\n\r\n # Number of relevant and recommended items in top k\r\n n_rel_and_rec_k = sum(((true_r >= threshold) and (est >= threshold))\r\n for (est, true_r) in user_ratings[:k])\r\n\r\n # Precision@K: Proportion of recommended items that are relevant\r\n precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 1\r\n\r\n # Recall@K: Proportion of relevant items that are recommended\r\n recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 1\r\n\r\n return precisions, recalls\r\n\r\ndataset = 'ml-100k'\r\ndata = Dataset.load_builtin('ml-100k')\r\nkf = KFold(n_splits=5)\r\ntrainset,testset = train_test_split(data,test_size=.75)\r\n'''\r\nfor trainset, testset in kf.split(data):\r\n algo.fit(trainset)\r\n predictions = algo.test(testset)\r\n precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)\r\n\r\n # Precision and recall can then be averaged over all users\r\n prec = sum(p for p in precisions.values()) / len(precisions)\r\n recall = sum(rec for rec in recalls.values()) / len(recalls)\r\n f1 = 2 * prec * recall / (prec + recall)\r\n print(prec)\r\n print(recall)\r\n print(f1)\r\n'''\r\n
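Before the benchmark loop below, a tiny hand-worked example of what precision_recall_at_k computes; the (uid, iid, true_r, est, details) tuples are invented for illustration:

fake_predictions = [
    ('u1', 'i1', 5.0, 4.5, None),   # relevant and recommended
    ('u1', 'i2', 3.0, 4.2, None),   # recommended but not relevant
    ('u1', 'i3', 4.5, 3.0, None),   # relevant but not recommended
]
p, r = precision_recall_at_k(fake_predictions, k=2, threshold=4)
# top-2 by estimate: i1 (hit), i2 (miss) -> precision 1/2; 1 of 2 relevant found -> recall 1/2
assert p['u1'] == 0.5 and r['u1'] == 0.5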
table = []\r\nfor klass in classes:\r\n    start = time.time()\r\n    # classes holds the algorithm classes themselves, so instantiate the\r\n    # current one directly; comparing klass against a string such as 'SVD'\r\n    # can never match and would silently run NormalPredictor every time\r\n    algo = klass()\r\n    #cv_time = str(datetime.timedelta(seconds=int(time.time() - start)))\r\n    algo.fit(trainset)\r\n    predictions = algo.test(testset)\r\n    precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)\r\n\r\n    # Precision and recall can then be averaged over all users\r\n    prec = sum(p for p in precisions.values()) / len(precisions)\r\n    recall = sum(rec for rec in recalls.values()) / len(recalls)\r\n    f1 = 2 * prec * recall / (prec + recall)\r\n    link = LINK[klass.__name__]\r\n\r\n    new_line = [link, prec, recall, f1]\r\n    print(tabulate([new_line], tablefmt=\"pipe\")) # print current algo perf\r\n    table.append(new_line)\r\n\r\n# each row carries four values, so the header lists the matching four columns\r\nheader = [LINK[dataset],\r\n          'Precision',\r\n          'Recall',\r\n          'F1'\r\n          ]\r\nprint(tabulate(table, header, tablefmt=\"pipe\"))","sub_path":"examples/precision_recall_at_k1.py","file_name":"precision_recall_at_k1.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"338605637","text":"import json\nimport pytest\nimport os\nimport itertools\nimport requests\nfrom pprint import pprint\nfrom deepdiff import DeepDiff\nfrom tranql.main import TranQL\nfrom tranql.main import TranQLParser, set_verbose\nfrom tranql.tranql_ast import SetStatement, SelectStatement\nfrom tranql.tests.util import assert_lists_equal, set_mock, ordered\nfrom tranql.tests.mocks import MockHelper\nfrom tranql.tests.mocks import MockMap\n#set_verbose ()\n\ndef assert_parse_tree (code, expected):\n \"\"\" Parse a block of code into a parse tree. Then assert the equality\n of that parse tree to a list of expected tokens. \"\"\"\n tranql = TranQL ()\n tranql.resolve_names = False\n actual = tranql.parser.parse (code).parse_tree\n #print (f\"{actual}\")\n assert_lists_equal (\n actual,\n expected)\n\n#####################################################\n#\n# Parser tests. Verify we produce the AST for the\n# expected grammar correctly.\n#\n#####################################################\n\ndef test_parse_predicate (requests_mock):\n set_mock(requests_mock, \"predicates\")\n\n \"\"\" Test parsing a predicate. \"\"\"\n print (f\"test_parse_predicate()\")\n assert_parse_tree (\n code = \"\"\"\n SELECT chemical_substance-[treats]->disease\n FROM \"/graph/gamma/quick\"\n WHERE chemical_substance='PUBCHEM:2083'\n SET \"$.knowledge_graph.nodes.[*].id as indications\n \"\"\",\n expected = [\n [ [ \"select\",\n \"chemical_substance\",\n [ \"-[\",\n \"treats\",\n \"]->\"\n ], \"disease\", \"\\n\"\n ],\n \" \",\n [ \"from\", [ \"/graph/gamma/quick\"] ],\n [\"where\",\n [\n \"chemical_substance\",\n \"=\",\n \"PUBCHEM:2083\"\n ]\n ], [ \"\" ]\n ]])\n\ndef test_parse_set (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n\n \"\"\" Test parsing set statements. \"\"\"\n print (f\"test_parse_set()\")\n assert_parse_tree (\n code = \"\"\"\n SET disease = 'asthma'\n SET max_p_value = '0.5'\n SET cohort = 'COHORT:22'\n SET population_density = 2\n SET icees.population_density_cluster = 'http://localhost/ICEESQuery'\n SET gamma.quick = 'http://robokop.renci.org:80/api/simple/quick/' \"\"\",\n expected = [\n [\"set\", \"disease\", \"=\", \"asthma\"],\n [\"set\", \"max_p_value\", \"=\", \"0.5\"],\n [\"set\", \"cohort\", \"=\", \"COHORT:22\"],\n [\"set\", \"population_density\", \"=\", 2],\n [\"set\", \"icees.population_density_cluster\", \"=\", \"http://localhost/ICEESQuery\"],\n [\"set\", \"gamma.quick\", \"=\", \"http://robokop.renci.org:80/api/simple/quick/\"]\n ])\n\ndef test_parse_set_with_comment (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Test parsing set statements with comments. 
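The pipe format used in the benchmark above renders as a GitHub-flavored Markdown table; a quick sketch with made-up numbers (tabulate's exact column padding may differ):

from tabulate import tabulate

rows = [['SVD', 0.78, 0.41, 0.54]]           # illustrative values only
print(tabulate(rows, ['Algo', 'Precision', 'Recall', 'F1'], tablefmt='pipe'))
# prints a Markdown-style table, roughly:
# | Algo | Precision | Recall | F1 |
# |:-----|----------:|-------:|---:|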
\"\"\"\n print (f\"test_parse_set_with_comment()\")\n assert_parse_tree (\n code = \"\"\"\n -- This is a comment\n SET disease = 'asthma' \"\"\",\n expected = [\n [\"set\", \"disease\", \"=\", \"asthma\"]\n ])\n\ndef test_parse_select_simple (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Verify the token stream of a simple select statement. \"\"\"\n print (f\"test_parse_select_simple()\")\n assert_parse_tree (\n code = \"\"\"\n SELECT chemical_substance->gene->biological_process->phenotypic_feature\n FROM \"/graph/gamma/quick\"\n WHERE chemical_substance = $chemical_exposures\n SET knowledge_graph \"\"\",\n expected = [\n [[\"select\", \"chemical_substance\", \"->\", \"gene\", \"->\", \"biological_process\", \"->\", \"phenotypic_feature\", \"\\n\"],\n \" \",\n [\"from\", [\"/graph/gamma/quick\"]],\n [\"where\", [\"chemical_substance\", \"=\", \"$chemical_exposures\"]],\n [\"set\", [\"knowledge_graph\"]]]\n ])\n\ndef test_parse_select_complex (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Verify the token stream of a more complex select statement. \"\"\"\n print (f\"test_parse_select_complex()\")\n assert_parse_tree (\n code = \"\"\"\n SELECT disease->chemical_substance\n FROM \"/flow/5/mod_1_4/icees/by_residential_density\"\n WHERE disease = \"asthma\"\n AND EstResidentialDensity < \"2\"\n AND cohort = \"COHORT:22\"\n AND max_p_value = \"0.5\"\n SET '$.nodes.[*].id' AS chemical_exposures \"\"\",\n expected = [\n [[\"select\", \"disease\", \"->\", \"chemical_substance\", \"\\n\"],\n \" \",\n [\"from\", [\"/flow/5/mod_1_4/icees/by_residential_density\"]],\n [\"where\",\n [\"disease\", \"=\", \"asthma\"], \"and\",\n [\"EstResidentialDensity\", \"<\", \"2\"], \"and\",\n [\"cohort\", \"=\", \"COHORT:22\"], \"and\",\n [\"max_p_value\", \"=\", \"0.5\"]\n ],\n [\"set\", [\"$.nodes.[*].id\", \"as\", \"chemical_exposures\"]]]\n ])\n\ndef test_parse_query_with_repeated_concept (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Verify the parser accepts a grammar allowing concept names to be prefixed by a name\n and a colon. \"\"\"\n print (f\"test_parse_query_with_repeated_concept\")\n assert_parse_tree (\n code=\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/clinical/cohort/disease_to_chemical_exposure'\n WHERE cohort_diagnosis = 'asthma'\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\",\n expected = [\n [[\"select\", \"cohort_diagnosis:disease\",\"->\",\"diagnoses:disease\",\"\\n\"],\n \" \",\n [\"from\",\n [\"/clinical/cohort/disease_to_chemical_exposure\"]\n ],\n [\"where\",\n [\"cohort_diagnosis\",\"=\",\"asthma\"],\n \"and\",\n [\"Sex\",\"=\",\"0\"],\n \"and\",\n [\"cohort\",\"=\",\"all_patients\"],\n \"and\",\n [\"max_p_value\",\"=\",\"0.5\"]\n ],\n [\"set\",\n [\"$.knowledge_graph.nodes.[*].id\",\"as\",\"diagnoses\"]\n ]\n ]])\n\n#####################################################\n#\n# AST tests. Test abstract syntax tree components.\n#\n#####################################################\ndef test_ast_set_variable (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Test setting a varaible to an explicit value. 
\"\"\"\n print (\"test_ast_set_variable ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n statement = SetStatement (variable=\"variable\", value=\"x\")\n statement.execute (tranql)\n assert tranql.context.resolve_arg (\"$variable\") == 'x'\ndef test_ast_set_graph (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Set a variable to a graph passed as a result. \"\"\"\n print (\"test_ast_set_graph ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n statement = SetStatement (variable=\"variable\", value=None, jsonpath_query=None)\n statement.execute (tranql, context={ 'result' : { \"a\" : 1 } })\n assert tranql.context.resolve_arg (\"$variable\")['a'] == 1\ndef test_ast_set_graph (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Set a variable to the value returned by executing a JSONPath query. \"\"\"\n print (\"test_ast_set_graph ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n statement = SetStatement (variable=\"variable\", value=None, jsonpath_query=\"$.nodes.[*]\")\n statement.execute (tranql, context={\n 'result' : {\n \"nodes\" : [ {\n \"id\" : \"x:y\"\n } ]\n }\n })\n assert tranql.context.resolve_arg (\"$variable\")[0]['id'] == \"x:y\"\ndef test_ast_generate_questions (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- named query concepts work.\n -- the question graph is build incorporating where clause constraints.\n \"\"\"\n print (\"test_ast_set_generate_questions ()\")\n app = TranQL ()\n app.resolve_names = False\n ast = app.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/clinical/cohort/disease_to_chemical_exposure'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n questions = ast.statements[0].generate_questions (app)\n assert questions[0]['question_graph']['nodes'][0]['curie'] == 'MONDO:0004979'\n assert questions[0]['question_graph']['nodes'][0]['type'] == 'disease'\ndef test_ast_format_constraints (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- The syntax to pass values to reasoners in the where clause (e.g. 
\"icees.foo = bar\") functions properly\n \"\"\"\n print(\"test_ast_format_constraints ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT population_of_individual_organisms->chemical_substance\n FROM \"/clinical/cohort/disease_to_chemical_exposure\"\n WHERE icees.should_format = 1\n AND robokop.should_not_format = 0\n \"\"\")\n select = ast.statements[0]\n select.format_constraints(tranql)\n print(select.where)\n assert_lists_equal(select.where, [\n ['should_format', '=', 1],\n ['should_format', '=', 1],\n ['robokop.should_not_format', '=', 0],\n ['robokop.should_not_format', '=', 0]\n ])\ndef test_ast_backwards_arrow (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n print(\"test_ast_backwards_arrow ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT gene->biological_process<-microRNA\n FROM \"/schema\"\n \"\"\")\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n backwards_questions = statements[1].generate_questions(tranql)\n\n assert len(backwards_questions) == 1\n assert len(backwards_questions[0][\"question_graph\"][\"edges\"]) == 1\n assert backwards_questions[0][\"question_graph\"][\"edges\"][0][\"source_id\"] == \"microRNA\"\n assert backwards_questions[0][\"question_graph\"][\"edges\"][0][\"target_id\"] == \"biological_process\"\ndef test_ast_decorate_element (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- The SelectStatement::decorate method properly decorates both nodes and edges\n \"\"\"\n print(\"test_ast_decorate_element ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT chemical_substance->disease\n FROM \"/graph/gamma/quick\"\n \"\"\")\n select = ast.statements[0]\n node = {\n \"id\": \"CHEBI:36314\",\n \"name\": \"glycerophosphoethanolamine\",\n \"omnicorp_article_count\": 288,\n \"type\": \"chemical_substance\"\n }\n edge = {\n \"ctime\": [\n 1544077522.7678425\n ],\n \"edge_source\": [\n \"chembio.graph_pubchem_to_ncbigene\"\n ],\n \"id\": \"df662e2842d44fa2c0b5d945044317e3\",\n \"predicate_id\": \"SIO:000203\",\n \"publications\": [\n \"PMID:16217747\"\n ],\n \"relation\": [\n \"CTD:interacts_with\"\n ],\n \"relation_label\": [\n \"interacts\"\n ],\n \"source_id\": \"CHEBI:36314\",\n \"target_id\": \"HGNC:8971\",\n \"type\": \"directly_interacts_with\",\n \"weight\": 0.4071474314830641\n }\n select.decorate(node,True,{\n \"schema\" : select.get_schema_name(tranql)\n })\n select.decorate(edge,False,{\n \"schema\" : select.get_schema_name(tranql)\n })\n\n assert_lists_equal(node[\"reasoner\"],[\"robokop\"])\n\n assert_lists_equal(edge[\"reasoner\"],[\"robokop\"])\n assert_lists_equal(edge[\"source_database\"],[\"unknown\"])\ndef test_ast_resolve_name (requests_mock):\n set_mock(requests_mock, \"resolve_name\")\n \"\"\" Validate that\n -- The SelectStatement::resolve_name method will correctly retrieve equivalent identifiers from a given name\n \"\"\"\n print(\"test_ast_resolve_name ()\")\n assert_lists_equal(SelectStatement.resolve_name(\"ibuprofen\",\"chemical_substance\"),[\n 'CHEBI:132922',\n 'CHEBI:5855',\n 'CHEBI:43415',\n 'PUBCHEM:3672',\n 'MESH:D007052',\n 'CHEBI:5855',\n 'CHEMBL:CHEMBL521']\n )\ndef test_ast_predicate_question (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- A query with a predicate will be properly formatted into a question graph\n \"\"\"\n print(\"test_ast_predicates ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT chemical_substance-[treats]->disease\n FROM 
\"/graph/gamma/quick\"\n WHERE chemical_substance='CHEMBL:CHEMBL521'\n \"\"\")\n select = ast.statements[0]\n question = select.generate_questions(tranql)[0][\"question_graph\"]\n\n assert len(question[\"edges\"]) == 1\n\n assert \"type\" in question[\"edges\"][0]\n assert question[\"edges\"][0][\"type\"] == \"treats\"\ndef test_ast_multiple_reasoners (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- A query spanning multiple reasoners will query multiple reasoners.\n -- A transitions that multiple reasoners support will query each reasoner that supports it.\n \"\"\"\n print(\"test_ast_multiple_reasoners ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT chemical_substance->disease->gene\n FROM \"/schema\"\n \"\"\")\n # RTX and Robokop both support transitions between chemical_substance->disease and only Robokop supports transitions between disease->gene\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n assert_lists_equal(statements[0].query.order,['chemical_substance','disease'])\n assert statements[0].get_schema_name(tranql) == \"robokop\"\n\n assert_lists_equal(statements[1].query.order,['chemical_substance','disease'])\n assert statements[1].get_schema_name(tranql) == \"rtx\"\n\n assert_lists_equal(statements[2].query.order,['disease','gene'])\n assert statements[2].get_schema_name(tranql) == \"robokop\"\ndef test_ast_merge_knowledge_maps (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n tranql = TranQL ()\n tranql.asynchronous = False\n tranql.resolve_names = False\n ast = tranql.parse (\"\"\"\n select chemical_substance->disease->gene\n from \"/schema\"\n where chemical_substance=\"CHEMBL:CHEMBL3\"\n \"\"\")\n\n # select = ast.statements[0]\n # statements = select.plan (select.planner.plan (select.query))\n # print(statements[0].query.order)\n\n # (select.execute_plan(tranql))\n\n responses = [\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'chemical_substance' : 'CHEBI:100',\n 'disease' : 'MONDO:50'\n },\n 'edge_bindings' : {\n 'e0' : 'ROOT_EDGE'\n }\n }\n ],\n 'question_order' : ['chemical_substance','disease']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'disease' : 'MONDO:50',\n 'gene' : 'HGNC:1',\n 'metabolite' : 'KEGG:C00017'\n },\n 'edge_bindings' : {\n 'e1' : 'TEST_EDGE'\n }\n }\n ],\n 'question_order' : ['disease','gene','metabolite']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'disease' : 'MONDO:50',\n 'gene' : 'HGNC:1',\n 'metabolite' : 'KEGG:FOOBAR'\n },\n 'edge_bindings' : {\n\n }\n }\n ],\n 'question_order' : ['disease','gene','metabolite']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'metabolite' : 'KEGG:FOOBAR',\n 'protein' : 'UniProtKB:TESTING'\n },\n 'edge_bindings' : {\n\n }\n }\n ],\n 'question_order' : ['metabolite','protein']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'metabolite' : 'KEGG:C00017',\n 'protein' : 'UniProtKB:Q9NZJ5'\n },\n 'edge_bindings' : {\n\n }\n }\n ],\n 'question_order' : ['metabolite','protein']\n }\n ]\n\n merged = SelectStatement.connect_knowledge_maps(responses,[\n 'chemical_substance',\n 'disease',\n 'gene',\n 'metabolite',\n 'protein'\n ])\n\n assert_lists_equal(ordered(merged), ordered([\n {\n \"node_bindings\" : {\n \"chemical_substance\" : \"CHEBI:100\",\n \"disease\" : \"MONDO:50\",\n \"gene\" : \"HGNC:1\",\n \"metabolite\" : \"KEGG:FOOBAR\",\n \"protein\" : \"UniProtKB:TESTING\"\n },\n \"edge_bindings\" : {\n \"e0\" : \"ROOT_EDGE\"\n }\n },\n {\n 
\"node_bindings\" : {\n \"chemical_substance\" : \"CHEBI:100\",\n \"disease\" : \"MONDO:50\",\n \"gene\" : \"HGNC:1\",\n \"metabolite\" : \"KEGG:C00017\",\n \"protein\" : \"UniProtKB:Q9NZJ5\"\n },\n \"edge_bindings\" : {\n \"e0\" : \"ROOT_EDGE\",\n \"e1\" : \"TEST_EDGE\",\n }\n }\n ]))\n\n # print(json.dumps(merged,indent=2))\n\ndef test_ast_merge_results (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- Results from the query plan are being merged together correctly\n \"\"\"\n print(\"test_ast_merge_answers ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n ast = tranql.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/clinical/cohort/disease_to_chemical_exposure'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n\n select = ast.statements[0]\n\n # What is the proper format for the name of a mock file? This should be made into one\n mock_responses = [\n {\n 'knowledge_graph': {\n 'nodes': [\n {'id': 'CHEBI:28177', 'type': 'chemical_substance'},\n {'id': 'HGNC:2597', 'type': 'gene'},\n {\n 'id': 'egg',\n 'name':'test_name_merge',\n 'type': 'foo_type',\n 'test_attr': ['a','b']\n },\n {\n 'id': 'equivalent_identifier_merge',\n 'equivalent_identifiers': ['TEST:00000'],\n 'merged_property': [\n 'a',\n 'b'\n ]\n }\n ],\n 'edges': [\n {'id': 'e0', 'source_id': 'CHEBI:28177', 'target_id': 'HGNC:2597'},\n {\n # Test if edges that are connected to merged nodes will be successfully merged with other duplicate edges\n 'source_id' : 'CHEBI:28177',\n 'target_id' : 'egg',\n 'type': ['merge_this'],\n 'merge_this_list' : ['edge_1'],\n 'unique_attr_e_1' : 'e_1',\n 'id' : 'winning_edge_id'\n },\n ]\n },\n 'knowledge_map': [\n {\n 'node_bindings': {\n 'chemical_substance': 'CHEBI:28177',\n 'gene': 'HGNC:2597'\n },\n 'edge_bindings': {}\n }\n ]\n },\n {\n 'knowledge_graph': {\n 'nodes': [\n {'id': 'CHEBI:28177', 'type': 'chemical_substance'},\n {\n 'id': 'also_test_array_type_and_string_type_merge',\n 'name':'test_name_merge',\n 'type': ['foo_type','bar_type'],\n 'test_attr': ['a','c']\n },\n {'id': 'TEST:00000', 'type': 'test', 'merged_property': ['a','c']},\n ],\n 'edges': [\n {'id': 'e0', 'source_id': 'CHEBI:28177', 'target_id': 'TEST:00000'},\n {\n 'source_id' : 'CHEBI:28177',\n 'target_id' : 'also_test_array_type_and_string_type_merge',\n 'type': ['merge_this'],\n 'merge_this_list' : ['edge_2'],\n 'unique_attr_e_2' : 'e_2'\n }\n ]\n },\n 'knowledge_map': [\n {\n 'node_bindings': {\n 'chemical_substance': 'CHEBI:28177',\n 'test': 'TEST:00000'\n },\n 'edge_bindings': {}\n }\n ]\n }\n ]\n\n expected_result = {\n \"knowledge_graph\": {\n \"edges\": [\n {\n \"id\": \"e0\",\n \"source_id\": \"CHEBI:28177\",\n \"target_id\": \"HGNC:2597\",\n \"type\": []\n },\n {\n \"id\": \"e0\",\n \"source_id\": \"CHEBI:28177\",\n \"target_id\": \"equivalent_identifier_merge\",\n \"type\": []\n },\n {\n \"id\" : \"winning_edge_id\",\n \"source_id\" : \"CHEBI:28177\",\n \"target_id\" : \"egg\",\n \"type\" : [\"merge_this\"],\n \"merge_this_list\" : [\"edge_1\", \"edge_2\"],\n \"unique_attr_e_1\" : \"e_1\",\n \"unique_attr_e_2\" : \"e_2\"\n }\n ],\n \"nodes\": [\n {\n \"equivalent_identifiers\": [\n \"CHEBI:28177\"\n ],\n \"id\": \"CHEBI:28177\",\n \"type\": [\"chemical_substance\"]\n },\n {\n \"equivalent_identifiers\": [\n \"HGNC:2597\"\n ],\n \"id\": \"HGNC:2597\",\n \"type\": [\"gene\"]\n },\n {\n 
\"equivalent_identifiers\": [\n \"also_test_array_type_and_string_type_merge\",\n \"egg\"\n ],\n \"type\": [\n \"foo_type\",\n \"bar_type\"\n ],\n \"id\": \"egg\",\n \"name\": \"test_name_merge\",\n \"test_attr\": [\n \"a\",\n \"b\",\n \"c\"\n ]\n },\n {\n \"equivalent_identifiers\": [\n \"TEST:00000\",\n \"equivalent_identifier_merge\"\n ],\n \"merged_property\": [\"a\", \"b\", \"c\"],\n \"id\": \"equivalent_identifier_merge\",\n \"type\": [\"test\"]\n }\n ]\n },\n \"knowledge_map\": [\n {\n \"edge_bindings\": {},\n \"node_bindings\": {\n \"chemical_substance\": \"CHEBI:28177\",\n \"gene\": \"HGNC:2597\"\n }\n },\n {\n \"edge_bindings\": {},\n \"node_bindings\": {\n \"chemical_substance\": \"CHEBI:28177\",\n \"test\": \"equivalent_identifier_merge\"\n }\n }\n ],\n 'question_graph': {\n 'edges': [\n {\n 'id': 'foo',\n 'type': 'test'\n }\n ],\n 'nodes': [\n {\n 'id': 'bar',\n 'type': 'bartest'\n }\n ]\n }\n }\n merged_results = select.merge_results (\n mock_responses,\n tranql,\n {\n 'edges': [\n {\n 'id': 'foo',\n 'type': 'test'\n }\n ],\n 'nodes': [\n {\n 'id': 'bar',\n 'type': 'bartest'\n }\n ]\n },\n root_order=None\n )\n assert ordered(merged_results) == ordered(expected_result)\ndef test_ast_plan_strategy (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n print (\"test_ast_plan_strategy ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n # QueryPlanStrategy always uses /schema regardless of the `FROM` clause.\n ast = tranql.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/schema'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n\n select = ast.statements[0]\n plan = select.planner.plan (select.query)\n\n # Assert that it has planned to query both gamma and rtx\n assert (\n (plan[0][1] == \"/graph/gamma/quick\" and plan[1][1] == \"/graph/rtx\") or\n (plan[1][1] == \"/graph/rtx\" and plan[1][1] == \"/graph/gamma/quick\")\n )\n # Both should be querying the same thing (disease->diseasee), differing only in the sub_schema that they are querying\n for sub_schema_plan in plan:\n assert sub_schema_plan[2][0][0].type_name == \"disease\"\n assert sub_schema_plan[2][0][0].name == \"cohort_diagnosis\"\n assert sub_schema_plan[2][0][0].nodes == [\"MONDO:0004979\"]\n\n assert sub_schema_plan[2][0][1].direction == \"->\"\n assert sub_schema_plan[2][0][1].predicate == None\n\n assert sub_schema_plan[2][0][2].type_name == \"disease\"\n assert sub_schema_plan[2][0][2].name == \"diagnoses\"\n assert sub_schema_plan[2][0][2].nodes == []\ndef test_ast_implicit_conversion (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT drug_exposure->chemical_substance\n FROM '/schema'\n \"\"\")\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n\n assert_lists_equal(statements[0].query.order,[\"drug_exposure\",\"chemical_substance\"])\n assert statements[0].get_schema_name(tranql) == \"implicit_conversion\"\n\ndef test_ast_plan_statements (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n print(\"test_ast_plan_statements ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n # QueryPlanStrategy always uses /schema regardless of the `FROM` clause.\n ast = tranql.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/schema'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND 
cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n\n\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n\n assert len(statements) == 2\n\n for statement in statements:\n assert_lists_equal(\n list(statement.query.concepts.keys()),\n [\n \"cohort_diagnosis\",\n \"diagnoses\"\n ]\n )\n\n assert statement.query.concepts['cohort_diagnosis'].nodes == [\"MONDO:0004979\"]\n assert statement.query.concepts['diagnoses'].nodes == []\n # TODO: figure out why there are duplicates generated??\n assert_lists_equal(statement.where, [\n ['cohort_diagnosis', '=', 'MONDO:0004979'],\n ['Sex', '=', '0'], ['Sex', '=', '0'],\n ['cohort', '=', 'all_patients'],\n ['cohort', '=', 'all_patients'],\n ['max_p_value', '=', '0.5'],\n ['max_p_value', '=', '0.5']\n ])\n assert statement.set_statements == []\n\n assert (\n (statements[0].service == \"/graph/gamma/quick\" and statements[1].service == \"/graph/rtx\") or\n (statements[0].service == \"/graph/rtx\" and statements[1].service == \"/graph/gamma/quick\")\n )\n\ndef test_ast_bidirectional_query (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that we parse and generate queries correctly for bidirectional queries. \"\"\"\n print (\"test_ast_bidirectional_query ()\")\n app = TranQL ()\n app.resolve_names = False\n disease_id = \"MONDO:0004979\"\n chemical = \"PUBCHEM:2083\"\n app.context.set (\"drug\", chemical)\n app.context.set (\"disease\", disease_id)\n mocker = MockHelper ()\n expectations = {\n \"cop.tranql\" : mocker.get_obj (\"bidirectional_question.json\")\n }\n queries = { os.path.join (os.path.dirname (__file__), \"..\", \"queries\", k) : v\n for k, v in expectations.items () }\n for program, expected_output in queries.items ():\n ast = app.parse_file (program)\n statement = ast.statements\n \"\"\" This uses an unfortunate degree of knowledge about the implementation,\n both of the AST, and of theq query. Consider alternatives. \"\"\"\n questions = ast.statements[2].generate_questions (app)\n nodes = questions[0]['question_graph']['nodes']\n edges = questions[0]['question_graph']['edges']\n node_index = { n['id'] : i for i, n in enumerate (nodes) }\n assert nodes[-1]['curie'] == disease_id\n assert nodes[0]['curie'] == chemical\n assert node_index[edges[-1]['target_id']] == node_index[edges[-1]['source_id']] - 1\n\n#####################################################\n#\n# Interpreter tests. Test the interpreter interface.\n#\n#####################################################\ndef test_interpreter_set (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Test set statements by executing a few and checking values after. 
\"\"\"\n print (\"test_interpreter_set ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n tranql.execute (\"\"\"\n -- Test set statements.\n SET disease = 'asthma'\n SET max_p_value = '0.5'\n SET cohort = 'COHORT:22'\n SET population_density = 2\n SET icees.population_density_cluster = 'http://localhost/ICEESQuery'\n SET gamma.quick = 'http://robokop.renci.org:80/api/simple/quick/' \"\"\")\n\n variables = [ \"disease\", \"max_p_value\", \"cohort\", \"icees.population_density_cluster\", \"gamma.quick\" ]\n output = { k : tranql.context.resolve_arg (f\"${k}\") for k in variables }\n #print (f\"resolved variables --> {json.dumps(output, indent=2)}\")\n assert output['disease'] == \"asthma\"\n assert output['cohort'] == \"COHORT:22\"\n\ndef test_program (requests_mock):\n print (\"test_program ()\")\n mock_map = MockMap (requests_mock, \"workflow-5\")\n tranql = TranQL (options = {\n \"asynchronous\" : False,\n \"resolve_names\" : False\n })\n ast = tranql.execute (\"\"\"\n --\n -- Workflow 5\n --\n -- Modules 1-4: Chemical Exposures by Clinical Clusters\n -- For sub-clusters within the overall ICEES asthma cohort defined by\n -- differential population density, which chemicals are related to these\n -- clusters with a p_value less than some threshold?\n --\n -- Modules 5-*: Knowledge Graph Phenotypic Associations\n -- For chemicals produced by the first steps, what phenotypes are\n -- associated with exposure to these chemicals?\n --\n SET id_filters = \"SCTID,rxcui,CAS,SMILES,umlscui\"\n\n SELECT population_of_individual_organisms->drug_exposure\n FROM \"/clinical/cohort/disease_to_chemical_exposure\"\n WHERE EstResidentialDensity < '2'\n AND population_of_individual_organizms = 'x'\n AND cohort = 'all_patients'\n AND max_p_value = '0.1'\n SET '$.knowledge_graph.nodes.[*].id' AS chemical_exposures\n\n SELECT chemical_substance->gene->biological_process->phenotypic_feature\n FROM \"/graph/gamma/quick\"\n WHERE chemical_substance = $chemical_exposures\n SET knowledge_graph\n \"\"\")\n\n #print (f\"{ast}\")\n expos = tranql.context.resolve_arg(\"$chemical_exposures\")\n #print (f\" expos =======> {json.dumps(expos)}\")\n\n kg = tranql.context.resolve_arg(\"$knowledge_graph\")\n assert kg['knowledge_graph']['nodes'][0]['id'] == \"CHEBI:28177\"\n assert kg['knowledge_map'][0]['node_bindings']['chemical_substance'] == \"CHEBI:28177\"\n","sub_path":"tranql/tests/test_tranql.py","file_name":"test_tranql.py","file_ext":"py","file_size_in_byte":33425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"613254284","text":"import logging\nfrom random import choice\n\nfrom classes.fighter import Fighter\nfrom common import global_vars as gv\nfrom common.helpers import tile_blocked_by\nfrom data.actor_data.barks_data import barks_data\nfrom gui.messages import Message, MessageType, LogLevel\n\n\nclass Monster(Fighter):\n \"\"\" base-class for all hostile mobs \"\"\"\n\n def __init__(self, x, y, name, char, color, descr, hp, nat_armor, vision, endurance, agility, unarmed_dmg, bark_types=None, loadouts=None, faction=None, ai_comp=None):\n super().__init__(x, y, name, char, color, descr, hp, nat_armor, vision, endurance, agility, unarmed_dmg, loadouts=loadouts, faction=faction, ai_comp=ai_comp)\n\n # create the bark.dictionary for the specific monster\n self.barks = None\n if bark_types is not None:\n\n barks = {}\n\n # merge the dictionaries of all types into a single dictionary\n for type in bark_types:\n d = barks_data[type]\n for key, value in 
d.items():\n for i in value:\n try:\n barks[key].append(i)\n except:\n barks[key] = [i]\n\n self.barks = barks\n\n def attack(self, target):\n \"\"\" basic attack function for monsters \"\"\"\n\n weapon = self.get_weapon()\n\n # if no weapon exists, use unarmed damage\n dmg_done = weapon.hit(target) if weapon else self.hit(target)\n\n if dmg_done > 0 and weapon:\n # make the target take some damage\n Message('The {0} hits you with a {1}'.format(self.name, weapon.name),\n msg_type=MessageType.INFO_BAD, log_level=LogLevel.COMBAT)\n target.take_damage(dmg_done)\n elif dmg_done > 0 and not weapon:\n Message('The {0} rends you with his claws.'.format(self.name),\n msg_type=MessageType.INFO_BAD, log_level=LogLevel.COMBAT)\n target.take_damage(dmg_done)\n elif dmg_done <= 0 and weapon:\n Message(self.name.capitalize() + ' attacks you but it has no effect!',\n msg_type=MessageType.INFO_GOOD, log_level=LogLevel.COMBAT)\n else:\n Message('The {0} pummels you with their fist without effect'.format(self.name),\n msg_type=MessageType.INFO_GOOD, log_level=LogLevel.COMBAT)\n\n # engage the target lock if possible\n if target == gv.player and gv.player.opponent is None and self.hp > 0:\n gv.player.opponent = self\n\n def move(self, dx, dy):\n \"\"\" Basic move function for monsters \"\"\"\n\n to_x, to_y = self.x + dx, self.y + dy\n\n if gv.game_map.walkable[to_x][to_y]:\n\n # check if a blocking object is in the target tile\n target = tile_blocked_by(to_x, to_y)\n\n if target is None:\n self.x += dx\n self.y += dy\n\n # if blocking object is an enemy target\n elif target is gv.player:\n self.attack(target)\n\n def bark(self,type):\n \"\"\" make some sounds \"\"\"\n if self.barks is not None:\n\n try:\n bark = choice(self.barks[type])\n except:\n logging.error('Could not find bark-type {0} in {1}.'.format(type, self.barks))\n else:\n Message('The {0} {1}'.format(self.name, bark), msg_type=MessageType.FLUFF, log_level=LogLevel.GAMEPLAY)\n #Message(random.choice(self.owner.barks), msg_type=MessageType.FLUFF, log_level=LogLevel.GAMEPLAY)","sub_path":"classes/monsters/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"396150683","text":"import pgzrun\n\nWIDTH = 300\nHEIGHT = 300\n\na = Actor('alien', pos=(100, 100))\n# a.angle = 90\n# a.pos = (120, 200)\n# a.x = 30\n# a.y = 30\n\ndef draw():\n screen.clear()\n #screen.fill((255, 255, 255))\n a.draw()\n\npgzrun.go()\n","sub_path":"common_py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"336621499","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.tests.helper import pytest\nfrom .. 
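The bark-merging loop in Monster.__init__ above builds a dict of lists with a try/except; the same merge reads more directly with a defaultdict. A standalone sketch with invented bark data standing in for the imported barks_data:

from collections import defaultdict

barks_data = {'growler': {'attack': ['growls'], 'idle': ['sniffs']},
              'howler': {'attack': ['howls']}}          # hypothetical stand-in data
barks = defaultdict(list)
for bark_type in ('growler', 'howler'):
    for key, lines in barks_data[bark_type].items():
        barks[key].extend(lines)
print(dict(barks))  # {'attack': ['growls', 'howls'], 'idle': ['sniffs']}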
import (calculate_total_error, subtract_background,\n interpolate_masked_data)\n\nSHAPE = (5, 5)\nDATAVAL = 2.\nDATA = np.ones(SHAPE) * DATAVAL\nMASK = np.zeros_like(DATA, dtype=bool)\nMASK[2, 2] = True\nERROR = np.ones(SHAPE)\nEFFGAIN = np.ones(SHAPE) * DATAVAL\nBACKGROUND = np.ones(SHAPE)\nWRONG_SHAPE = np.ones((2, 2))\n\n\nclass TestCalculateTotalError(object):\n def test_error_shape(self):\n with pytest.raises(ValueError):\n calculate_total_error(DATA, error=WRONG_SHAPE,\n effective_gain=EFFGAIN)\n\n def test_gain_shape(self):\n with pytest.raises(ValueError):\n calculate_total_error(DATA, error=ERROR,\n effective_gain=WRONG_SHAPE)\n\n @pytest.mark.parametrize('effective_gain', (0, -1))\n def test_gain_le_zero(self, effective_gain):\n with pytest.raises(ValueError):\n calculate_total_error(DATA, error=ERROR,\n effective_gain=effective_gain)\n\n def test_gain_scalar(self):\n error_tot = calculate_total_error(DATA, error=ERROR,\n effective_gain=2.)\n assert_allclose(error_tot, np.sqrt(2.) * ERROR)\n\n def test_gain_array(self):\n error_tot = calculate_total_error(DATA, error=ERROR,\n effective_gain=EFFGAIN)\n assert_allclose(error_tot, np.sqrt(2.) * ERROR)\n\n\nclass TestSubtractBackground(object):\n def test_background_shape(self):\n with pytest.raises(ValueError):\n subtract_background(DATA, WRONG_SHAPE)\n\n def test_background_scalar(self):\n data, background = subtract_background(DATA, 1.)\n assert_allclose(data, DATA - 1.)\n assert_allclose(background, BACKGROUND)\n\n def test_background_array(self):\n data, background = subtract_background(DATA, BACKGROUND)\n assert_allclose(data, DATA - 1.)\n assert_allclose(background, BACKGROUND)\n\n\nclass TestInterpolateMaskedData(object):\n def test_mask_shape(self):\n with pytest.raises(ValueError):\n interpolate_masked_data(DATA, WRONG_SHAPE)\n\n def test_error_shape(self):\n with pytest.raises(ValueError):\n interpolate_masked_data(DATA, MASK, error=WRONG_SHAPE)\n\n def test_background_shape(self):\n with pytest.raises(ValueError):\n interpolate_masked_data(DATA, MASK, background=WRONG_SHAPE)\n\n def test_interpolation(self):\n data2 = DATA.copy()\n data2[2, 2] = 100.\n error2 = ERROR.copy()\n error2[2, 2] = 100.\n background2 = BACKGROUND.copy()\n background2[2, 2] = 100.\n data, error, background = interpolate_masked_data(\n data2, MASK, error=error2, background=background2)\n assert_allclose(data, DATA)\n assert_allclose(error, ERROR)\n assert_allclose(background, BACKGROUND)\n\n def test_interpolation_larger_mask(self):\n data2 = DATA.copy()\n data2[2, 2] = 100.\n error2 = ERROR.copy()\n error2[2, 2] = 100.\n background2 = BACKGROUND.copy()\n background2[2, 2] = 100.\n mask2 = MASK.copy()\n mask2[1:4, 1:4] = True\n data, error, background = interpolate_masked_data(\n data2, mask2, error=error2, background=background2)\n assert_allclose(data, DATA)\n assert_allclose(error, ERROR)\n assert_allclose(background, BACKGROUND)\n","sub_path":"photutils/utils/tests/test_prepare_data.py","file_name":"test_prepare_data.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"252324986","text":"import minqlx\n\nimport requests\nimport threading\n\n\"\"\"\nPlugin that restricts playing on the server to certain QLStats.net privacy settings.\n\nUses:\n- qlx_qlstatsPrivacyKick (default: 0), set to 1 to kick any clients with unallowed privacy settings upon connect.\n- qlx_qlstatsPrivacyWhitelist (default: \"public, anonymous, private, untracked\")\n List of 
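The TestCalculateTotalError assertions above are consistent with the usual CCD noise model, total_error = sqrt(error**2 + data / effective_gain): with data=2, error=1 and gain=2 this gives sqrt(2), exactly what the tests expect. A numpy sketch of that formula (inferred from the tests, not copied from photutils):

import numpy as np

def total_error_sketch(data, error, effective_gain):
    # background variance plus the Poisson variance of the source counts
    return np.sqrt(error ** 2 + data / effective_gain)

assert np.allclose(total_error_sketch(np.full((5, 5), 2.), np.ones((5, 5)), 2.),
                   np.sqrt(2.) * np.ones((5, 5)))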
allowed privacy settings on this server. Take out any value from the default expansive list.\n- qlx_qlstatsPrivacyJoinAttempts (default: 5), amount of join attempts before the player gets kicked,\n if privacyKick is disabled. Set to -1 to disable kicking of players for their join attempts.\n\"\"\"\n\nCOLORED_QLSTATS_INSTRUCTIONS = \"Error: Open qlstats.net, click Login/Sign-up, set privacy settings to ^6{}^7, \" \\\n \"click save and reconnect!\"\n\n\nclass qlstats_privacy_policy(minqlx.Plugin):\n\n def __init__(self):\n super().__init__()\n self.set_cvar_once(\"qlx_qlstatsPrivacyBlock\", \"1\")\n self.set_cvar_once(\"qlx_qlstatsPrivacyKick\", \"0\")\n self.set_cvar_once(\"qlx_qlstatsPrivacyWhitelist\", \"public, anonymous, private, untracked\")\n self.set_cvar_once(\"qlx_qlstatsPrivacyJoinAttempts\", \"5\")\n\n self.plugin_enabled = True\n self.kick_players = self.get_cvar(\"qlx_qlstatsPrivacyKick\", bool)\n self.allowed_privacy = self.get_cvar(\"qlx_qlstatsPrivacyWhitelist\", list)\n self.max_num_join_attempts = self.get_cvar(\"qlx_qlstatsPrivacyJoinAttempts\", int)\n\n self.exceptions = set()\n self.join_attempts = dict()\n\n # Collection of threads looking up elo of players {steam_id: thread }\n self.connectthreads = {}\n\n self.add_hook(\"player_connect\", self.handle_player_connect, priority=minqlx.PRI_HIGHEST)\n self.add_hook(\"player_disconnect\", self.handle_player_disconnect)\n self.add_hook(\"team_switch_attempt\", self.handle_team_switch_attempt)\n\n self.add_command((\"except\", \"e\"), self.cmd_policy_exception, permission=5, usage=\"\")\n self.add_command(\"privacy\", self.cmd_switch_plugin, permission=1, usage=\"[status]\")\n\n def check_balance_plugin_loaded(self):\n return 'balance' in self.plugins\n\n def check_for_right_version_of_balance_plugin(self):\n return hasattr(self.plugins[\"balance\"], \"player_info\")\n\n def check_for_correct_balance_plugin(self):\n if not self.check_balance_plugin_loaded():\n self.logger.info(\"Balance plugin not loaded. \"\n \"This plugin just works with the balance plugin in place.\")\n return False\n\n if not self.check_for_right_version_of_balance_plugin():\n self.logger.info(\"Wrong version of the ^6balance^7 plugin loaded. 
Make sure to load \"\n \"https://github.com/MinoMino/minqlx-plugins/blob/master/balance.py.\")\n return False\n\n return True\n\n def handle_player_connect(self, player):\n if not self.plugin_enabled:\n return\n\n if not self.game:\n return\n\n if not self.check_for_correct_balance_plugin():\n self.disable_policy_check(minqlx.CHAT_CHANNEL)\n return\n\n b = minqlx.Plugin._loaded_plugins['balance']\n b.add_request({player.steam_id: self.game.type_short}, self.callback_connect, minqlx.CHAT_CHANNEL)\n\n if not self.get_cvar(\"qlx_qlstatsPrivacyBlock\", bool):\n return\n\n if player.steam_id not in self.connectthreads:\n ct = ConnectThread(player.steam_id, self.get_cvar(\"qlx_balanceApi\"))\n self.connectthreads[player.steam_id] = ct\n ct.start()\n self.remove_thread(player.steam_id) # remove it after a while\n\n # Check if thread is ready or not\n ct = self.connectthreads[player.steam_id]\n if ct.isAlive():\n return \"Fetching your qlstats settings...\"\n\n # Check if thread is ready or not\n try:\n res = ct._result\n if not res:\n return \"Fetching your qlstats settings...\"\n\n if res.status_code != requests.codes.ok:\n raise IOError(\"Invalid response code from qlstats.net.\")\n self.logger.debug(res.text)\n js = res.json()\n\n if \"playerinfo\" not in js:\n raise IOError(\"Invalid response content from qlstats.net.\")\n\n if str(player.steam_id) not in js[\"playerinfo\"]:\n raise IOError(\"Response from qlstats.net did not include data for the requested player.\")\n\n if \"privacy\" not in js[\"playerinfo\"][str(player.steam_id)]:\n raise IOError(\"Response from qlstats.net did not include privacy information.\")\n\n if js[\"playerinfo\"][str(player.steam_id)][\"privacy\"] not in self.allowed_privacy:\n return minqlx.Plugin.clean_text(self.colored_qlstats_instructions())\n\n except Exception as e:\n minqlx.console_command(\"echo QLStatsPrivacyError: {}\".format(e))\n\n def callback_connect(self, players, channel):\n if not self.plugin_enabled:\n return\n\n if not self.kick_players:\n return\n\n player_info = self.plugins[\"balance\"].player_info\n\n for sid in players:\n if sid in self.exceptions:\n continue\n\n if sid not in player_info:\n continue\n\n if player_info[sid][\"privacy\"] not in self.allowed_privacy:\n self.delayed_kick(sid, minqlx.Plugin.clean_text(self.colored_qlstats_instructions()))\n\n def colored_qlstats_instructions(self):\n return COLORED_QLSTATS_INSTRUCTIONS.format(\"^7, ^6\".join(self.allowed_privacy))\n\n @minqlx.delay(5)\n def delayed_kick(self, sid, reason):\n self.kick(sid, reason)\n\n def handle_player_disconnect(self, player, reason):\n if player.steam_id in self.exceptions:\n self.exceptions.remove(player.steam_id)\n\n if player.steam_id in self.join_attempts:\n del self.join_attempts[player.steam_id]\n\n def handle_team_switch_attempt(self, player, old, new):\n if not self.plugin_enabled:\n return\n\n if not self.game:\n return\n\n if player.steam_id in self.exceptions:\n return\n\n if not self.check_for_correct_balance_plugin():\n self.disable_policy_check(minqlx.CHAT_CHANNEL)\n return\n\n if new in [\"red\", \"blue\", \"any\"]:\n player_info = self.plugins[\"balance\"].player_info\n if player.steam_id not in player_info:\n player.tell(\"We couldn't fetch your ratings, yet. 
You will not be able to join, until we did.\")\n return minqlx.RET_STOP_ALL\n if player_info[player.steam_id][\"privacy\"] not in self.allowed_privacy:\n if self.max_num_join_attempts > 0:\n if player.steam_id not in self.join_attempts:\n self.join_attempts[player.steam_id] = self.max_num_join_attempts\n\n self.join_attempts[player.steam_id] -= 1\n\n if self.join_attempts[player.steam_id] < 0:\n player.kick(minqlx.Plugin.clean_text(self.colored_qlstats_instructions()))\n return minqlx.RET_STOP_ALL\n self.msg(\"{}^7 not allowed to join due to {} QLStats.net privacy settings. \"\n \"{} join attempts before automatically kicking you.\"\n .format(player.name, player_info[player.steam_id][\"privacy\"].lower(),\n self.join_attempts[player.steam_id]))\n player.tell(\"Not allowed to join due to ^6{}1^7 QLStats.net data. \"\n \"{} join attempts before automatically kicking you.\"\n .format(player_info[player.steam_id][\"privacy\"].lower(),\n self.join_attempts[player.steam_id]))\n else:\n self.msg(\"{}^7 not allowed to join due to {} QLStats.net privacy settings. \"\n .format(player.name, player_info[player.steam_id][\"privacy\"].lower()))\n player.tell(\"Not allowed to join due to ^6{}1^7 QLStats.net data. \"\n .format(player_info[player.steam_id][\"privacy\"].lower()))\n\n player.center_print(\"^3Join not allowed. See instructions in console!\")\n player.tell(self.colored_qlstats_instructions())\n\n if old in [\"spectator\", \"free\"]:\n return minqlx.RET_STOP_ALL\n\n player.put(\"spectator\")\n\n def cmd_policy_exception(self, player, msg, channel):\n if len(msg) != 2:\n return minqlx.RET_USAGE\n\n teams = self.teams()\n speccing_players = teams[\"spectator\"] + teams[\"free\"]\n except_player = self.find_player(msg[1], speccing_players)\n\n if except_player is None or len(except_player) == 0:\n player.tell(\"^7Could not find player identified by ^1{}^7.\".format(msg[1]))\n return\n\n if len(except_player) > 1:\n player.tell(\"^7More than one matching spectator found: {}\"\n .format(\"^7, \".join([player.name for player in except_player])))\n player.tell(\"^7Please be more specific which one to put on the exception list!\")\n return\n\n channel.reply(\"^7An admin has allowed ^2{}^7 to temporarily join despite missing or \"\n \"inadequate qlstats privacy information.\"\n .format(except_player[0].clean_name))\n self.exceptions.add(except_player[0].steam_id)\n\n def cmd_switch_plugin(self, player, msg, channel):\n if len(msg) > 2:\n return minqlx.RET_USAGE\n\n if len(msg) == 2:\n if msg[1] != \"status\":\n return minqlx.RET_USAGE\n\n channel.reply(\"^7QLStats policy check is {}\".format(\"enabled\" if self.plugin_enabled else \"disabled\"))\n return\n\n if not self.plugin_enabled:\n self.enable_policy_check(channel)\n return\n\n self.disable_policy_check(channel)\n\n def disable_policy_check(self, channel):\n self.plugin_enabled = False\n channel.reply(\"^7QLStats policy check disabled. Everyone will be able to join.\")\n\n def enable_policy_check(self, channel):\n if not self.check_for_correct_balance_plugin():\n return\n\n self.plugin_enabled = True\n channel.reply(\"^7QLStats policy check enabled.\")\n\n if self.kick_players:\n self.callback_connect(\n {player.steam_id: self.game.type_short for player in self.players()}, channel)\n return\n\n teams = self.teams()\n player_info = self.plugins[\"balance\"].player_info\n\n for player in teams[\"red\"] + teams[\"blue\"]:\n if player.steam_id not in player_info:\n player.tell(\"We couldn't fetch your ratings, yet. 
You will not be able to play, until we did.\")\n player.put(\"spectator\")\n continue\n\n if player_info[player.steam_id][\"privacy\"] not in self.allowed_privacy:\n self.msg(\"{}^7 not allowed to join due to {} QLStats.net privacy settings.\"\n .format(player.name, player_info[player.steam_id][\"privacy\"].lower()))\n player.center_print(\"^3Join not allowed. See instructions in console!\")\n player.tell(\"Not allowed to join due to ^6{}1 7 QLStats.net data.\"\n .format(player_info[player.steam_id][\"privacy\"].lower()))\n player.tell(self.colored_qlstats_instructions())\n player.put(\"spectator\")\n\n @minqlx.delay(30) # 30 seconds\n def remove_thread(self, sid):\n if sid in self.connectthreads:\n del self.connectthreads[sid]\n\n\nclass ConnectThread(threading.Thread):\n\n def __init__(self, steam_id, balance_api):\n super(ConnectThread, self).__init__()\n self._balance_api = balance_api\n self._steam_id = steam_id\n self._result = None\n\n def run(self):\n url = \"http://qlstats.net/{elo}/{}\".format(self._steam_id, elo=self._balance_api)\n self._result = requests.get(url)\n","sub_path":"src/main/python/qlstats_privacy_policy.py","file_name":"qlstats_privacy_policy.py","file_ext":"py","file_size_in_byte":12291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"595870762","text":"import logging\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import connection\n\ntry:\n from django.apps import apps\n\n get_model = apps.get_model\nexcept ImportError:\n from django.db.models.loading import get_model\n\nfrom django.http import HttpResponse\n\ntry:\n model = get_model(settings.HEALTH_MODEL)\nexcept Exception as e:\n raise ImproperlyConfigured(\n f\"settings.HEALTH_MODEL doesn't resolve to a useable model {str(e)}\"\n )\n\n\nlog = logging.getLogger(__name__)\n\n\ndef health(request):\n # check debug\n if settings.DEBUG:\n log.exception(\"Debug mode not allowed in production\")\n return HttpResponse(\n \"Debug mode not allowed in production\",\n content_type=\"text/plain\",\n status=500,\n )\n\n # check database\n try:\n with connection.cursor() as cursor:\n cursor.execute(\"select 1\")\n assert cursor.fetchone()\n except Exception as e:\n log.exception(f\"Database connectivity failed: {str(e)}\")\n return HttpResponse(\n \"Database connectivity failed\", content_type=\"text/plain\", status=500\n )\n\n return HttpResponse(\"Connectivity OK\", content_type=\"text/plain\", status=200)\n\n\ndef check_data(request):\n if model.objects.all().count() < 30000:\n return HttpResponse(\n \"Too few tellus data in the database\", content_type=\"text/plain\", status=500\n )\n return HttpResponse(\"Database data OK\", content_type=\"text/plain\", status=200)\n","sub_path":"src/health/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"469869297","text":"# server_skel.py\r\nimport socket\r\nimport time\r\nimport threading\r\nimport random\r\n\r\nserver_ip = \"127.0.0.1\"\r\nserver_port = 5005\r\n\r\n# class for thread\r\nclass serverThread (threading.Thread):\r\n\tAVERAGE_DELAY = 100 # ms\r\n\r\n\t# constructor\r\n\tdef __init__(self, data, addr):\r\n\t\tthreading.Thread.__init__(self)\r\n\t\tself.data = data\r\n\t\tself.addr = addr\r\n\r\n\t# method for handling incoming packets\r\n\tdef run(self):\r\n\t\tprint (\"Server: recv \\\"\" + data.decode('utf-8') + \"\\\"\") # 
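ConnectThread above fetches a player's qlstats record, and handle_player_connect digs playerinfo[steam_id]['privacy'] out of the JSON. A minimal synchronous sketch of the same lookup; the URL shape mirrors ConnectThread.run, while the 'elo' default and the error message are placeholders:

import requests

def fetch_privacy(steam_id, balance_api='elo'):
    res = requests.get('http://qlstats.net/{}/{}'.format(balance_api, steam_id))
    if res.status_code != requests.codes.ok:
        raise IOError('Invalid response code from qlstats.net.')
    return res.json()['playerinfo'][str(steam_id)]['privacy']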
receive message\r\n\t\tdropRate = random.uniform(0, 0.4) # unifrom distribution with an average of 0.2\r\n\t\tif (random.random() < dropRate): # drop packet with probability dropRate\r\n\t\t\tprint (\"Server: drop \\\"\" + data.decode('utf-8') + \"\\\"\") # drop message\r\n\t\telse:\r\n\t\t\tdelay = random.expovariate(1/self.AVERAGE_DELAY) # exponential distribution with an average of AVERAGE_DELAY\r\n\t\t\ttime.sleep(delay*0.001) # sleep for delay ms\r\n\t\t\tsock.sendto(self.data, self.addr) # send datagram to client\r\n\t\t\tprint (\"Server: reply \\\"\" + self.data.decode('utf-8') + \"\\\"\") # reply message\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # open UDP socket\r\nsock.bind((server_ip, server_port)) # bind socket\r\n\r\nwhile True:\r\n\tdata, addr = sock.recvfrom(1024) # receive incoming packets\r\n\tserverThread(data, addr).start() # create and start new thread\r\n","sub_path":"server_skel_optional.py","file_name":"server_skel_optional.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"20850401","text":"import os.path\nimport sqlite3\nimport unittest\n\nDATABASE = os.path.join(os.path.dirname(__file__), 'data/test.db')\n\n\nclass TablesMatchTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.conn = sqlite3.connect(DATABASE)\n\n @classmethod\n def tearDownClass(cls):\n cls.conn.close()\n\n def setUp(self):\n self.cursor = self.conn.cursor()\n self.addCleanup(self.cursor.close)\n\n def test_data_match(self):\n \"\"\"Test that same data is stored for same ids in both tables.\"\"\"\n query = \"\"\"\n SELECT COUNT(*) FROM (\n SELECT * FROM (\n SELECT * FROM consultants_backend\n UNION\n SELECT * FROM consultants_frontend)\n GROUP BY id HAVING COUNT(*) > 1);\n \"\"\"\n self.cursor.execute(query)\n rows = self.cursor.fetchone()\n non_matching = rows[0]\n self.assertEqual(\n non_matching, 0,\n \"The number of entries with different data - {}\".format(non_matching))\n\n def test_count_match(self):\n \"\"\"Test that the number of records is the same.\"\"\"\n query = \"\"\"\n SELECT COUNT(*) FROM consultants_backend\n UNION ALL\n SELECT COUNT(*) FROM consultants_frontend;\"\"\"\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n backend_count, frontend_count = [row[0] for row in rows]\n self.assertEqual(\n backend_count, frontend_count,\n 'The number of entries in consultants_backend (count {}) and consultants_frontend '\n '(count {}) tables do not match'.format(backend_count, frontend_count))\n\n def test_id_match(self):\n \"\"\"Test that ids are the same in both tables.\"\"\"\n query = \"\"\"\n SELECT COUNT(*) FROM (\n SELECT * FROM (\n SELECT id FROM consultants_backend\n UNION ALL\n SELECT id FROM consultants_frontend )\n GROUP BY id HAVING COUNT(*) = 1);\n \"\"\"\n self.cursor.execute(query)\n rows = self.cursor.fetchone()\n id_diff_num = rows[0]\n self.assertEqual(\n id_diff_num, 0,\n \"The number of different ids - {}\".format(id_diff_num))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tables_test.py","file_name":"tables_test.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"485092469","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nfrom sys import stdout\nfrom twisted.internet import interfaces, reactor, task, defer, protocol\nfrom twisted.internet.protocol import Factory, Protocol\nfrom 
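A matching client sketch for the lossy UDP echo server above, reusing its host/port constants; the payload and the timeout value are arbitrary:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(1.0)                 # the server drops packets, so never block forever
sock.sendto(b'ping', ('127.0.0.1', 5005))
try:
    data, addr = sock.recvfrom(1024)
    print('Client: recv "{}"'.format(data.decode('utf-8')))
except socket.timeout:
    print('Client: no reply (packet was probably dropped)')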
twisted.internet.endpoints import TCP4ServerEndpoint\nimport construct as c2\nimport simple_message as sm\nfrom twisted.protocols.basic import LineReceiver\n\n\n\nclass feedbackPublisher(Protocol):\n\n def __init__(self):\n self.lc = task.LoopingCall(self.FeedbackMessage) #add both feedback and status as loops to a connection\n self.lc1 = task.LoopingCall(self.StatusMessage)\n self.data = {}\n self.drives_powered = 0\n self.e_stopped = 0\n self.error_code = 0\n self.in_error = 0\n self.in_motion = 0\n self.mode = 0\n self.motion_possible = 0\n\n def connectionMade(self):\n print('Connection made from {}'.format(self.transport.getPeer()))\n self.lc.start(0.5) #Start loops on intervals\n self.lc1.start(3.0)\n print(\"starting feedback\")\n\n def connectionLost(self, reason):\n print('Connection lost from {}'.format(self.transport.getPeer()))\n self.lc.stop() #Stop loops on disconnect\n self.lc1.stop()\n print(\"Stopping feedback\")\n\n def dataReceived(self, data):\n print(\"connect\")\n print(data)\n\n def FeedbackMessage(self):\n #create a feedback message we populate and send\n joint_1 = 0.0\n joint_2 = 0.0\n joint_3 = 0.0\n joint_4 = 0.0\n joint_5 = 0.0\n joint_6 = 0.0\n\n SimpleMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'seq_nr' / c2.Int32sl ,\n 'joint_data'/ c2.Float32b[10]\n ),\n c2.Terminated\n ) #packa a message.\n msg = dict(\n Header=dict(msg_type=10, comm_type=1, reply_type=0),\n body=dict(seq_nr=0,joint_data=[joint_1, joint_2, joint_3, joint_4, joint_5, joint_6,0.0,0.0,0.0,0.0]\n ))\n feedback_data = SimpleMessage.build(msg)\n #print(feedback_data)\n data_len = c2.Int32sl.build(len(feedback_data))\n #print('sending feedback')\n self.transport.write(data_len + feedback_data)\n\n def StatusMessage(self):\n #Create a statusmessage that we populate and send\n StatusMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'drives_powered' / c2.Int32sl,\n 'e_stopped' / c2.Int32sl,\n 'error_code' / c2.Int32sl,\n 'in_error' / c2.Int32sl,\n 'in_motion' / c2.Int32sl,\n 'mode' / c2.Int32sl,\n 'motion_possible' / c2.Int32sl\n ),\n c2.Terminated\n )\n\n self.mode = 2\n self.drives_powered = 1\n self.motion_possible = 1\n\n msg = dict(\n Header=dict(msg_type=13, comm_type=1, reply_type=0),\n body=dict(drives_powered= self.drives_powered,\n e_stopped=self.e_stopped,\n error_code= self.error_code,\n in_error=self.in_error,\n in_motion=self.in_motion,\n mode=self.mode,\n motion_possible=self.motion_possible\n ))\n status_data = StatusMessage.build(msg)\n #print(status_data)\n data_len = c2.Int32sl.build(len(status_data))\n #print('sending status')\n self.transport.write(data_len + status_data)\n\n def jointMessage(self):\n\n\n JointMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'SequenceNumber' / c2.Int32sl,\n 'Joint_data'/c2.Float32b[10],\n 'velocity' / c2.Int32sl,\n 'duration' / c2.Int32sl,\n ),\n c2.Terminated\n )\n\nclass feedbackfactory(Factory):\n protocol = feedbackPublisher\n","sub_path":"simple_message/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"224000425","text":"# Import\nimport numpy as np\nimport colorsys\nfrom PIL import 
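The SimpleMessage framing above is built with the construct library; here is a tiny build/parse round-trip using the same '/'-style field declarations (the field names are arbitrary):

import construct as c2

Header = c2.Struct('msg_type' / c2.Int32sl, 'comm_type' / c2.Int32sl)
raw = Header.build(dict(msg_type=10, comm_type=1))
assert len(raw) == 8                      # two little-endian 32-bit integers
assert Header.parse(raw).msg_type == 10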
Image, ImageDraw\nimport pandas as pd\nfrom sklearn.cluster import MeanShift\n\n\nclass FindAPool:\n\n def __init__(self):\n pass\n\n @staticmethod\n def load_image(image):\n return Image.open(\"{}\".format(image))\n\n @staticmethod\n def image_rgb(image):\n ''' Get the RGB of each pixel of the image'''\n r = [i / 255.0 for i in image.getdata(0)]\n g = [i / 255.0 for i in image.getdata(1)]\n b = [i / 255.0 for i in image.getdata(2)]\n return pd.DataFrame({'r': r, 'g': g, 'b': b})\n\n @staticmethod\n def rgb_2_hsv(dataframe):\n ''' Convert the RGB to HSV '''\n hsv = np.array([colorsys.rgb_to_hsv(x, y, z) for x, y, z\n in zip(dataframe.r, dataframe.g, dataframe.b)])\n df = pd.DataFrame(data=hsv, columns=['h', 's', 'v'])\n return df\n\n @staticmethod\n def find_pix(image, dataframe, h1, h2, s, v):\n ''' Select the pixels who fit the criteria'''\n mask = (dataframe['v'] > v / 100.0) & (dataframe['s'] > s / 100.0) & \\\n (dataframe['h'] > h2 / 360.0) & (dataframe['h'] < h1 / 360.0)\n xs, ys = image.size\n maskbool = mask.values.reshape(ys, xs)\n return maskbool, np.argwhere(maskbool)\n\n @staticmethod\n def colour_pix(image, maskbool):\n ''' Colour the good pixels in red'''\n new_image = image.copy()\n mask = Image.fromarray(np.uint8(255 * maskbool))\n new_image.paste('red', mask=mask)\n\n return new_image\n\n @staticmethod\n def cluster_pixels(piscine_locs,\n clust_algo='meanshift', quantile=0.003):\n ''' Function to group the pixels into swimming pools'''\n\n if clust_algo == 'meanshift':\n # bandwidth = estimate_bandwidth(piscine_locs, quantile=quantile)\n # if bandwidth == 0:\n # while bandwidth ==0:\n # quantile = quantile + 0.001\n # bandwidth = estimate_bandwidth(piscine_locs,\n # quantile=quantile)\n bandwidth = 6\n\n ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n ms.fit(piscine_locs)\n labels = ms.labels_\n cluster_centers = ms.cluster_centers_\n\n labels_unique = np.unique(labels)\n n_clusters_ = len(labels_unique)\n\n return n_clusters_, cluster_centers\n\n @staticmethod\n def draw_pisc(image, clusters):\n ''' Draw circles on'''\n imc = image.copy()\n draw = ImageDraw.Draw(imc)\n r = 2\n for t in clusters:\n draw.ellipse((int(t[1]) - r, int(t[0]) - r, int(t[1]) + r,\n int(t[0]) + r), fill=(255, 255, 255, 255))\n\n return imc\n\n def run(self, image, h1=205, h2=140, s=20, v=65,\n clust_algo='meanshift', quantile=0.003):\n\n im = self.load_image(image)\n print(im.size)\n rgb_df = self.image_rgb(im)\n hsv_df = self.rgb_2_hsv(rgb_df)\n maskbool, piscine_locs = self.find_pix(im, hsv_df, h1, h2, s, v)\n coloredim = self.colour_pix(im, maskbool)\n\n nclusts, ccents = self.cluster_pixels(piscine_locs, clust_algo,\n quantile)\n piscim = self.draw_pisc(coloredim, ccents)\n\n return coloredim, piscim, len(piscine_locs), nclusts\n","sub_path":"src/FindAPool/findapool.py","file_name":"findapool.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"347232391","text":"\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise\r\nfrom tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D\r\nfrom tensorflow.keras.layers import Concatenate\r\nfrom tensorflow.keras.layers import LeakyReLU\r\nfrom tensorflow.keras.layers import UpSampling2D, Conv2D, Conv2DTranspose\r\nfrom tensorflow.keras.models import Sequential, 
Model\r\nfrom tensorflow.keras.optimizers import Adam, RMSprop\r\nfrom tensorflow.keras import losses\r\nfrom tensorflow.keras.utils import to_categorical\r\nimport tensorflow.keras.backend as K\r\nimport scipy\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport utils.plots as plots\r\nfrom skimage.util import random_noise\r\nfrom IPython import display\r\n\r\ndef get_noisy_data(data):\r\n lst_noisy = []\r\n sigma = 0.155\r\n for image in data:\r\n noisy = random_noise(image, var=sigma ** 2)\r\n lst_noisy.append(noisy)\r\n return np.array(lst_noisy)\r\n\r\nclass ALOCC():\r\n def __init__(self, \r\n image_shape,\r\n latent_dim = 100, \r\n filters = (16, 64, 128, 256), \r\n kernel_size = 3, \r\n strides = 1, \r\n padding = \"same\",\r\n learning_rate = 0.002,\r\n r_alpha = 0.2,\r\n checkpoint_dir = None):\r\n\r\n self.image_shape = image_shape\r\n self.latent_dim = latent_dim\r\n self.filters = filters\r\n self.kernel_size = kernel_size\r\n self.strides = strides\r\n self.padding = padding\r\n self.learning_rate = learning_rate\r\n\r\n self.r_alpha = r_alpha\r\n self.checkpoint_dir = checkpoint_dir\r\n\r\n self.best_se = -1\r\n self.best_eval_loss = -1\r\n self.best_mse = -1\r\n self.__build_model() \r\n\r\n def __build_model(self):\r\n image_dims = self.image_shape\r\n optimizer = RMSprop(lr=self.learning_rate, clipvalue=1.0, decay=1e-8)\r\n self.discriminator = self.__build_discriminator(image_dims)\r\n self.discriminator.compile(optimizer=optimizer, loss='binary_crossentropy')\r\n self.generator = self.__build_generator(image_dims)\r\n img = Input(shape=image_dims)\r\n reconstructed_img = self.generator(img)\r\n self.discriminator.trainable = False\r\n validity = self.discriminator(reconstructed_img)\r\n self.adversarial_model = Model(img, [reconstructed_img, validity])\r\n self.adversarial_model.compile(loss=['binary_crossentropy', 'binary_crossentropy'],\r\n loss_weights=[self.r_alpha, 1],\r\n optimizer=optimizer)\r\n\r\n def __build_generator(self, input_shape):\r\n inputs = tf.keras.Input(shape=self.image_shape)\r\n x = inputs\r\n for f in self.filters:\r\n x = layers.Conv2D(\r\n filters=f, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding\r\n )(x)\r\n #x = layers.BatchNormalization(axis=self.image_shape[2])(x)\r\n x = layers.LeakyReLU()(x)\r\n x = layers.MaxPooling2D(pool_size=2)(x)\r\n self.volume_size = tf.keras.backend.int_shape(x)\r\n x = layers.Flatten()(x)\r\n x = layers.Dense(self.latent_dim)(x)\r\n enc = Model(inputs, x, name=\"encoder\")\r\n\r\n latent_inputs = keras.Input(shape=(self.latent_dim,))\r\n x = layers.Dense(np.prod(self.volume_size[1:]))(latent_inputs)\r\n x = layers.Reshape((self.volume_size[1], self.volume_size[2], self.volume_size[3]))(x)\r\n for f in self.filters[::-1]:\r\n x = layers.UpSampling2D(size=2)(x)\r\n x = layers.Conv2DTranspose(\r\n filters=f, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding\r\n )(x)\r\n #x = layers.BatchNormalization(axis=self.image_shape[2])(x)\r\n x = layers.LeakyReLU()(x)\r\n x = layers.Conv2DTranspose(filters=self.image_shape[2], kernel_size=self.kernel_size, padding=self.padding)(\r\n x\r\n )\r\n #x = layers.BatchNormalization()(x)\r\n x = layers.LeakyReLU()(x)\r\n outputs = layers.Conv2DTranspose(self.image_shape[2], 3, activation=\"sigmoid\", padding=self.padding)(x)\r\n dec = keras.Model(latent_inputs, outputs, name=\"decoder\")\r\n\r\n return Model(inputs, dec(enc(inputs)), name=\"generator\")\r\n #return 
Model(inputs, outputs, name=\"generator\")\r\n\r\n # def __build_discriminator(self, input_shape):\r\n # image = Input(shape=input_shape, name='d_input')\r\n # x = layers.Conv2D(filters=16, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding)(image)\r\n # x = layers.BatchNormalization()(x)\r\n # x = layers.LeakyReLU()(x)\r\n # x = layers.MaxPooling2D(pool_size=2)(x)\r\n\r\n # x = layers.Conv2D(filters=32, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding)(x)\r\n # x = layers.BatchNormalization()(x)\r\n # x = layers.LeakyReLU()(x)\r\n # x = layers.MaxPooling2D(pool_size=2)(x)\r\n\r\n # x = Conv2D(filters=64, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding)(x)\r\n # x = layers.BatchNormalization()(x)\r\n # x = layers.LeakyReLU()(x)\r\n # x = layers.MaxPooling2D(pool_size=2)(x)\r\n\r\n # x = layers.Flatten()(x)\r\n # x = layers.Dense(1, activation='sigmoid')(x)\r\n\r\n # return Model(image, x, name='discriminator')\r\n\r\n def __build_discriminator(self, input_shape):\r\n image = Input(shape=input_shape, name='d_input')\r\n cla_model = tf.keras.applications.MobileNetV2(include_top=False, weights=None, input_shape=input_shape, pooling=\"avg\")\r\n\r\n return Model(image, cla_model(image), name='discriminator')\r\n \r\n def train(self, dataset, epochs=100, batch_size=32):\r\n sample_count = 10\r\n sample = dataset[0:sample_count]\r\n sample_inputs = np.array(sample).astype(np.float32)\r\n predict_data = np.array(sample_inputs, dtype=np.float32)\r\n\r\n plt_shape = (self.image_shape[0], self.image_shape[1])\r\n if self.image_shape[2] > 1:\r\n plt_shape = (\r\n self.image_shape[0],\r\n self.image_shape[1],\r\n self.image_shape[2],\r\n )\r\n\r\n counter = 1\r\n # Record generator/R network reconstruction training losses.\r\n plot_epochs = []\r\n plot_g_recon_losses = []\r\n plot_mse = []\r\n\r\n # Load traning data, add random noise.\r\n sample_w_noise = get_noisy_data(dataset)\r\n\r\n # Adversarial ground truths\r\n ones = np.ones((batch_size, 1))\r\n zeros = np.zeros((batch_size, 1))\r\n\r\n for epoch in range(epochs):\r\n # Number of batches computed by total number of target data / batch size.\r\n batch_idxs = len(dataset) // batch_size\r\n \r\n for idx in range(0, batch_idxs):\r\n # Get a batch of images and add random noise.\r\n batch = dataset[idx * batch_size:(idx + 1) * batch_size]\r\n batch_noise = sample_w_noise[idx * batch_size:(idx + 1) * batch_size]\r\n batch_clean = dataset[idx * batch_size:(idx + 1) * batch_size]\r\n # Turn batch images data to float32 type.\r\n batch_images = np.array(batch).astype(np.float32)\r\n batch_noise_images = np.array(batch_noise).astype(np.float32)\r\n batch_clean_images = np.array(batch_clean).astype(np.float32)\r\n batch_fake_images = self.generator.predict(batch_noise_images)\r\n # Update D network, minimize real images inputs->D-> ones, noisy z->R->D->zeros loss.\r\n d_loss_real = self.discriminator.train_on_batch(batch_images, ones)\r\n d_loss_fake = self.discriminator.train_on_batch(batch_fake_images, zeros)\r\n\r\n # Update R network twice, minimize noisy z->R->D->ones and reconstruction loss.\r\n self.adversarial_model.train_on_batch(batch_noise_images, [batch_clean_images, ones])\r\n g_loss = self.adversarial_model.train_on_batch(batch_noise_images, [batch_clean_images, ones])\r\n last_g_loss = g_loss[1]\r\n val_loss = d_loss_real+d_loss_fake\r\n plot_epochs.append(epoch+idx/batch_idxs)\r\n plot_g_recon_losses.append(g_loss[1])\r\n \r\n # Create difference and MSE\r\n mse = 
[]\r\n predictions = self.adversarial_model.predict(\r\n predict_data, batch_size=sample_count\r\n )[0]\r\n\r\n for pred_idx in range(0, len(predictions)):\r\n pred_image = predictions[pred_idx].reshape(plt_shape)\r\n diff = cv2.absdiff(sample_inputs[pred_idx], pred_image)\r\n mse.append(np.sum(diff * diff))\r\n\r\n mse = np.mean(mse)\r\n plot_mse.append(mse)\r\n #val_loss, val_gen_loss, val_dis_loss = self.adversarial_model.evaluate(sample_inputs, [sample_inputs, ones[0:sample_count]], sample_count, verbose=0)\r\n\r\n counter += 1\r\n if self.best_mse == -1 or mse < self.best_mse or val_loss-0.5 > 0.0:\r\n #self.best_se = last_se\r\n if self.best_mse == -1 or mse < self.best_mse:\r\n self.best_mse = mse\r\n self.save_weights()\r\n display.clear_output(wait=True)\r\n plots.plot_difference([predictions[0]], [sample_inputs[0]], self.image_shape, threshold=0.0, mask_color=\"Reds_r\")\r\n\r\n if val_loss-0.5 > 0.0:\r\n self.save_weights(\"disc_best.h5\")\r\n msg = 'Epoch:[{0}] --> dis_loss: {1:>0.3f}, recon_loss:{2:>0.3f}, mse:{3:>0.3f}'.format(epoch, val_loss, g_loss[1], mse)\r\n print(msg)\r\n return plot_epochs, plot_g_recon_losses, plot_mse\r\n\r\n def save_weights(self, model_name=\"best.h5\"):\r\n if self.checkpoint_dir == None:\r\n return\r\n os.makedirs(self.checkpoint_dir, exist_ok=True)\r\n self.adversarial_model.save_weights(os.path.join(self.checkpoint_dir, model_name))\r\n \r\n def load_weights(self, model_name=\"best.h5\"):\r\n file_path = os.path.join(self.checkpoint_dir, model_name)\r\n if os.path.exists(file_path) == False:\r\n return\r\n self.adversarial_model.load_weights(file_path)\r\n","sub_path":"src/models/anomaly_detection/ALOCC.py","file_name":"ALOCC.py","file_ext":"py","file_size_in_byte":10546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"500441092","text":"from pyspark import SparkContext\nsc = SparkContext()\n\n#checkin_id user_id session_id utc_time timezone_offset lat lon category subcategory\nval = 0\n\ndef filter_first(line):\n if (\"checkin_id\" in line):\n return False\n else:\n return True\n\ndef sumCheckins(x):\n val += int(x)\n\nraw_data = sc.textFile(\"dataset_TIST2015.tsv\")\n#raw_data = sc.textFile(\"dataset_TIST2015.tsv\")\n\n#mapping value second value with 1 to giv each ID a value of 1\n#then using reduceByKey to sum up the value of that ID with each matching ID\n#returning a list of ID's with count\ndata = raw_data.map(lambda line: line.split('\\t')) \\\n .filter(lambda line: filter_first(line))\n\nusers = data.map(lambda x: (x[1], 1)) \\\n .reduceByKey(lambda x,y: x+y)\n\ncheckins = users.map(lambda x: x[1]).sum()\n\nsessions = data.map(lambda x: (x[2], 1)) \\\n .reduceByKey(lambda x,y: x+y)\n\n\n#printing unique users\nprint(\"Num unique users: \" + str(users.count()))\nprint(\"Num checkin's: \" + str(checkins))\nprint(\"Num unique sessions: \" + str(sessions.count()))\n","sub_path":"scripts/unique.py","file_name":"unique.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"40902089","text":"from django.conf.urls.defaults import *\nfrom oswalpalash.views import api_view, api_fb,api_po,poke_api,ascii,gif\nurlpatterns = patterns('',\n (r'^', include('oswalpalash.urls')),\n \n)\nurlpatterns += patterns('',\n url(r'^football/(?P[\\w-]+)/?', api_view, name = \"api\"),\n url(r'^pokemon/(?P[\\w-]+)/?', poke_api, name = \"api\"),\n url(r'^pokemon', api_po),\n url(r'^football', 
api_fb),\n\turl(r'^ascii',ascii),\n\turl(r'^gif',gif),\n )\n","sub_path":"Django_AppEngine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"442435866","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.decomposition import PCA\n\nTOPIC_5_BIGRAM = './topic_modeling_data/topic-5-bigram-dis-vector.csv'\nTOPIC_5 = './topic_modeling_data/topic-5-dis-vector.csv'\nTOPIC_222_BIGRAM = './topic_modeling_data/topic-222-bigram-dis-vector.csv'\nTOPIC_222 = './topic_modeling_data/topic-222-dis-vector.csv'\n\nTOPIC_5_BIGRAM_NEW = './topic_modeling_data/topic_modeling_new/topic-5-bigram-new-dis-vector.csv'\nTOPIC_5_NEW = './topic_modeling_data/topic_modeling_new/topic-5-new-dis-vector.csv'\nTOPIC_222_BIGRAM_NEW = './topic_modeling_data/topic_modeling_new/topic-222-bigram-new-dis-vector.csv'\nTOPIC_222_NEW = './topic_modeling_data/topic_modeling_new/topic-222-new-dis-vector.csv'\n\nTEST_PREDICT_FILE = './dataset/topic-test-222-dis-vector.csv'\nSUBMISSION_FILE = './submission/disaster_topic_modelling.csv'\nSAMPLE_SUBMISSION_FILE = './dataset/sample_submission.csv'\n\ndef read_csv(file_name):\n print(str(file_name))\n print('------------------------------')\n data = pd.read_csv(file_name)\n\n # create dataframe\n train_df = pd.DataFrame(data)\n\n return train_df\n\ndf = read_csv(TOPIC_222_NEW)\n# print(df)\n\ny = df['target']\nX = df.drop(columns=['Unnamed: 0', 'target'])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=True)\n\n# classification model\n# these 3 models, especially BernoulliNB does not work well with these dataset\n# clf = BernoulliNB()\n# clf = MultinomialNB()\n# clf = SVC(kernel='linear')\n\n# clf = GaussianNB()\nclf = RandomForestClassifier(n_jobs=3, n_estimators=500, verbose=True)\n# clf = LogisticRegression(class_weight='balanced', solver='newton-cg')\n# clf = AdaBoostClassifier(n_estimators=500)\n\nclf.fit(X_train, y_train)\n\nprint(clf)\ny_pred = clf.predict(X_test)\nprint(accuracy_score(y_pred, y_test))\nprint(classification_report(y_pred, y_test))\n\n\n# prepare file to submission to kaggle\ntest = read_csv(TEST_PREDICT_FILE)\nsample_sub= read_csv(SAMPLE_SUBMISSION_FILE)\ntest = test.drop(columns=['Unnamed: 0'])\n\nsample_sub['target'] = clf.predict(test)\nsample_sub.to_csv(SUBMISSION_FILE,index=False)","sub_path":"disaster_topic_modeling.py","file_name":"disaster_topic_modeling.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"638849751","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 26 17:39:52 2017\n\n@author: Varun\n\"\"\"\nimport pandas as pd\nimport xgboost as xgb\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom 
sklearn.metrics import roc_auc_score\n\nprint(\"XGBoost:\")\n\nsamples = pd.read_csv('FINAL.csv')\nfeatures = ['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']\n\nX_train, X_test, y_train, y_test = train_test_split(samples[['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']],samples['FGM'],test_size = 0.25, random_state=10)\n\nmodel0 = xgb.XGBClassifier()\nkfold = KFold(n_splits = 10, random_state = 7)\nresults = cross_val_score(model0, X_train, y_train, cv = kfold)\nprint(\"(all shots) Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nmodel0.fit(X_train, y_train)\nimportances = model0.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(X_train.shape[1]):\n print(\"%d. %s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\ndef corr_class(ground_truth, predictions):\n mat=confusion_matrix(ground_truth,predictions)\n return (mat[0][0]+mat[1][1]*1.0)/np.sum(mat)\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X_train.shape[1]), importances[indices],\n color=\"r\", align=\"center\")\nplt.xticks(range(X_train.shape[1]))\nplt.xlim([-1, X_train.shape[1]])\nplt.show()\n\nfpr, tpr, thresholds = roc_curve(model0.predict(X_test), y_test)\nauc = roc_auc_score(model0.predict(X_test), y_test)\n\nplt.figure()\nlw = 2\nplt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('XGBoost ROC')\nplt.legend(loc=\"lower right\")\n# This is the ROC curve\nplt.show() \n\nprint(\"Test True Positive Rate: \", corr_class(y_test, model0.predict(X_test)))\n\n\n\ntwoptsamples = samples[samples['PTS_TYPE'] == 0]\n\nX2_train, X2_test, y2_train, y2_test = train_test_split(twoptsamples[['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']],twoptsamples['FGM'],test_size = 0.25, random_state=10)\n\nmodel1 = xgb.XGBClassifier()\nkfold = KFold(n_splits = 10, random_state = 7)\nresults = cross_val_score(model1, X2_train, y2_train, cv = kfold)\nprint(\"(2 pt shots) Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nmodel1.fit(X2_train, y2_train)\nimportances = model1.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(X2_train.shape[1]):\n print(\"%d. 
%s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X2_train.shape[1]), importances[indices],\n color=\"r\", align=\"center\")\nplt.xticks(range(X2_train.shape[1]), indices)\nplt.xlim([-1, X2_train.shape[1]])\nplt.show()\n\nthreeptsamples = samples[samples['PTS_TYPE'] == 1]\n\nX3_train, X3_test, y3_train, y3_test = train_test_split(threeptsamples[['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']],threeptsamples['FGM'],test_size = 0.25, random_state=10)\n\nmodel2 = xgb.XGBClassifier()\nkfold = KFold(n_splits = 10, random_state = 7)\nresults = cross_val_score(model2, X3_train, y3_train, cv = kfold)\nprint(\"(3 pt shots) Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nmodel2.fit(X3_train, y3_train)\nimportances = model2.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(X3_train.shape[1]):\n print(\"%d. %s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X3_train.shape[1]), importances[indices],\n color=\"r\", align=\"center\")\nplt.xticks(range(X3_train.shape[1]), indices)\nplt.xlim([-1, X3_train.shape[1]])\nplt.show()\n\n","sub_path":"XGBoost.py","file_name":"XGBoost.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"624156667","text":"import io\nimport json\nimport flask\nfrom flask import Flask, jsonify, request\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision import models\nfrom PIL import Image\n\nprint(flask.__version__)\nprint(torchvision.__version__)\n\n\napp = Flask(__name__)\nimagenet_class_index = json.load(open(\"/Users/zfwang/machinelearning/deeplearning/src/src_pytorch/deploy/deploy_flask/imagenet_class_index.json\"))\nmodel = models.densenet121(pretrained = True)\nmodel.eval()\n\n\n# 准备图像\ndef transform_image(image_bytes):\n my_transforms = transforms.Compose([\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n image = Image.open(io.BytesIO(image_bytes))\n return my_transforms(image).unsqueeze(0)\n\n# 预测\ndef get_prediction(image_bytes):\n tensor = transform_image(image_bytes = image_bytes)\n outputs = model.forward(tensor)\n _, y_hat = outputs.max(1)\n predicted_idx = str(y_hat.item())\n return imagenet_class_index[predicted_idx]\n\n# API\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n file = request.files['file']\n img_bytes = file.read()\n class_id, class_name = get_prediction(image_bytes = img_bytes)\n return jsonify({\n 'class_id': class_id, \n 'class_name': class_name\n })\n\n\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"src/src_pytorch/deploy/deploy_flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"12656704","text":"import sys \nsys.path.append(\"../\")\nimport bencode\nfrom twisted.internet import reactor\nfrom twisted.internet import task\nimport socket\nimport PeerProtocol\nimport 
PeerFactory\nimport Peer\nfrom Client import RequestClient\n\nINTERVAL_CONNECT_PEER = 1/10\nINTERVAL_ADD_REQUEST = 1/300\nINTERVAL_SEND_REQUEST = 1/200\n# PEER_LISTEN_TCP_PORT = 6788\n# CLIENT_UDP_PORT = 56788\nimport random\nPEER_LISTEN_TCP_PORT = random.randint(6000, 7000)\nCLIENT_UDP_PORT = random.randint(50000, 57000)\n\ndef readMetafileFromFile(filename):\n return bencode.decode(open(filename, 'rb').read())\n\ndef main():\n metafile = readMetafileFromFile('../test.torrent')\n peer = Peer.Peer(PEER_LISTEN_TCP_PORT, reactor, metafile)\n reqClient = RequestClient(\n peer,\n PEER_LISTEN_TCP_PORT,\n clientIpstr = '127.0.0.1',\n # clientIpstr=socket.gethostbyname(socket.gethostname()),\n clientPort=CLIENT_UDP_PORT,\n protocol_id=1,\n info_hash=peer._getInfoHash(),\n peer_id=peer._getPeerID(),\n downloaded=0,\n left=0,\n uploaded=0,\n event=0,\n key=0,\n num_want=0)\n\n reactor.adoptDatagramPort(reqClient.portSocket.fileno(),\n socket.AF_INET, reqClient)\n\n reactor.listenTCP(PEER_LISTEN_TCP_PORT, peer.Serverfactory)\n # loopConnectPeer = task.LoopingCall(peer.tryConnectPeer)\n loopAddRequest = task.LoopingCall(peer.tryAddRequest)\n loopSendRequest = task.LoopingCall(peer.trySendRequest)\n\n reactor.callLater(INTERVAL_CONNECT_PEER, peer.tryConnectPeer)\n # loopConnectPeer.start(INTERVAL_CONNECT_PEER)\n loopAddRequest.start(INTERVAL_ADD_REQUEST)\n loopSendRequest.start(INTERVAL_SEND_REQUEST)\n\n reactor.run()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Bittorrent/another/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"177485599","text":"\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom networks.blocks import ConvBatchNormRelu2D, ConvGroupNormRelu2D\n\n# classical convolutional neural network implementation\nclass CNN(nn.Module):\n\n def __init__(self, input_size, conv_channels, fc_channels, kernel_size=3, group_norm=False):\n super(CNN, self).__init__()\n\n self.input_size = input_size\n self.conv_channels = conv_channels\n self.fc_channels = fc_channels\n self.kernel_size = kernel_size\n\n self.conv_features = nn.Sequential()\n self.fc_features = nn.Sequential()\n\n # convolutional layers\n in_channels = input_size[0]\n data_size = input_size[1]\n for i, out_channels in enumerate(conv_channels):\n if group_norm:\n self.conv_features.add_module('conv%d' % (i + 1), ConvGroupNormRelu2D(in_channels, out_channels, kernel_size=kernel_size))\n else:\n self.conv_features.add_module('conv%d' % (i + 1), ConvBatchNormRelu2D(in_channels, out_channels, kernel_size=kernel_size))\n in_channels = out_channels\n data_size /= 2\n\n # full connections\n in_channels = conv_channels[-1]*data_size*data_size\n for i, out_channels in enumerate(fc_channels):\n if i==len(fc_channels)-1:\n fc = nn.Sequential(nn.Linear(int(in_channels), out_channels))\n else:\n fc = nn.Sequential(nn.Linear(int(in_channels), out_channels),\n nn.BatchNorm1d(out_channels),\n nn.ReLU())\n self.fc_features.add_module('linear%d' % (i + 1), fc)\n in_channels = out_channels\n\n def forward(self, inputs):\n\n outputs = inputs\n for i in range(len(self.conv_channels)):\n outputs = getattr(self.conv_features, 'conv%d' % (i + 1))(outputs)\n outputs = F.max_pool2d(outputs, kernel_size=2)\n\n outputs = outputs.view(outputs.size(0),-1)\n for i in range(len(self.fc_channels)):\n outputs = getattr(self.fc_features, 'linear%d' % (i + 1))(outputs)\n\n return 
outputs","sub_path":"networks/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"652188804","text":"import numpy as np\nimport random\n\n\nclass SphericalNodes():\n\t\n\tdef __init__(self,radius,node_count,interval):\n\t\tnodes = 0\n\t\tslope = 0\n\t\tif node_count > 100000:\n\t\t\tprint('Warning: such a high node count will likely result in a mesh too complex to converge')\n\t\t\n\t\twhile nodes 0 else []\n\n def is_valid(self, word):\n if word is None or word.isspace() or len(word) == 0:\n return False\n return True\n\n\nwordList = [\n \"ant\", \"anthology\", \"antagonist\", \"antonym\",\n \"fun\", \"function\", \"factory\",\n \"trie\", \"trigger\", \"trigonometry\", \"tripod\"\n]\n\ntrie = Trie()\n\nfor word in wordList:\n trie.insert(word)\n\nprint(trie.auto_complete('')) # Result = []\nprint(trie.auto_complete(None)) # Result = []\nprint(trie.auto_complete(' ')) # Result = []\nprint(trie.auto_complete('123')) # Result = []\nprint(trie.auto_complete('an')) # Result = ['ant', 'anthology', 'antagonist', 'antonym']\nprint(trie.auto_complete('ant')) # Result = ['ant', 'anthology', 'antagonist', 'antonym']\nprint(trie.auto_complete('anto')) # Result = ['antonym']\nprint(trie.auto_complete('trig')) # Result = ['trigger', 'trigonometry']\nprint(trie.auto_complete('fun')) # Result = ['fun', 'function']\n\n\n\n","sub_path":"Problem Vs Algorithm/Autocomplete with tries/autocomplete.py","file_name":"autocomplete.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"336940959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 10 23:40:28 2019\n\n@author: caspe\n\"\"\"\n\nimport pandas as pd\nfrom fancyimpute import IterativeImputer\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV, KFold, ParameterGrid, ParameterSampler\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nimport lightgbm as lgb\nfrom nested_cv import nested_cv\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ntrain = pd.read_csv('./data/train.csv')\ntest = pd.read_csv('./data/test.csv')\n\ndef fill_ii(df):\n df_filled_ii = pd.DataFrame(IterativeImputer().fit_transform(df.values))\n df_filled_ii.columns = df.columns\n df_filled_ii.index = df.index\n\n return df_filled_ii\n\ndef data_engineering(train, test):\n train = train.drop(train.index[0])\n \n cc_data = pd.concat([train, test], sort=True)\n cc_data = cc_data.drop(['Id', 'SalePrice','Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)\n \n train[\"SalePrice\"] = np.log1p(train[\"SalePrice\"])\n y = train['SalePrice']\n \n cc_data = pd.get_dummies(cc_data, prefix_sep='_')\n \n cc_data = fill_ii(cc_data)\n \n X_train = cc_data[:train.shape[0]]\n X_test = cc_data[train.shape[0]:]\n \n return X_train,X_test,y\n\nX,X_test,y = data_engineering(train,test)\n\nmodels_to_run = [RandomForestRegressor(), xgb.XGBRegressor(), lgb.LGBMRegressor()]\nmodels_param_grid = [ \n { # 1st param grid, corresponding to RandomForestRegressor\n 'max_depth': [3, None],\n 'n_estimators': np.random.randint(100,1000,20)\n }, \n { # 2nd param grid, corresponding to XGBRegressor\n 'colsample_bytree': np.linspace(0.3, 0.5),\n 'n_estimators': np.random.randint(100,1000,20)\n },\n { # 3rd param grid, 
corresponding to LGBMRegressor\n        'learning_rate': [0.05],\n        'n_estimators': np.random.randint(100,1000,20),\n        'num_leaves': np.random.randint(10,30,10),\n        'reg_alpha' : (1,1.2),\n        'reg_lambda' : (1,1.2,1.4)\n    }\n    ]\n\n# Allocate inner arrays for each algorithm being run\nouter_score = [ [] for i in range(len(models_to_run)) ]\nbest_inner_score = [ [] for i in range(len(models_to_run)) ]\nbest_params = [ [] for i in range(len(models_to_run)) ]\n\n# Define parameters for function and run different algorithms in a loop\n# If sqrt_of_score = True, the default scoring will be RMSE\nfor i,model in enumerate(models_to_run):\n    outer_score[i], best_inner_score[i], best_params[i] = nested_cv(X, y, model, models_param_grid[i], \n                                                              5, 5, sqrt_of_score = True, do_recursive_feature_elimination=True)\n# Print the output of nested_cv function\nfor i,results in enumerate(zip(outer_score, best_inner_score, best_params)):\n    print('Outer scores, inner score and best params for model {0}: \\n{1}\\n{2}\\n{3}\\n'\n          .format(type(models_to_run[i]).__name__,results[0],results[1],results[2]))","sub_path":"Experimental/nestedCV_innerloop.py","file_name":"nestedCV_innerloop.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"162973336","text":"# #[?] The sum of the scores that are 80 or higher among the scores of N people\n\n# # Sum Algorithm: the sum of the data items that meet a given condition within a given range\n\n# # [1] Input : the scores of n people\n\n# In[2]:\n\n\nscores = [ 100, 75, 50, 37, 90, 95 ]\n\n\n# In[17]:\n\n\nsum = 0 # container that will hold the sum\n\n\n# In[18]:\n\n\nN = len(scores) # pseudocode\n\n\n# # [2] Process : sum algorithm section : the given condition (filtering) over the given range\n\n# In[19]:\n\n\nfor i in range(0,N): # the given range\n    if scores[i] >= 80: # the given condition\n        sum = sum + scores[i] # processing\n\n\n# # [3] Output\n\n# In[20]:\n\n\nprint(f"Total of the scores that are 80 or higher out of {N} scores: {sum}") # print the result\n\n# [!] 
Using the debugger to debug : F9 -> F5 -> F11 -> F5\n","sub_path":"Python Algorithm Learning/SumAlgorithm/SumAlgorithm.py","file_name":"SumAlgorithm.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"123063397","text":"from typing import List\n\nfrom admin import admin_site\nfrom sources import models\nfrom sources.admin.filters import AttributeeFilter\nfrom sources.admin.source_admins.source_admin import SourceAdmin, SourceForm\n\n\nclass TextualSourceForm(SourceForm):\n    \"\"\"Form for adding/updating textual sources.\"\"\"\n\n    model = models.TextualSource\n\n    class Meta:\n        model = models.TextualSource\n        exclude = []\n\n\nclass TextualSourceAdmin(SourceAdmin):\n    \"\"\"Admin for textual sources.\"\"\"\n\n    form = TextualSourceForm\n    list_display = ['pk', 'html', 'detail_link', 'date_string']\n    list_filter = ['verified', AttributeeFilter]\n\n    def get_fields(self, request, model_instance=None):\n        \"\"\"Return reordered fields to be displayed in the admin.\"\"\"\n        fields: List = list(super().get_fields(request, model_instance))\n        # Fields to display at the top, in order\n        top_fields = ('full_string', 'creators', 'title')\n        # Fields to display at the bottom, in order\n        bottom_fields = (\n            'volume',\n            'number',\n            'page_number',\n            'end_page_number',\n            'container',\n            'description',\n            'citations',\n        )\n        index: int = 0\n        for top_field in top_fields:\n            if top_field in fields:\n                fields.remove(top_field)\n                fields.insert(index, top_field)\n                index += 1\n        for bottom_field in bottom_fields:\n            if bottom_field in fields:\n                fields.remove(bottom_field)\n                fields.append(bottom_field)\n        return fields\n\n\nadmin_site.register(models.Piece, TextualSourceAdmin)\n","sub_path":"sources/admin/source_admins/textual_source_admin.py","file_name":"textual_source_admin.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"590937717","text":"#N students take k apples and distribute them among each other evenly. The remaining (the indivisible) part remains in\n#the basket. How many apples will each student get? How many apples will remain in the basket? The program reads the\n#number N and K. 
It should print the two answers for the question above.\n\nno_students=int(input(\"enter the number of students: \"))\nno_apples=int(input(\"enter the number of apples: \"))\n\nD = no_apples // no_students\nB = no_apples % no_students\n\nprint(f\"each student get {D} apples\")\nprint(f\"the remaining apples are {B}\")\n","sub_path":"venv/addition/apples.py","file_name":"apples.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"2012401","text":"from __future__ import division\r\nimport numpy as np\r\nimport random\r\nimport math\r\n\r\nmonopoly_board = [\"Go\", \"brown_1\", \"cc_1\", \"brown_2\", \"income_tax\", \"station_1\", \"light_blue_1\", \"chance_1\", \"light_blue_2\", \"light_blue_3\", \"prison_field\",\r\n\"pink_1\", \"electric_c\", \"pink_2\", \"pink_3\", \"station_2\", \"orange_1\", \"cc_2\", \"orange_2\", \"orange_3\", \"free_parking\",\r\n\"red_1\", \"chance_2\", \"red_2\", \"red_3\", \"station_3\", \"yellow_1\", \"yellow_2\", \"water_works\", \"yellow_3\", \"go_to_jail\",\r\n\"green_1\", \"green_2\", \"cc_3\", \"green_3\", \"station_4\", \"chance_3\", \"dark_blue_1\", \"luxury_tax\", \"dark_blue_2\"]\r\nchance_card_stack = [0, 24, 11, 999, 10, 5, 15, 39, 1, 777, 777, 777, 777, 777, 777, 777]\r\ncc_card_stack = [0, 777, 777, 777, 10, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777]\r\n\r\ndef advance_to_next_station(pos):\r\n if (pos < 5 or pos > 35):\r\n return 5\r\n elif pos < 15:\r\n return 15\r\n elif pos < 25:\r\n return 25\r\n elif pos < 35:\r\n return 35\r\n\r\n\r\ndef draw_card(special_field, pos):\r\n if \"cc\" in special_field:\r\n print(\" - community chest card has been drawn - \")\r\n num1 = np.random.randint(0,len(cc_card_stack))\r\n #print(\"random number out of stack: \" + str(num1))\r\n advance_to = cc_card_stack[num1]\r\n if advance_to == 777:\r\n advance_to = pos\r\n print(\" - no need to move - \")\r\n return 0\r\n else:\r\n print(\"- move forward \" + str(advance_to-pos) + \" steps - \")\r\n return advance_to - pos\r\n elif \"chance\" in special_field:\r\n print(\" - chance card has been drawn - \")\r\n num2 = np.random.randint(0,len(chance_card_stack))\r\n advance_to = chance_card_stack[num2]\r\n if advance_to == 12: # nearest utility\r\n print(\"Advance to the next utility!\")\r\n if pos <= 12:\r\n return 12 - pos\r\n else:\r\n return 28 - pos\r\n elif advance_to == 15: # nearest station\r\n print(\"Advance to the next station!\")\r\n advance_to = advance_to_next_station(pos)\r\n return (advance_to - pos)%40\r\n elif advance_to == 999: # go back 3 spaces\r\n advance_to = pos-3\r\n print(\"Go back 3 spaces!\")\r\n return - 3\r\n elif advance_to == 777: # no need to move\r\n advance_to = pos\r\n print(\" - no need to move - \")\r\n return 0 # don't move anywhere\r\n else: # go straight to position XYZ\r\n print(\"Advance to \" + monopoly_board[advance_to] + \" !\")\r\n return (advance_to - pos)%40\r\n\r\n\r\nclass card:\r\n def __init__(self, card_type, pos):\r\n self.card_type = card_type\r\n self.position = pos\r\n self.step_change = draw_card(card_type, pos)\r\n self.advance_to = (self.step_change + pos)%40\r\n","sub_path":"code/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"632032183","text":"\"\"\"Directory management utilities in Python.\n\n @author Vaishak K Nair (19MCMI08)\n\n\n\"\"\"\n\n\nimport os\nimport 
shutil\n\ndef processChoice():\n    \"\"\"Process the outermost choice list.\n    \"\"\"\n    if choice == 1:\n        createDir()\n    elif choice == 2:\n        modifyDir()\n    elif choice == 3:\n        navIntoDir()\n    elif choice == 4:\n        listDirs()\n    else:\n        print("Invalid choice")\n\ndef createDir():\n    \"\"\"Read a directory name from user and create it.\n    \"\"\"\n    print("Enter name for directory:", end=" ")\n    name = input()\n    try:\n        os.mkdir(name)\n    except FileExistsError:\n        print(f"Directory '{name}' already exists!")\n    else:\n        print(f"Created directory named '{name}'")\n\ndef modifyDir():\n    \"\"\"Display directory modification options.\n\n    Read user choice and call appropriate functions.\n    \"\"\"\n    name = input("Enter the directory to be modified: ")\n    while(True):\n        \n        choice = int(input(\"\"\"1) Rename directory.\n2) Copy directory to another.\n3) Move directory.\n4) Delete directory.\n5) Exit from modify mode.\\n\"\"\"))\n        if(choice == 1):\n            renameDir(name)\n        elif(choice == 2):\n            copyDir(name)\n        elif(choice == 3):\n            moveDir(name)\n        elif(choice == 4):\n            deleteDir(name)\n        else:\n            break\n\ndef renameDir(name):\n    \"\"\"Rename the specified directory to a name given by user.\n    \"\"\"\n    newName = input("Enter new name for directory: ")\n    try:\n        os.rename(name, newName)\n    except OSError as er:\n        print(f"Couldn't rename {name} to {newName}. Reason: {er.strerror}")\n    else:\n        print(f"Directory '{name}' successfully renamed to '{newName}'.")\n\ndef copyDir(name):\n    \"\"\"Copy the contents of a directory into the specified directory.\n    \"\"\"\n    targetDir = input("Enter target directory: ")\n    try:\n        shutil.copytree(name, targetDir)\n    except FileExistsError as error:\n        print(f"Directory '{name}' couldn't be copied to '{targetDir}'. Reason: {error.strerror}")\n    except FileNotFoundError as error:\n        print(f"Directory '{name}' couldn't be copied to '{targetDir}'. Reason: {error.strerror}")\n    else:\n        print(f"Directory '{name}' copied to '{targetDir}'")\n    \ndef moveDir(name):\n    \"\"\"Move the specified directory into the directory given by the user.\n    \"\"\"\n    targetName = input("Enter target directory: ")\n    shutil.move(name, targetName)\n    print(f"Moved '{name}' to '{targetName}'")\n\n\ndef deleteDir(name):\n    \"\"\"Delete the specified directory and its contents.\n    \"\"\"\n    try:\n        shutil.rmtree(name)\n    except OSError as error:\n        print(f"Couldn't delete '{name}'. Reason: {error}")\n    else:\n        print(f"Deleted directory '{name}'")\n\ndef navIntoDir():\n    \"\"\"Navigate into the directory specified by the user.\n    \"\"\"\n    while(True):\n        choice = int(input(\"\"\"1) Go to Parent Directory.\n2) Navigate to specific directory.\n3) Exit from Navigate Mode.\\n\"\"\"))\n        if(choice == 1):\n            os.chdir(os.pardir)\n        elif(choice == 2):\n            targetDir = input("Enter target path: ")\n            try:\n                os.chdir(targetDir)\n            except OSError as error:\n                print(f"Couldn't change directory to '{targetDir}'. 
Reason: {error.strerror}\")\n else:\n print(f\"Current working directory changed to '{targetDir}'\")\n else:\n break\n \ndef listDirs():\n \"\"\"List the contents of a directory.\n\n Long listing is also available.\n \"\"\"\n while(True):\n choice = int(input(\"\"\"Enter your choice for method of listing :\n1) List of directories.\n2) List of directories and their details.\n3) Exit from List Mode.\\n\"\"\"))\n if(choice == 1):\n for fileName in os.listdir():\n print(fileName)\n elif(choice == 2):\n for dirEntry in os.scandir():\n stat = dirEntry.stat()\n print(\"d\" if dirEntry.is_dir() else \"-\", stat.st_mode, stat.st_uid, stat.st_gid,\n stat.st_size, stat.st_atime, dirEntry.name)\n else:\n break\n\nif __name__ == \"__main__\":\n print(\"-------------Implementing Directory Management------------\")\n while(True):\n print(\"Press the following to :\")\n print(\"1) Create a new directory.\")\n print(\"2) Modify a directory.\")\n print(\"3) Navigate into directory.\")\n print(\"4) Listing directories.\")\n print(\"5) Exit.\")\n choice = int(input())\n if(choice == 5):\n break\n processChoice()\n\n\n\n\n","sub_path":"A3E2/DirectoryManagement.py","file_name":"DirectoryManagement.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"591129567","text":"#------------------------------------------#\r\n# Title: CDInventory.py (Assignment_07)\r\n# Desc: CD Inventory Program.\r\n# Change Log: \r\n# 2/20/21, Eric Hoyle, consolidated code into functions under classes\r\n# updated main to incorporate new class and function calls. \r\n# 2/27/21, Eric Hoyle, Incorporated exception handling and data serialization.\r\n# Further consolidation of main into functions. Minor corrections\r\n# function arguments.\r\n# DBiesinger, 2030-Jan-01, Created File\r\n#------------------------------------------#\r\nimport pickle\r\n# -- DATA -- #\r\nstrChoice = '' # User input\r\nlstTbl = [] # list of lists to hold data\r\ndicRow = {} # dictionary of data row\r\ndatFileName = 'CDInventory.dat' # data storage file\r\nobjFile = None # file object\r\ncdData = None\r\n\r\n# -- PROCESSING -- #\r\nclass DataProcessor:\r\n \"\"\"Processing data supplied by the user\"\"\"\r\n \r\n @staticmethod \r\n def table_append(cdData, table):\r\n \"\"\"Appends new cd entry as dictionary to a list of dictionaries\r\n \r\n Args: \r\n cdData: Information aboout the CD \r\n table: The list that the new dictionary is appended to\r\n \r\n Returns: \r\n List of dictionaries with new dictionary appended to the list\r\n \r\n \"\"\"\r\n dicRow = {'ID': cdData[0], 'Title': cdData[1], 'Artist': cdData[2]}\r\n table.append(dicRow)\r\n return table\r\n\r\n @staticmethod\r\n \r\n def cd_remove(intIDDel, table):\r\n \"\"\"Removes the dictionary with ID key value specified by user in \r\n intIDDel from the list of dictionaries.\r\n \r\n Args:\r\n intIDDel (int): value for key ID specified by the user\r\n table: list of dictionaries the entry is removed from.\r\n \r\n Returns:\r\n Boolean value of cd removal status.\r\n \"\"\"\r\n intRowNr = -1\r\n blnCDRemoved = False\r\n # for row in lstTbl:\r\n for row in table:\r\n intRowNr += 1\r\n if row['ID'] == intIDDel:\r\n # del lstTbl[intRowNr]\r\n del table[intRowNr]\r\n blnCDRemoved = True\r\n break\r\n return blnCDRemoved\r\n \r\n \r\nclass FileProcessor:\r\n \"\"\"Processing the data to and from text file\"\"\"\r\n\r\n @staticmethod\r\n def read_file(file_name, table):\r\n \"\"\"Function to manage data ingestion 
from file to a list of \r\n dictionaries\r\n\r\n Reads the data from file identified by file_name into a 2D table\r\n (list of dicts) table one line in the file represents one dictionary \r\n row in table.\r\n\r\n Args:\r\n file_name (string): name of file used to read the data from\r\n table (list of dict): 2D data structure (list of dicts) that holds\r\n the data during runtime\r\n\r\n Returns:\r\n None.\r\n \"\"\"\r\n try: \r\n table.clear() # this clears existing data and allows to load data from file\r\n with open(file_name, 'rb') as dataread:\r\n picdata = pickle.load(dataread)\r\n for line in picdata:\r\n table.append(line)\r\n except FileNotFoundError as e:\r\n print('\\n{:*^66}'.format((e.__doc__).upper()),\r\n '\\n{:^66}'.format(' WARNING: Data not loaded').upper())\r\n\r\n @staticmethod\r\n def write_file(file_name, table):\r\n \"\"\"Function to manage data output from a list of dictionaries to a file \r\n\r\n Writes the data from a 2D table (lstTbl) to a file identified by \r\n file_name, one dictionary per row.\r\n \r\n Args:\r\n file_name (string): name of file used to read the data from\r\n table (list of dict): 2D data structure (lstTbl) that holds the \r\n data during runtime\r\n\r\n Returns:\r\n None.\r\n \"\"\"\r\n try:\r\n with open(file_name, 'wb') as datastore:\r\n pickle.dump(table, datastore)\r\n except FileNotFoundError as e:\r\n print('\\n{:*^66}'.format((e.__doc__).upper()),\r\n '\\n{:^66}'.format(' WARNING: Data not saved').upper())\r\n\r\n# -- PRESENTATION (Input/Output) -- #\r\n\r\nclass IO:\r\n \"\"\"Handling Input / Output\"\"\"\r\n @staticmethod\r\n def add_cd():\r\n \"\"\"Ask user for new ID, CD Title and Artist\r\n \r\n Args: \r\n None.\r\n \r\n Returns:\r\n list of information (ID, Title, and Artist) for a new CD entry\"\"\"\r\n \r\n print('Please enter info for the CD you would like to add:\\n')\r\n strID = ''\r\n n=3\r\n while strID == '':\r\n \r\n try:\r\n strID = int(input('Enter ID: ').strip())\r\n except ValueError:\r\n print('\\n* Don\\'t be a dummy! ID must be a number. Please try again *\\n'.upper())\r\n n-=1\r\n if n==0:\r\n input('You seem to be pretty dense. Let\\'s get you back to the main menu.')\r\n break\r\n continue\r\n strTitle = input('What is the CD\\'s title? ').strip()\r\n strArtist = input('What is the Artist\\'s name? ').strip()\r\n cdData =[strID, strTitle, strArtist]\r\n return cdData\r\n \r\n \r\n @staticmethod\r\n def print_menu():\r\n \"\"\"Displays a menu of choices to the user\r\n\r\n Args:\r\n None.\r\n\r\n Returns:\r\n None.\r\n \"\"\"\r\n\r\n print('\\n\\n')\r\n print('{:-^66}'.format(' Menu '),'\\n{:<}'.format('[l] Load Inventory from'),datFileName,\r\n '\\n{:<30}'.format('[a] Add CD'),'\\n{:<30}'.format('[i] Display Current Inventory'),\r\n '\\n{:<30}'.format('[d] Delete CD from Inventory'),\r\n '\\n{:<}'.format('[s] Save Inventory to'),datFileName,\r\n '\\n{:<30}'.format('[x] Exit'),\r\n '\\n{:-^66}'.format('-'))\r\n\r\n @staticmethod\r\n def menu_choice():\r\n \"\"\"Gets user input for menu selection\r\n\r\n Args:\r\n None.\r\n\r\n Returns:\r\n choice (string): a lower case sting of the users input out of the choices l, a, i, d, s or x\r\n\r\n \"\"\"\r\n choice = ' '\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n choice = input('Which operation would you like to perform? 
[l, a, i, d, s or x]: ').lower().strip()\r\n print() \r\n return choice\r\n\r\n @staticmethod\r\n def show_inventory(table):\r\n \"\"\"Displays current inventory table\r\n\r\n\r\n Args:\r\n table (list of dict): 2D data structure (list of dicts) that holds the data during runtime.\r\n\r\n Returns:\r\n None.\r\n\r\n \"\"\"\r\n print('\\n\\n')\r\n print('{:=^66}'.format(' The Current Inventory '))\r\n print('{:<6}{:30}{:30}'.format('ID','Title','Artist'))\r\n print('{:-^66}'.format('-'))\r\n for row in table:\r\n print('{:<6}{:30}{:30}'.format(*row.values()))\r\n print('{:=^66}'.format('='))\r\n \r\n @staticmethod\r\n def cd_removed_conf(removed):\r\n \"\"\" uses the status (True/False) of the boolean flag in the cd_remove\r\n function to return a printed statement of the status to the user.\r\n \r\n Args:\r\n None\r\n \r\n Returns:\r\n None\r\n \"\"\"\r\n print()\r\n if removed:\r\n print('The CD was removed\\n')\r\n else:\r\n print('Could not find this CD!\\n')\r\n\r\n @staticmethod\r\n def del_choice():\r\n n=3\r\n while n > 0:\r\n try:\r\n delID = int(input('Which ID would you like to delete? ').strip())\r\n except ValueError:\r\n print('\\n* Don\\'t be a dummy! ID must be a number. Please try again *\\n'.upper())\r\n n-=1\r\n if n==0:\r\n input('You seem to be pretty dense. Let\\'s get you back to the main menu.')\r\n break\r\n continue\r\n else:\r\n return delID\r\n\r\n# 1. When program starts, read in the currently saved Inventory\r\nFileProcessor.read_file(datFileName, lstTbl)\r\n\r\n# 2. start main loop\r\nwhile True:\r\n # 2.1 Display Menu to user and get choice\r\n IO.print_menu()\r\n strChoice = IO.menu_choice()\r\n\r\n # 3. Process menu selection\r\n try:\r\n if strChoice == 'x':# 3.1 process exit first\r\n break\r\n # 3.2 process load inventory\r\n if strChoice == 'l':\r\n print('WARNING: If you continue, all unsaved data will be lost and the Inventory re-loaded from file.')\r\n strYesNo = input('Type \\'yes\\' to continue and reload from {}. \\nPress any key to cancel: '.format(datFileName))\r\n if strYesNo.lower() == 'yes':\r\n print('reloading...')\r\n FileProcessor.read_file(datFileName, lstTbl)\r\n IO.show_inventory(lstTbl)\r\n else:\r\n input('canceling... Inventory data NOT reloaded. Press [ENTER] to continue to the menu.')\r\n IO.show_inventory(lstTbl)\r\n continue\r\n # 3.3 process add a CD\r\n elif strChoice == 'a':\r\n cdData = IO.add_cd()\r\n lstTbl = DataProcessor.table_append(cdData, lstTbl)\r\n IO.show_inventory(lstTbl)\r\n continue \r\n # 3.4 process display current inventory\r\n elif strChoice == 'i':\r\n IO.show_inventory(lstTbl)\r\n continue\r\n # 3.5 process delete a CD\r\n elif strChoice == 'd':\r\n # 3.5.1 get Userinput for which CD to delete\r\n # 3.5.1.1 display Inventory to user\r\n IO.show_inventory(lstTbl)\r\n # 3.5.1.2 ask user which ID to remove\r\n intIDDel = IO.del_choice()\r\n # 3.5.2 search thru table and delete CD\r\n removed = DataProcessor.cd_remove(intIDDel, lstTbl)\r\n IO.cd_removed_conf(removed)\r\n IO.show_inventory(lstTbl)\r\n continue\r\n # 3.6 process save inventory to file\r\n elif strChoice == 's':\r\n # 3.6.1 Display current inventory and ask user for confirmation to save\r\n IO.show_inventory(lstTbl)\r\n strYesNo = input('Save this inventory to file? [y/n] ').strip().lower()\r\n # 3.6.2 Process choice\r\n if strYesNo == 'y':\r\n # 3.6.2.1 save data\r\n FileProcessor.write_file(datFileName, lstTbl)\r\n else:\r\n input('The inventory was NOT saved to file. 
Press [ENTER] to return to the menu.')\r\n                continue\r\n        # 3.7 catch-all should not be possible, as user choice gets vetted in IO, but to be safe:\r\n        else:\r\n            print('General Error')\r\n    except Exception as e: #Exception for exceptions that propagate beyond function level\r\n        print(e.__doc__)\r\n\r\n\r\n\r\n","sub_path":"CDInventory.py","file_name":"CDInventory.py","file_ext":"py","file_size_in_byte":10890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"346793334","text":"import os\nimport sys\nsys.path.append('..')\nsys.path.append('../..')\nimport argparse\nimport utils\nfrom student_utils_sp18 import *\nfrom dijkstra import single_source_dijkstra_path_length\nfrom min_dominating_set import min_weighted_dominating_set\n\n\n\n###### Helper Functions ######\n\ndef graph_creator(adjacency_matrix, number_of_kingdoms):\n\tedge_list = []\n\tfor i in range(number_of_kingdoms):\n\t\tfor j in range(i):\n\t\t\tweight = adjacency_matrix[i][j]\n\t\t\tif weight == "x":\n\t\t\t\tcontinue\n\t\t\tedge_list.append((i,j, weight))\n\tG = nx.Graph()\n\tnodelist = range(number_of_kingdoms)\n\tG.add_weighted_edges_from(edge_list, nodelist=nodelist)\n\treturn G\n\n\n\n##### Graph Solve Object Class #######\nclass GraphSolver:\n\n\tdef __init__(self, input_file):\n\t\tinput_data = utils.read_file(input_file)\n\t\tself.number_of_kingdoms, self.list_of_kingdom_names, self.starting_kingdom, self.adjacency_matrix = data_parser(input_data)\n\t\t\n\n\t\tself.source_index = self.list_of_kingdom_names.index(self.starting_kingdom)\n\t\t\n\t\tself.G = Graph(self.adjacency_matrix, self.number_of_kingdoms, self.source_index)\n\t\t\n\t\tself.dijk = self.G.get_dijkstra()\n\n\t\tself.kingdom_dict = dict()\n\t\tself.c_n = []\n\n\t\tfor i in range(self.number_of_kingdoms):\n\t\t\tself.c_n.append(self.adjacency_matrix[i][i])\n\t\t\tself.kingdom_dict[i] = self.list_of_kingdom_names[i]\n\n\t\tself.unconq = set(range(self.number_of_kingdoms))\n\n\t\tself.dominating_set = self.get_dominating_set()\n\t\tprint(self.dominating_set)\n\n\tdef get_conquer_cost(self, node_index):\n\n\t\treturn self.adjacency_matrix[node_index][node_index]\n\n\tdef get_total_neighbor_cost(self, node_index):\n\t\ttotal = 0\n\t\tfor neighbor in self.G.get_neighbors(node_index):\n\t\t\tif neighbor == self.source_index:\n\t\t\t\tcontinue\n\t\t\ttotal += self.get_conquer_cost(neighbor)\n\n\t\treturn total \n\n\tdef get_dom_weight(self):\n\t\tlst = []\n\t\tfor i in range(self.number_of_kingdoms):\n\t\t\tlst.append(self.get_conquer_cost(i))\n\t\treturn lst\n\n\tdef get_dominating_set(self):\n\t\treturn min_weighted_dominating_set(self.G.graph, self.get_dom_weight())\n\n\n\n####### Graph Object Class ####\nclass Graph:\n\tdef __init__(self, adjacency_matrix, number_of_kingdoms, source_index):\n\t\tself.graph = graph_creator(adjacency_matrix, number_of_kingdoms)\n\t\tself.source = source_index\n\n\n\tdef get_dijkstra(self):\n\t\treturn single_source_dijkstra_path_length(self.graph, self.source)\n\n\tdef get_neighbors(self, node):\n\t\treturn self.graph.neighbors(node)\n\n\tdef get_neighbors_levels(self, node, level):\n\t\tneighbors = set()\n\t\tcurr_level = []\n\t\twhile level > 0:\n\t\t\tcurr_level = []\n\n\tdef get_dominating_set(self):\n\t\treturn 0\n\nsolver = GraphSolver("small_test.in")\n\n\n\n\n","sub_path":"solver_draft.py","file_name":"solver_draft.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"627458465","text":"from flask import Flask, render_template, request, jsonify\nimport sqlite3\n\napp = Flask(__name__)\n\nconnection = sqlite3.connect('database.db')\nprint('Opened database Successfully')\n\nconnection.execute('CREATE TABLE IF NOT EXISTS posts (title TEXT, post TEXT)')\n\nprint('Table created Successfully')\nconnection.close()\n\n@app.route('/')\ndef route():\n\treturn render_template('home.html')\n\n@app.route('/addnew')\ndef addnew():\n\treturn render_template('newmovie.html')\n\n\n@app.route('/addmovie', methods=['POST'])\ndef addmovie():\n\tconnection = sqlite3.connect('database.db')\n\tcursor = connection.cursor()\n\tprint('hi')\n\ttry:\n\t\tname = request.form['name']\n\t\tyear = request.form['year']\n\t\tgenre = request.form['genre']\n\t\tcursor.execute('INSERT INTO movies (name, year, genre) VALUES (?,?,?)', (name, year, genre))\n\t\tconnection.commit()\n\t\tmessage = 'Record succesfully added'\n\texcept:\n\t\tconnection.rollback()\n\t\tmessage = 'error in insert operation'\n\tfinally: \n\t\treturn render_template('result.html', message = message)\n\t\tconnection.close()\n\t\t\n\n\t\t\n\n\n@app.route('/movies')\ndef movies():\n\tconnection = sqlite3.connect('database.db')\n\tcursor = connection.cursor()\n\tcursor.execute('SELECT * FROM movies')\n\tmovie_list = cursor.fetchall()\n\tconnection.close()\n\treturn jsonify(movie_list)\n\napp.run(debug = True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"3806708","text":"# dic='{\"name\":\"alex\"}'\n# f=open(\"hello\",\"w\")\n# f.write(dic)\n\n# f_read=open(\"hello\",\"r\")\n# data=f_read.read()\n# print(type(data))\n# data=eval(data)\n# print(data[\"name\"])\n\n# import json\n#\n#\n# dic={'name':'alex'}#---->{\"name\":\"alex\"}----->'{\"name\":\"alex\"}'\n# i=8 #---->'8'\n# s='hello' #---->\"hello\"------>'\"hello\"'\n# l=[11,22] #---->\"[11,22]\"\n#\n# f=open(\"new_hello\",\"w\")\n\n# dic_str=json.dumps(dic)\n# f.write(dic_str) #json.dump(dic,f)\n\n\n\n# f_read=open(\"new_hello\",\"r\")\n# data=json.loads(f_read.read()) # data=json.load(f)\n\n#\n# print(data[\"name\"])\n# print(data)\n# print(type(data))\n\n# print(s)\n# print(type(s))\n\n\n# data=json.dumps(dic)\n#\n# print(data) #{\"name\": \"alex\"}\n# print(type(data))\n\n\n#注意:\n# import json\n#\n# with open(\"Json_test\",\"r\") as f:\n# data=f.read()\n# data=json.loads(data)\n# print(data[\"name\"])\n\n#----------------------pickle-------\nimport pickle\n\ndic = {'name': 'alvin', 'age': 23, 'sex': 'male'}\n\nprint(type(dic)) # \n\n# j = pickle.dumps(dic)\n# print(type(j)) # \n#\n# f = open('序列化对象_pickle', 'wb') # 注意是w是写入str,wb是写入bytes,j是'bytes'\n# f.write(j) # -------------------等价于pickle.dump(dic,f)\n#\n# f.close()\n# # -------------------------反序列化\nimport pickle\n\nf = open('序列化对象_pickle', 'rb')\n\ndata = pickle.loads(f.read()) # 等价于data=pickle.load(f)\n\nprint(data['age'])\n# # -------------------------shelve模块---------\nimport shelve\n\nf = shelve.open(r'shelve1') # 目的:将一个字典放入文本 f={}\n#\n# f['stu1_info']={'name':'alex','age':'18'}\n# f['stu2_info']={'name':'alvin','age':'20'}\n# f['school_info']={'website':'oldboyedu.com','city':'beijing'}\n# f.close()\n\nprint(f.get('stu1_info')['age'])\n\n\n\n# dic={}\n#\n# dic[\"name\"]=\"alvin\"\n# 
dic[\"info\"]={\"name\":\"alex\"}\n\n","sub_path":"Python/day22-os,json,re等模块/json&pickle.py","file_name":"json&pickle.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"91257055","text":"import os\nimport mysql.connector\nfrom flask import Blueprint\nfrom flask import request\nfrom flask import jsonify\nfrom flask import abort\n\nfrom models.emergency import add_emergency\n\nMIN_LUX = 401\nMAX_LUX = 1000\nMAX_VOC = 0.7\nMIN_DEGREE = 15\nMAX_DEGREE = 30\nMAX_HUMIDITY = 60\n\nenv_data_blueprint = Blueprint('env_data', __name__)\n\n@env_data_blueprint.route(\"/add\", methods=[\"POST\"]) #Add a new record\ndef add():\n fields = [\"room_id\", \"lux\", \"voc\", \"degree\", \"humidity\"]\n data = request.json\n\n for field in fields:\n if (not field in data):\n return abort(400)\n\n if (add_env_data(data[\"room_id\"], data[\"lux\"], data[\"voc\"], data[\"degree\"], data[\"humidity\"])):\n return jsonify({\"message\" : \"OK\"})\n else:\n return abort(500)\n\n@env_data_blueprint.route(\"/\")\ndef get_latest():\n result = get_latest_env_data()\n if(result is not None):\n return jsonify(result)\n else:\n return abort(500)\n\n@env_data_blueprint.route(\"/series/\", methods=[\"GET\"])\ndef get_series():\n room_id = request.args.get(\"room_id\", default=None, type=int)\n field = request.args.get(\"field\", default=None, type=str)\n start = request.args.get(\"start\", default=None, type=str)\n end = request.args.get(\"end\", default=None, type=str)\n\n if (room_id is not None) and (field is not None):\n result = get_env_data_series(room_id, field, start, end)\n if(result is not None):\n return jsonify({\"values\" : result})\n else:\n return abort(500)\n else:\n return abort(400)\n\n\n\ndef add_env_data(room_id: int, lux: int, voc: float, degree: float, humidity: int) -> bool:\n try:\n database = mysql.connector.connect(user = os.getenv(\"DATABASE_USER\"), database = os.getenv(\"DATABASE_NAME\"), password = os.getenv(\"DATABASE_PASSWORD\"))\n cursor = database.cursor()\n\n values = (lux, voc, degree, humidity, room_id)\n sql = (\"\"\"INSERT INTO environmental_data (tmstp, lux, voc, degree, humidity, room_id) VALUES (NOW(), %d, %0.1f, %d, %d, %d)\"\"\" % values)\n cursor.execute(sql)\n database.commit()\n id = cursor.lastrowid\n database.close()\n\n emergency_flag = False\n emergency_string = []\n if ((lux < MIN_LUX) and (lux > 0)):\n emergency_flag = True\n emergency_string.append(\"lux-\")\n if (lux > MAX_LUX):\n emergency_flag = True\n emergency_string.append(\"lux+\")\n if (voc > MAX_VOC):\n emergency_flag = True\n emergency_string.append(\"voc+\")\n if (humidity > MAX_HUMIDITY):\n emergency_flag = True\n emergency_string.append(\"humidity+\")\n if ((degree < MIN_DEGREE) and (degree > 0)):\n emergency_flag = True\n emergency_string.append(\"degree-\")\n if (degree > MAX_DEGREE):\n emergency_flag = True\n emergency_string.append(\"degree+\")\n\n if (emergency_flag):\n tags = ';'.join(emergency_string)\n return add_emergency(0, 0, tags, id, None, None)\n else:\n return True\n except Exception as e:\n print(e)\n if (database.is_connected()):\n database.close()\n return False\n\ndef get_latest_env_data() -> list:\n try:\n database = mysql.connector.connect(user = os.getenv(\"DATABASE_USER\"), database = os.getenv(\"DATABASE_NAME\"), password = os.getenv(\"DATABASE_PASSWORD\"))\n cursor = database.cursor()\n\n cursor.execute(\"SELECT * FROM latest_env_data\")\n columns = [column[0] for column in 
cursor.description]\n data = []\n for row in cursor.fetchall():\n data.append(dict(zip(columns, row)))\n\n room_list = []\n for element in data:\n env_data = {'id' : element['id'], 'tmstp' : element['tmstp'], 'lux' : element['lux'], 'voc' : element['voc'], 'degree' : element['degree'], 'humidity' : element['humidity']}\n room = {'id' : element['room_id'], 'name' : element['name_room'], 'env_data' : env_data}\n room_list.append(room)\n\n database.close()\n return room_list\n except Exception as e:\n print(e)\n if (database.is_connected()):\n database.close()\n return None\n\ndef get_env_data_series(room_id: int, field: str, start: str, end: str) -> list:\n try:\n database = mysql.connector.connect(user = os.getenv(\"DATABASE_USER\"), database = os.getenv(\"DATABASE_NAME\"), password = os.getenv(\"DATABASE_PASSWORD\"))\n cursor = database.cursor()\n\n if (start != None and end != None):\n sql = \"\"\"SELECT %s, tmstp FROM pepperiot.environmental_data WHERE room_id = %d AND tmstp BETWEEN \"%s\" AND \"%s\";\"\"\" % (field, room_id, start, end)\n else:\n sql = \"\"\"SELECT %s, tmstp FROM pepperiot.environmental_data WHERE room_id = %d AND tmstp > DATE_SUB(NOW(), INTERVAL 2 MONTH) AND tmstp <= NOW();\"\"\" % (field, room_id)\n\n cursor.execute(sql)\n columns = [column[0] for column in cursor.description]\n data = []\n for row in cursor.fetchall():\n data.append(dict(zip(columns, row)))\n\n series = []\n for element in data:\n tmstp = element[\"tmstp\"]\n hour = (\"%s:%s\" % (tmstp.hour, tmstp.minute))\n series.append({\"hour\": hour, \"value\": element[field]})\n\n database.close()\n return series\n except Exception as e:\n print(e)\n if (database.is_connected()):\n database.close()\n return None","sub_path":"server/models/env_data.py","file_name":"env_data.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"598213763","text":"import requests\nfrom lxml import etree\nimport csv\nimport time\nimport sql\n# 首页url\nhome_page_url = 'http://www.socom.cn'\n# 详情页url\ndetail_url = 'http://www.socom.cn/company/16001195.html'\ndef get_html(url):\n try:\n resp = requests.get(url)\n if resp.status_code == 200:\n return resp\n else:\n return None\n except TimeoutError:\n get_html(url) \n\n\n \n except ConnectionError:\n get_html(url)\n\n# 获取到城市列表\ndef parse_home_page(home_page_url):\n # /html/body/div[5]/div[2]/a[1]\n citys_url_list = []\n resp = get_html(home_page_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n num = len(root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"provinceBox\"]'))\n for i in range(1, num + 1):\n province = root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"provinceBox\"][{}]/a/text()'.format(i))[0]\n # [-452:-4]\n citys = root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"cityBox\"][{}]/a/text()'.format(i))\n citys_url = root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"cityBox\"][{}]/a/@href'.format(i))\n # print(citys)\n citys = []\n for url in citys_url:\n citys.append(home_page_url + url)\n citys_url_list.append(citys)\n return citys_url_list\n\n# 判断地址是不是最终地址(省 -> 地级市 -> 县级市)\ndef city_is_end(city_url):\n resp = get_html(city_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n province = len(root.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"contentBox\", \" \" )) and (((count(preceding-sibling::*) + 1) = 3) and parent::*)]//*[contains(concat( \" \", @class, \" \" ), concat( \" \", 
\"cityBox\", \" \" ))]/a/text()'))\n print(province)\n if province == 35:\n return True\n else:\n return False\n\n# 获取县级市\ndef get_city_part(city_url):\n resp = get_html(city_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n city_parts= root.xpath('//body/div[@class=\"contentBox\"][1]/div[@class=\"cityBox\"]/a/@href')\n city_parts_url = []\n for part in city_parts:\n city_parts_url.append(home_page_url + part)\n return city_parts_url\n else:\n return None\n \n# 获取最终地区的企业分类\ndef get_part_url(city_url):\n resp = get_html(city_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n corps = root.xpath('//div[@class=\"contentBox\"][2]/div[@class=\"cityBox\"]/a[@class=\"countyBox\"]/@href')\n corps_url = []\n for part in corps:\n corps_url.append(home_page_url + part)\n return corps_url\n else:\n return None\n\n \n# 获取一个分类的所有企业的链接\ndef get_url_of_corp(part_url):\n resp = get_html(part_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n parts = root.xpath('//div[@class=\"contentBox\"][3]/div[@class=\"cityBox\"]/a/@href')\n parts_url = []\n for part in parts:\n parts_url.append(home_page_url + part)\n return parts_url\n else:\n return None\n\n# 获取企业分类进入的url\ndef get_all_detail_url(home_page_url):\n # urls_list = parse_home_page(home_page_url)\n # for urls in urls_list:\n # for url in urls:\n # if city_is_end(url):\n # print(url)\n # else:\n # urls.extend(get_city_part(url))\n # urls.remove(url)\n # return urls_list\n last_city_list = []\n urls_list = sum(parse_home_page(home_page_url), [])\n # print(urls_list)\n for url in urls_list:\n print(url, end='\\t')\n if city_is_end(url):\n print('到头了')\n last_city_list.append(url)\n urls_list.remove(url)\n continue\n else:\n urls_list.extend(get_city_part(url))\n print('获取县级市')\n urls_list.remove(url)\n return last_city_list\n\n# 提取详情页的数据\ndef parser_detail(resp, db):\n detail = {}\n if resp:\n html = resp.text\n root = etree.HTML(html)\n info = root.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"cityBox\", \" \" ))]//div[(((count(preceding-sibling::*) + 1) = 1) and parent::*)]/text()') if root.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"cityBox\", \" \" ))]//div[(((count(preceding-sibling::*) + 1) = 1) and parent::*)]/text()') else None\n # print(info)\n if info:\n detail['公司名称'] = root.xpath('//div[@class=\"contentBox\"][2]/div[@class=\"provinceBox\"]/text()')[0]\n detail['地址'] = info[0].strip().split(':')[-1]\n detail['电话'] = info[1].strip().split(':')[-1]\n detail['传真'] = info[2].strip().split(':')[-1]\n detail['手机'] = info[3].strip().split(':')[-1]\n detail['网址'] = info[4].strip().split(':')[-1]\n detail['邮箱'] = info[5].strip().split(':')[-1]\n detail['联系人'] = info[6].strip().split(':')[-1]\n detail['公司人数'] = info[7].strip().split(':')[-1]\n detail['注册资金'] = info[8].strip().split(':')[-1]\n detail['经济类型'] = info[9].strip().split(':')[-1]\n detail['公司产品'] = info[10].strip().split(':')[-1]\n detail['公司简介'] = info[11].strip().split(':')[-1]\n # with open('sqw.csv', 'a', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # print([info for info in detail.values()])\n # writer.writerow([info for info in detail.values()])\n sql.insert_detail(db, detail)\n return detail\n else:\n return None\n\n\n\ndef main():\n db = sql.init()\n # with open('sqw.csv', 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['公司名称', '地址', '电话', '传真', '手机', '网址', '邮箱', '联系人', '公司人数', '注册资金', '经济类型', '公司产品', '公司简介'])\n detail_urls = 
get_all_detail_url(home_page_url)\n # 遍历所有的城市\n for url in detail_urls:\n print('下载', url)\n part_url = get_part_url(url)\n # 遍历所有的分类\n for part in part_url:\n print('分类信息', part)\n corp_url = get_url_of_corp(part)\n # 遍历所有的企业\n for corp in corp_url:\n print('公司链接', corp)\n detail = parser_detail(get_html(corp), db)\n time.sleep(1)\n # detail = parser_detail(get_html(url))\n # writer.writerow([info for info in detail.values()])\n # print(city_is_end('http://www.socom.cn/xinjiang/kelamayi/baijiantan/'))\n\nif __name__ == '__main__':\n main()","sub_path":"SQWSpider/sqwSpider.py","file_name":"sqwSpider.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"178415269","text":"import json\nimport requests\n\nfrom modules import settings\nfrom modules.sdr_util import make_pairs\n\nSDR_BUDGET = settings['SDR_BUDGET']\nONEFORGE_API_KEY = settings['ONEFORGE_API_KEY']\n\n\ndef tset():\n pairs = make_pairs('USD')\n print(json.dumps(pairs))\n\n params = {\n \"pairs\": \",\".join(pairs),\n \"api_key\": ONEFORGE_API_KEY\n }\n print(json.dumps(params))\n\n quotes = requests.get(\"https://forex.1forge.com/1.0.3/quotes\", params=params).json()\n print(json.dumps(quotes))\n\n prices = {}\n for q in quotes:\n prices[q['symbol'][3:]] = q['price']\n\n prices['USD'] = 1\n print(json.dumps(prices))\n\n equivalents = {}\n for c, a in SDR_BUDGET.items():\n e = prices[c] * a\n equivalents[c] = e\n\n print(json.dumps(equivalents))\n\n\nif __name__ == '__main__':\n tset()\n","sub_path":"priceserver/modules/sdr/wip/1forge.py","file_name":"1forge.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"161775377","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass CostFunction:\n \"\"\"\n note:\n we call a chromosome, solution vector.\n \"\"\"\n\n def __init__(self, dimensions, lower_bound, upper_bound, name):\n self.__dimensions = dimensions\n # maximum and minimum of response boundary in each dimension:\n self.__lower_bound = lower_bound\n self.__upper_bound = upper_bound\n self.__costFunction_name = name # only a name for plotting\n\n @property\n def dimensions(self):\n return self.__dimensions\n\n @property\n def lower_bound(self):\n return self.__lower_bound\n\n @property\n def upper_bound(self):\n return self.__upper_bound\n\n @staticmethod\n def _discrete_decoding(original_function):\n \"\"\"\n Decorator function that decode continues space to discrete space by sorting.\n for example consider given solution_vector [5.1, 6.3, 4.008, 9.1, 6.02] to the original function.\n\n This decorator make the original function to work with discrete version of the given solution_vector which in this\n example is [2 0 4 1 3].\n This can be used in discrete problems like N-Queen or TSP.\n \"\"\"\n\n def wrapper(self, solution_vectors: np.ndarray, *args, **kwargs):\n if len(solution_vectors.shape) > 1:\n solution_vectors = np.argsort(solution_vectors, axis=1).reshape(-1, self.dimensions)\n else:\n solution_vectors = np.argsort(solution_vectors).reshape(1, self.dimensions).flatten()\n return original_function(self, solution_vectors, *args, **kwargs)\n return wrapper\n\n def compute_cost(self, solution_vectors):\n \"\"\"\n temporary docstring\n :param solution_vectors:\n :return:\n \"\"\"\n raise NotImplementedError\n\n def visual_result(self, solution_vector):\n raise NotImplementedError\n\n def plot_cost_vs_iteration(self, 
costs):\n plt.plot(costs)\n plt.title(self.__costFunction_name)\n plt.xlabel('iteration')\n plt.ylabel('Cost')\n plt.show()\n\n def print_step_result(self, solution_vector, iteration: int = \"\"):\n \"\"\"\n Use this to print result in each iteration.\n This prints a simple result of the last solution (an chromosome) that its cost has been computed.\n \"\"\"\n cost = self.compute_cost(solution_vector)\n print(f\"solution {solution_vector} with the cost: {cost} in the iteration {iteration}\")\n\n\nclass Sphere(CostFunction):\n\n def __init__(self):\n # set Sphere parameters:\n super().__init__(dimensions=5, lower_bound=-10, upper_bound=+10, name=\"Sphere\")\n\n def compute_cost(self, solution_vectors: np.ndarray):\n solution_vectors = np.array(solution_vectors)\n solution_vectors = solution_vectors.reshape(-1, self.dimensions)\n cost = np.sum(solution_vectors ** 2, axis=1)\n return cost\n\n def visual_result(self, solution_vector):\n pass\n\n\nclass NQueen(CostFunction):\n\n def __init__(self, num_of_queen: int = 8):\n super().__init__(dimensions=num_of_queen, lower_bound=0, upper_bound=1, name=\"N Queen\")\n\n @CostFunction._discrete_decoding\n def visual_result(self, solution_vector):\n \"\"\"\n Show a representation of N Queen problem with the given solution.\n :param solution_vector: solution got by get_print_solution method. its a agent_row or (chromosome)\n :return:\n \"\"\"\n size = len(solution_vector)\n for row in range(size):\n line = \" \"\n for col in range(size):\n if solution_vector[row] == col:\n line += \"👑 \"\n else:\n line += \"⬜ \"\n print(line)\n\n @CostFunction._discrete_decoding\n def compute_cost(self, solution_vectors):\n\n if len(np.shape(solution_vectors)) == 1: # < if there is only one row in agents_rows, do>:\n return self.__compute_cost_of_a_row(solution_vectors)\n\n costs = []\n for agent in solution_vectors:\n # add computed fitness to the list of costs:\n costs = np.append(costs, self.__compute_cost_of_a_row(agent))\n return costs\n\n @CostFunction._discrete_decoding\n def print_step_result(self, solution_vector, iteration: int = \"\"):\n super().print_step_result(solution_vector, iteration)\n\n def __compute_cost_of_a_row(self, agent_row):\n # compute cost for an agent or chromosome\n\n x = list(range(self.dimensions))\n y = agent_row\n # y = np.argsort(agent_row) # change coding representation to discrete number. 
#todo: did you replaced?\n\n cost = 0\n for i in range(self.dimensions - 1):\n for j in range(i + 1, self.dimensions):\n if np.abs(x[i] - x[j]) == np.abs(y[i] - y[j]):\n cost = cost + 1\n return cost\n\n\nclass TravellingSalesmanProblem(CostFunction):\n\n def __init__(self, num_of_cities: int = None, distance_range: int = None):\n super().__init__(num_of_cities, lower_bound=0, upper_bound=1, name=\"TSP\")\n\n # virtual distance between cites:\n self.x_axises = None\n self.y_axises = None\n\n if distance_range is not None:\n self.distance_range = distance_range\n self.__generate_cities()\n else:\n self.distance_range = None\n\n def __generate_cities(self):\n self.x_axises = np.random.randint(0, self.distance_range, size=self.dimensions)\n self.y_axises = np.random.randint(0, self.distance_range, size=self.dimensions)\n\n self.__compute_distance()\n # self.plot_cities()\n\n def __compute_distance(self):\n # compute distance:s\n self.distance_matrix = np.zeros(\n [self.dimensions, self.dimensions]) # make a empty n×n matrix, __dimension = number of cities\n\n # compute euclidean distance between cities and store it in distance matrix:\n for row in range(0, self.dimensions - 1): # this was dimensions - 1\n for column in range(row + 1, self.dimensions): # this was row + 1\n self.distance_matrix[row, column] = np.sqrt(\n np.exp2(self.x_axises[row] - self.x_axises[column]) + np.exp2(\n self.y_axises[row] - self.y_axises[column])) # upper triangular matrix\n # diagonal is zero:\n # if row == column:\n # self.distance_matrix[column, row] = np.inf\n self.distance_matrix[column, row] = self.distance_matrix[row, column] # and lower triangular matrix..\n # is the same is the upper.\n # join x and y axises, first row: x axises second is y axises:\n self.cities = np.append(self.x_axises.reshape(1, -1), self.y_axises.reshape(1, -1), axis=0)\n\n def plot_cities(self):\n plt.scatter(self.cities[0, :], self.cities[1, :], marker='o')\n plt.show()\n\n @CostFunction._discrete_decoding\n def compute_cost(self, solution_vectors):\n \"\"\"\n\n :param solution_vectors: matrix is a combination order to travel to cities.\n :return: cost of the given order.\n \"\"\"\n # change coding representation to discrete number:\n\n if self.distance_range is None:\n raise Exception(\"There are no cities; Initialize distance_range on \"\n \"object definition whether use create_cities() function to make your own cities.\")\n cost = 0\n\n one_agents = len(solution_vectors.shape) == 1 # do we have just one agent? (agents_rows contain only one row?)\n\n # adding the first element to the last. e.g. 
5 > 4 > 3 > [5].\n if one_agents:\n # solution_vectors = np.argsort(solution_vectors)\n solution = np.append(solution_vectors, solution_vectors[0])\n\n # need a loop to travel to the all cities:\n solution = solution.astype(int) # todo: remove this\n for index in range(0, self.dimensions):\n i = solution[index] # distance of the first city to\n ii = solution[index + 1] # the second city is following:\n cost += self.distance_matrix[i, ii]\n else:\n # solution_vectors = np.argsort(solution_vectors, axis=1)\n solution = np.hstack((solution_vectors, solution_vectors[:, 0].reshape(-1, 1)))\n\n # need a loop to travel to the all cities:\n solution = solution.astype(int) # todo: remove this\n for index in range(0, self.dimensions):\n i = solution[:, index] # distance of the first city to\n ii = solution[:, index + 1] # the second city is following:\n cost += self.distance_matrix[i, ii]\n\n return cost\n\n @CostFunction._discrete_decoding\n def visual_result(self, solution_vector):\n \"\"\"\n Show a representation of TSP problem with the given solution.\n :param solution_vector: solution got by get_print_solution method. its a agent_row or (chromosome)\n \"\"\"\n # adding the first element to the last. e.g. 5 > 4 > 3 > [5]:\n solution = np.append(solution_vector, solution_vector[0])\n\n solution = solution.astype(int)\n\n plt.scatter(self.cities[0, :], self.cities[1, :], marker='o')\n # annotate_cities = np.arange(1, self.dimensions + 1)\n\n # add number annotate to cities:\n # for num in annotate_cities:\n # aplot.annotate(num, (self.cities[0, num], self.cities[1, num]))\n\n plt.plot(self.cities[0, solution], self.cities[1, solution])\n plt.show()\n\n @CostFunction._discrete_decoding\n def print_step_result(self, solution_vector, iteration: int = \"\"):\n super().print_step_result(solution_vector, iteration)\n\n\n\"\"\"\n def create_cities(self, x: list, y: list):\n create cities by user.\n :param x: x vector axis parameters\n :param y: y vector axis parameters\n :return:\n\n if len(x) != len(y):\n raise Exception(\"x and y must be same size\")\n self.x_axises = np.array(x)\n self.y_axises = np.array(y)\n\n num_of_cities = len(x)\n self.dimensions = num_of_cities\n\n # maximum and minimum of response boundary in each __dimension:\n self.min_boundary = 0\n self.max_boundary = num_of_cities - 1\n\n self.distance_range = True\n\n self.__compute_distance()\n\"\"\"\n","sub_path":"grasshopper optimization algorithm/cost_functions.py","file_name":"cost_functions.py","file_ext":"py","file_size_in_byte":10520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"405627422","text":"import sys\nimport os\nimport config\nfrom xml.etree import ElementTree\n\n\nclass shottask(object):\n\t\n\ttasks=[]\n\txmlfile=\"\"\n\n\t_shot=\"shot\"\n\t_dstpath=\"dstpath\"\n\t_srcpath=\"srcpath\"\n\t_synpath=\"synpath\"\n\n\tdef __init__(self, xmlfile):\n\t\tself.xmlfile=xmlfile\n\t\tif not os.path.isfile(xmlfile):\n\t\t\tmessage=\"% not exist!\" %xmlfile\n\t\t\tconfig.log.e(message)\n\t\t\tprint(message)\n\n\n\tdef read_attrib(self, dicts, node):\n\t\tattrib=node.attrib\n\t\tdicts[self._shot]=node.tag\n\t\tfor key in attrib:\n\t\t\tdicts[key]=attrib[key]\n\n\tdef read_xml(self):\n\t\ttext=open(self.xmlfile).read()\n\n\t\troot=ElementTree.fromstring(text)\n\t\t#print(root)\n\n\t\tfor child in root:\n\t\t\tshot={}\n\t\t\tself.read_attrib(shot, child)\n\t\t\tself.tasks.append(shot)\n\n\t\tprint(self.tasks)\n\t\treturn self.tasks\n\n\nif 
__name__=='__main__':\n\tfilename=sys.argv[1]\n\tst=shottask(filename)\n\ttask=st.read_xml()\n\n","sub_path":"pipeline/readshottask.py","file_name":"readshottask.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"471718087","text":"_base_ = [\n '../fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py'\n]\n\nmodel = dict(\n type='Distilling_Fcos',\n \n distill = dict(\n teacher_cfg='./configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py',\n teacher_model_path='/workspace/S/duzhixing/workspace/model/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth',\n \n distill_warm_step=500,\n distill_feat_weight=0.01,\n distill_cls_weight=0.05,\n # distill_bbox_weight=0.002,\n \n stu_feature_adap=dict(\n type='ADAP',\n in_channels=256,\n out_channels=256,\n num=5,\n kernel=3\n ),\n )\n)\n\n# # optimizer\n# optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\n# optimizer_config = dict(grad_clip=None)\n# # learning policy\n# lr_config = dict(\n# policy='step',\n# warmup='linear',\n# warmup_iters=500,\n# warmup_ratio=0.001,\n# step=[16, 22])\n# total_epochs = 24\n\n\nseed=520\n\nfind_unused_parameters=True\n\n\n\n","sub_path":"configs/distill_fcos/resnet50_resnet101_fcos_mstrain_max.py","file_name":"resnet50_resnet101_fcos_mstrain_max.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"298332975","text":"from Resources import *\n\n\nclass LifeSprite(pygame.sprite.Sprite):\n PW = None\n PH = None\n\n def __init__(self, loc):\n super().__init__()\n self.PW = lifeim.get_width()\n self.PH = lifeim.get_height()\n self.image = lifeim\n self.rect = self.image.get_rect()\n self.rect.center = loc\n self.rect.centery -= self.PH/2\n self.rect.centerx -= self.PW / 2\n self.mask = pygame.mask.from_surface(self.image)\n\n self.life = 1\n\n def update(self, val):\n if val:\n self.rect.centery += 1\n","sub_path":"LifeSprite.py","file_name":"LifeSprite.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"71316616","text":"'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n3 seconds per epoch on a Titan X Pascal.\n\nue.exec('mnistKeras.py')\n'''\n\nfrom __future__ import print_function\nimport numpy as np\n\nclass Logwrapper(object):\n def __init__(self):\n self.terminal = ue.log\n\n def write(self, message):\n ue.log(message)\n\n def flush(self):\n ue.log(\"\")\n\nimport unreal_engine as ue\nimport sys\n\nimport _thread as thread\n\n#wrap default logs so we get all print()\nsys.stdout = Logwrapper()\nsys.stderr = Logwrapper()\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\n\ndef train():\n print(\"Training started...\")\n\n np.random.seed(1337) # for reproducibility\n batch_size = 128\n nb_classes = 10\n nb_epoch = 12\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n # number of convolutional filters to use\n nb_filters = 32\n # size of pooling area for max pooling\n pool_size = (2, 2)\n # convolution kernel size\n kernel_size 
= (3, 3)\n\n # the data, shuffled and split between train and test sets\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n if K.image_dim_ordering() == 'th':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n X_train /= 255\n X_test /= 255\n ue.log('X_train shape:' + str(X_train.shape))\n ue.log(str(X_train.shape[0]) + 'train samples')\n ue.log(str(X_test.shape[0]) + 'test samples')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_test = np_utils.to_categorical(y_test, nb_classes)\n\n model = Sequential()\n\n model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],\n border_mode='valid',\n input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n print(X_train)\n\n print(Y_train)\n\n #model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,\n # verbose=1, validation_data=(X_test, Y_test))\n #score = model.evaluate(X_test, Y_test, verbose=0)\n #ue.log('Test score:' + str(score[0]))\n #ue.log('Test accuracy:' + str(score[1]))\n\n#start thread\nthread.start_new_thread(train, ())\n\n#train()","sub_path":"Content/Scripts/mnistKeras.py","file_name":"mnistKeras.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"594829156","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom cell import ConvLSTM\nimport numpy as np\nfrom tools.utils import gaussian_mask\n\n\nclass VideoSaliency(nn.Module):\n def __init__(self):\n super(VideoSaliency, self).__init__()\n\n self.size = 400\n\n ############### R1 ###############\n self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=(1, 1) )\n self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=(1, 1) )\n\n self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=(1, 1) )\n self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=(1, 1) )\n\n self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n\n self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n\n self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n\n self.fc6 = nn.Conv2d(512, 4096, kernel_size=4, dilation=4, padding=(6, 6) )\n self.fc7 = nn.Conv2d(4096, 4096, 
kernel_size=1, dilation=4 )\n self.fc8 = nn.Conv2d(4096, 1, kernel_size=1 )\n\n self.pool4_conv = nn.Conv2d(512, 128, kernel_size=3, padding=(1, 1) )\n self.pool4_fc = nn.Conv2d(128, 128, kernel_size=1 )\n self.pool4_ms_saliency = nn.Conv2d(128, 1, kernel_size=1 )\n\n ############### R2 ###############\n self.conv1_1_r2 = nn.Conv2d(4, 64, kernel_size=3, padding=(1, 1) )\n self.conv1_2_r2 = nn.Conv2d(64, 64, kernel_size=3, padding=(1, 1) )\n\n self.conv2_1_r2 = nn.Conv2d(64, 128, kernel_size=3, padding=(1, 1) )\n self.conv2_2_r2 = nn.Conv2d(128, 128, kernel_size=3, padding=(1, 1) )\n\n self.conv3_1_r2 = nn.Conv2d(128, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_2_r2 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_3_r2 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n\n self.conv4_1_r2 = nn.Conv2d(256, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_2_r2 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_3_r2 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n\n self.conv5_1_r2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_2_r2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_3_r2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n\n self.fc6_r2 = nn.Conv2d(512, 4096, kernel_size=4, dilation=4, padding=(6, 6) )\n self.fc7_r2 = nn.Conv2d(4096, 4096, kernel_size=1, dilation=4 )\n self.fc8_r2 = nn.Conv2d(4096, 1, kernel_size=1 )\n\n self.pool4_conv_r2 = nn.Conv2d(512, 128, kernel_size=3, padding=(1, 1) )\n self.pool4_fc_r2 = nn.Conv2d(128, 128, kernel_size=1 )\n self.pool4_ms_saliency_r2 = nn.Conv2d(128, 1, kernel_size=1 )\n\n self.convLSTM = ConvLSTM((400, 400), 4, [1], (3, 3), 1, batch_first=True, return_all_layers=False)\n self.c3d = nn.Conv3d(4, 1, kernel_size=3, padding=(1, 1, 1))\n\n self.pool4_saliency_ST = nn.Conv2d(2, 1, kernel_size=1 )\n self.fc8_saliency_ST = nn.Conv2d(2, 1, kernel_size=1 )\n\n self.loc_estimate = nn.Linear(2500, 4)\n\n self.attention_first = nn.Conv2d(6, 256, kernel_size=3, padding=(1, 1) )\n self.attention_second = nn.Conv2d(256, 6, kernel_size=1 )\n\n def forward(self, input, input_prior):\n\n ############### R1 ###############\n x = F.relu(self.conv1_1(input))\n x = F.relu(self.conv1_2(x))\n x = F.max_pool2d(x, 2)\n\n x = F.relu(self.conv2_1(x))\n x = F.relu(self.conv2_2(x))\n x = F.max_pool2d(x, 2)\n\n x = F.relu(self.conv3_1(x))\n x = F.relu(self.conv3_2(x))\n x = F.relu(self.conv3_3(x))\n x = F.max_pool2d(x, 2)\n\n x = F.relu(self.conv4_1(x))\n x = F.relu(self.conv4_2(x))\n x = F.relu(self.conv4_3(x))\n x = F.max_pool2d(x, 1)\n\n branch_pool4 = x.clone()\n\n x = F.relu(self.conv5_1(x))\n x = F.relu(self.conv5_2(x))\n x = F.relu(self.conv5_3(x))\n x = F.max_pool2d(x, 1)\n\n x = F.dropout(F.relu(self.fc6(x)), 0.5)\n x = F.dropout(F.relu(self.fc7(x)), 0.5)\n x = self.fc8(x)\n\n branch_pool4 = F.dropout(F.relu(self.pool4_conv(branch_pool4)), 0.5)\n branch_pool4 = F.dropout(F.relu(self.pool4_fc(branch_pool4)), 0.5)\n branch_pool4 = self.pool4_ms_saliency(branch_pool4)\n\n up_fc8 = F.upsample_bilinear(x, size=[self.size, self.size])\n up_pool4 = F.upsample_bilinear(branch_pool4, size=[self.size, self.size])\n\n ############### R2 ###############\n x_r2 = F.relu(self.conv1_1_r2(input_prior))\n x_r2 = F.relu(self.conv1_2_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 2)\n\n x_r2 = F.relu(self.conv2_1_r2(x_r2))\n x_r2 = F.relu(self.conv2_2_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 2)\n\n x_r2 = F.relu(self.conv3_1_r2(x_r2))\n x_r2 = 
F.relu(self.conv3_2_r2(x_r2))\n x_r2 = F.relu(self.conv3_3_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 2)\n\n x_r2 = F.relu(self.conv4_1_r2(x_r2))\n x_r2 = F.relu(self.conv4_2_r2(x_r2))\n x_r2 = F.relu(self.conv4_3_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 1)\n\n branch_pool4_r2 = x_r2.clone()\n\n x_r2 = F.relu(self.conv5_1_r2(x_r2))\n x_r2 = F.relu(self.conv5_2_r2(x_r2))\n x_r2 = F.relu(self.conv5_3_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 1)\n\n x_r2 = F.dropout(F.relu(self.fc6_r2(x_r2)), 0.5)\n x_r2 = F.dropout(F.relu(self.fc7_r2(x_r2)), 0.5)\n x_r2 = self.fc8_r2(x_r2)\n\n branch_pool4_r2 = F.dropout(F.relu(self.pool4_conv_r2(branch_pool4_r2)), 0.5)\n branch_pool4_r2 = F.dropout(F.relu(self.pool4_fc_r2(branch_pool4_r2)), 0.5)\n branch_pool4_r2 = self.pool4_ms_saliency_r2(branch_pool4_r2)\n\n up_fc8_r2 = F.upsample_bilinear(x_r2, size=[self.size, self.size])\n up_pool4_r2 = F.upsample_bilinear(branch_pool4_r2, size=[self.size, self.size])\n\n rnn_inputs = torch.cat((up_pool4, up_pool4_r2, up_fc8, up_fc8_r2), 1)\n rnn_inputs = rnn_inputs.unsqueeze(0)\n rnn_list, state = self.convLSTM(rnn_inputs)\n rnn_output = rnn_list[0].squeeze(0)\n\n c3d_inputs = rnn_inputs.transpose(1, 2)\n c3d_output = self.c3d(c3d_inputs)\n c3d_output = c3d_output.transpose(1, 2)\n c3d_output = c3d_output.squeeze(0)\n\n pool4_saliency_cancat = torch.cat((branch_pool4, branch_pool4_r2), 1)\n pool4_saliency_ST = self.pool4_saliency_ST(pool4_saliency_cancat)\n up_pool4_ST = F.upsample_bilinear(pool4_saliency_ST, size=[self.size, self.size])\n\n fc8_saliency_cancat = torch.cat((x, x_r2), 1)\n fc8_saliency_ST = self.fc8_saliency_ST(fc8_saliency_cancat)\n up_fc8_ST = F.upsample_bilinear(fc8_saliency_ST, size=[self.size, self.size])\n\n # fc8_saliency_ST = F.upsample_bilinear(fc8_saliency_ST, size=[60, 60])\n pool4_saliency_ST = pool4_saliency_ST.view(pool4_saliency_ST.size(0), -1)\n\n local_poc = F.sigmoid(self.loc_estimate(pool4_saliency_ST))\n # local_poc = self.loc_estimate(fc8_saliency_ST)\n # cap_feats = self.generate_local_bbox(local_poc)\n cap_feats = self.generate_local_gaussian(local_poc)\n # rnn_output = F.upsample_bilinear(rnn_output, size=[self.size, self.size])\n\n # up_fc8_ST = up_fc8_ST + rnn_output\n global_saliency = torch.cat((up_pool4_ST, up_fc8_ST), 1)\n\n local_poo4_ST = torch.mul(up_pool4_ST, cap_feats)\n local_fc8_ST = torch.mul(up_fc8_ST, cap_feats)\n # local_rnn_output = torch.mul(rnn_output, cap_feats)\n local_saliency = torch.cat((local_poo4_ST, local_fc8_ST), 1)\n\n final_saliency = torch.cat((global_saliency, local_saliency, rnn_output, c3d_output), 1)\n\n #channel-wise attention\n atten_weights = F.relu(self.attention_first(final_saliency))\n atten_weights = F.softmax(self.attention_second(atten_weights))\n\n # atten_weights = F.upsample_bilinear(atten_weights, size=[480, 480])\n final_saliency = torch.mul(final_saliency, atten_weights)\n final_saliency = torch.mean(final_saliency, 1, keepdim=True)\n\n # return final_saliency, cap_feats, local_poc, cap_feats2\n return final_saliency, cap_feats, local_poc\n\n def generate_local_bbox(self, local_poc):\n size = 400\n points = local_poc.data.cpu().numpy()\n # points_val = np.zeros_like(points, dtype=points.dtype)\n cap_map_batch = np.zeros([points.shape[0], 1, size, size], dtype=np.float16)\n for i in range(0, points.shape[0]):\n point = points[i, :]\n if point[0] < point[2] and point[1] < point[3] \\\n and (point[2] - point[0]) < 0.95 \\\n and (point[3] - point[1]) < 0.95 \\\n and (point[2] - point[0]) > 0.05 \\\n and (point[3] - point[1]) > 0.05:\n # 
suitable point\n print(point)\n print('area:' + str((point[2] - point[0]) * (point[3] - point[1])))\n point = point * size\n point = point.astype(np.int16)\n cap_map = np.ones([point[2] - point[0], point[3] - point[1]], dtype=np.float16)\n cap_map = np.pad(cap_map, ([point[0], size - point[2]], [point[1], size - point[3]]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n else:\n # not suitable, choose center crop\n cap_map = np.ones([int(size / 2), int(size / 2)], dtype=np.float16)\n cap_map = np.pad(cap_map, ([int(size / 4), int(size / 4)], [int(size / 4), int(size / 4)]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n\n cap_map_batch = torch.from_numpy(cap_map_batch)\n cap_map_batch = cap_map_batch.type(torch.cuda.FloatTensor)\n\n return cap_map_batch\n\n def generate_local_gaussian(self, local_poc):\n size = 400\n points = local_poc.data.cpu().numpy()\n # points_val = np.zeros_like(points, dtype=points.dtype)\n cap_map_batch = np.zeros([points.shape[0], 1, size, size], dtype=np.float16)\n for i in range(0, points.shape[0]):\n point = points[i, :]\n if point[0] < point[2] and point[1] < point[3] \\\n and (point[2] - point[0]) < 0.95 \\\n and (point[3] - point[1]) < 0.95 \\\n and (point[2] - point[0]) > 0.05 \\\n and (point[3] - point[1]) > 0.05:\n # suitable point\n print(point)\n # print(':' + str((point[2] - point[0]) * (point[3] - point[1])))\n # point = point * size\n # point = point.astype(np.int16)\n center_x = (point[3] - point[1]) / 2 + point[1]\n center_y = (point[2] - point[0]) / 2 + point[0]\n print('center point:(' + str(center_x) + ',' + str(center_y) + ')')\n cap_map = gaussian_mask(center_x, center_y, sigma=0.75)\n # cap_map = np.pad(cap_map, ([point[0], size - point[2]], [point[1], size - point[3]]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n else:\n # not suitable, choose center gaussian\n cap_map = gaussian_mask(0.5, 0.5, sigma=0.75)\n # cap_map = np.pad(cap_map, ([int(size / 4), int(size / 4)], [int(size / 4), int(size / 4)]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n\n cap_map_batch = torch.from_numpy(cap_map_batch)\n cap_map_batch = cap_map_batch.type(torch.cuda.FloatTensor)\n\n return cap_map_batch","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"190423400","text":"#Autor: Cecilia Daniela Olivares Hernández, a01745727\r\n#Descripción: Calcula el pago semanal de un trabajador más sus horas extras y muestra su pago total.\r\n\r\n#Esta funcion calcula el pago de las horas normales\r\ndef calcularPagoNormal(horasNormales, horasExtras, pagoHora):\r\n pagoNormal = horasNormales * pagoHora\r\n return pagoNormal\r\n\r\n#Esta funcion calcula el pago de las horas extras\r\ndef calcularPagoExtra(horasNormales, horasExtras, pagoHora):\r\n pagoExtra = (horasExtras * pagoHora) + ((pagoHora * .65)* horasExtras)\r\n return pagoExtra\r\n\r\n#Funcion principal que resuelve el problema\r\ndef main():\r\n horasNormales = int(input(\"Teclea las horas normales trabajadas: \"))\r\n horasExtras = int(input(\"Teclea las horas extras trabajadas: \"))\r\n pagoHora = int(input(\"Teclea el pago por hora: \"))\r\n pagoNormal = calcularPagoNormal(horasNormales, horasExtras, pagoHora)\r\n pagoExtra = calcularPagoExtra(horasNormales, horasExtras, pagoHora)\r\n pagoTotal = pagoNormal + pagoExtra\r\n print(\"\"\"\r\nPago normal: \\x1b[1;30m $%.2f\"\"\" % (pagoNormal))\r\n print(\"\\x1b[0;mPago extra: \\x1b[1;30m $%.2f\" % 
(pagoExtra))\r\n print(\"-----------------------\")\r\n print(\"\\x1b[0;mPago total: \\x1b[1;30m $%.2f\" % (pagoTotal))\r\n\r\nmain()","sub_path":"Pago.py","file_name":"Pago.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"498078750","text":"import json\nimport sys\n\nfrom acpc_python_client import wrappers\nfrom acpc_python_client.data.action_type import ActionType\nfrom acpc_python_client.data.betting_type import BettingType\n\n_NUMBERS = (int, float)\n\n\ndef wrapper_to_str(wrapper_object, formatted=True, contents_only=False):\n if isinstance(wrapper_object, str):\n return wrapper_object\n elif isinstance(wrapper_object, _NUMBERS):\n return str(wrapper_object)\n elif isinstance(wrapper_object, bool):\n return 'true' if wrapper_object else 'false'\n elif hasattr(wrapper_object, '_length_'):\n # Object is special C array wrapper class\n return '[ %s ]' % ', '.join([wrapper_to_str(e, False, True) for e in wrapper_object])\n else:\n # Object is wrapped structure\n has_contents = hasattr(wrapper_object, 'contents')\n type_fields = wrapper_object.contents._fields_ if has_contents else wrapper_object._fields_\n\n # Create strings containing \"name\": value from fields on the object in json format\n attribute_names = [field[0] for field in type_fields]\n attribute_vals = [getattr(wrapper_object.contents if has_contents else wrapper_object, field[0])\n for field in type_fields]\n attribute_vals_strings = [wrapper_to_str(attr_val, False, True) for attr_val in attribute_vals]\n attribute_strings = ['\"%s\": %s' % attr for attr in zip(attribute_names, attribute_vals_strings)]\n\n # Pretty print it with json module\n json_string = '{ %s }' % ', '.join(attribute_strings)\n if formatted:\n try:\n json_object = json.loads(json_string)\n except:\n print('Unexpected error:', sys.exc_info()[0])\n print('Error while json parsing following json string:')\n print(json_string)\n raise\n json_string = json.dumps(json_object, sort_keys=False, indent=4)\n if contents_only:\n return json_string\n else:\n object_name = \\\n (wrapper_object._type_ if hasattr(wrapper_object, '_type_') else type(wrapper_object)).__name__\n return '%s: %s' % (object_name, json_string)\n\n\ndef action_type_enum_to_int(action_type):\n if action_type == ActionType.FOLD:\n return wrappers.a_fold\n elif action_type == ActionType.CALL:\n return wrappers.a_call\n elif action_type == ActionType.RAISE:\n return wrappers.a_raise\n else:\n raise ValueError('Unknown action type')\n\n\ndef action_type_int_to_enum(action_type_int):\n if action_type_int == wrappers.a_fold:\n return ActionType.FOLD\n elif action_type_int == wrappers.a_call:\n return ActionType.CALL\n elif action_type_int == wrappers.a_raise:\n return ActionType.RAISE\n else:\n raise ValueError('Unknown action type')\n\n\ndef betting_type_int_to_enum(betting_type):\n if betting_type == wrappers.limitBetting:\n return BettingType.LIMIT\n elif betting_type == wrappers.noLimitBetting:\n return BettingType.NO_LIMIT\n else:\n raise ValueError('Unknown betting type')\n","sub_path":"acpc-python-client-master/acpc_python_client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"44229202","text":"#!/usr/bin/env python3.7\nimport argparse\nimport time \nimport sys\n\nimport logging\nfrom colorlog import ColoredFormatter\nfrom ppadb.client import Client as 
AdbClient\n\nfrom PIL import Image\nfrom io import BytesIO\nfrom pyocr import pyocr\nfrom pyocr import builders\nimport yaml\n\nfrom multiprocessing import Pool\n\nimport tenacity\n\ndef create_console_handler(verbose_level):\n clog = logging.StreamHandler()\n formatter = ColoredFormatter(\n \"%(log_color)s[%(levelname)-8s%(module)s]%(reset)s \"\n \"%(white)s%(message)s\",\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n })\n clog.setFormatter(formatter)\n if verbose_level == 0:\n clog.setLevel(logging.WARN)\n elif verbose_level == 1:\n clog.setLevel(logging.INFO)\n else:\n clog.setLevel(logging.DEBUG)\n return clog\n\nclass TradeError(Exception):\n \"\"\"Raised when an error is detected\"\"\"\n def __init__(self, arg):\n self.strerror = arg\n self.args = {arg}\n\ndef scrap_screencap(dev_name, img, location):\n crop = img.crop(config[dev_name]['locations'][location])\n return tool.image_to_string(crop).replace(\"\\n\", \" \")\n\ndef tap(dev,location):\n x, y = config[dev.name]['locations'][location]\n dev.shell(\"input tap \" + str(x) + \" \" + str(y))\n logger.info(dev.name + ' | Tap location ' + str(location) + 'succeeded')\n\ndef check_known_errors(dev_name, img):\n errors= [\n (\"error_box\",[\"est trop loin\", \"expiration\", \"inconnue\"])\n ]\n for err_set in errors:\n box, msgs = err_set\n text = scrap_screencap(dev_name, img, box)\n for msg in msgs:\n if text in msg:\n return False\n return True\n\ndef waiting(location):\n time.sleep(config['waits'][location])\n\n@tenacity.retry(wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef clic_trade(dev):\n logger.info(\"Check and clic on trade button device {}\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'ECHANGER' in scrap_screencap(dev.name, img,\"trade_button_label\"):\n logger.info(dev.name + ' | TRADE button found')\n tap(dev,'trade_button')\n waiting('trade_button')\n return\n check_known_errors(dev.name, img)\n raise TradeError('Error Clic Trade {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef select_pokemon(dev):\n search_string = config[dev.name]['search_string']\n logger.info(\"Check device {} Pokemon selection screen\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'POKEMON' in scrap_screencap(dev.name, img,\"pokemon_to_trade_box\"):\n logger.info(dev.name + ' | Selection screen found')\n tap(dev,'search_button')\n waiting('location')\n dev.shell(\"input text \" + search_string)\n # tap 2 times the pokemon, once to get of keyboard entry, 2nd to select pokemon\n tap(dev,'first_pokemon')\n waiting('first_pokemon')\n tap(dev,'first_pokemon')\n return\n raise TradeError('Select Pokemon failed on {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef check_screen(dev):\n name_check = config[dev.name]['name_check']\n logger.info(\"Check device {} NEXT screen\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'SUIVANT' in scrap_screencap(dev.name, img,\"next_button_box\"):\n logger.info(dev.name + ' | Next screen found')\n #if name_check not in scrap_screencap(dev.name, img,\"name_at_next_screen_box\"):\n # raise namecheckfail\n tap(dev,'next_button')\n return\n raise TradeError('Select Pokemon failed on {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), 
reraise=True)\ndef confirm_screen(dev):\n logger.info(\"Check device {} CONFIRM screen\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'CONFIRMER' in scrap_screencap(dev.name, img,\"confirm_button_box\"):\n logger.debug(dev.name + ' | Confirm screen found, doing checks')\n if ( '100' in scrap_screencap(dev.name, img,\"trade_star\") and\n config[dev.name]['search_string'] in scrap_screencap(dev.name, img,\"trade_name_box\")\n ):\n tap(dev,'confirm_button')\n return\n else:\n logger.warning(dev.name + ' | Confirm checks failed')\n raise TradeError('Confirm screen failed on {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef trade_end(dev):\n logger.info(\"Check device {} trade ended\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n weight_text = str(scrap_screencap(dev.name, img,\"weight_box\"))\n logger.debug('scap_weight: {}'.format(weight_text))\n if 'POIDS' in weight_text:\n logger.info(dev.name + ' | traded pokemon screen found')\n tap(dev,'close_pokemon_button')\n return\n weight_text = str(scrap_screencap(dev.name, img,\"weight_box_lucky\"))\n logger.debug('lucky scap_weight: {}'.format(weight_text))\n if 'POIDS' in weight_text:\n logger.warning('LUCKY Pokemon !!')\n logger.info(dev.name + ' | traded pokemon screen found')\n tap(dev,'close_pokemon_button')\n return\n raise TradeError('Trade ending failed on {}'.format(dev.name))\n\ndef do_trade(num, p):\n try:\n p.map(clic_trade, [dev_id1,dev_id2])\n p.map(select_pokemon, [dev_id1,dev_id2])\n waiting('next_button')\n\n p.map(check_screen, [dev_id1,dev_id2])\n waiting('confirm_button')\n\n p.map(confirm_screen, [dev_id1,dev_id2])\n waiting('trade_anim')\n\n p.map(trade_end, [dev_id1,dev_id2])\n waiting('trade_ends')\n\n except Exception as e:\n logger.error(\"ERROR: Canceling trade:\" + str(e))\n return False\n\n return True\n \n\nif __name__ == '__main__':\n # get params from command line\n parser = argparse.ArgumentParser(description='Pokemon GO trader')\n parser.add_argument('--config', type=str, default='config.yaml',\n help=\"Config file location.\")\n parser.add_argument('--stop-after', default=1, type=int,\n help='Stop after exchanging pokemon')\n args = parser.parse_args()\n\n # load params from config file\n with open(args.config, \"r\") as f:\n config = yaml.safe_load(f)\n tools = pyocr.get_available_tools()\n tool = tools[0]\n\n # magic number for randomizing crop \n i = 2\n\n verbose_level=1\n logger = logging.getLogger()\n if verbose_level > 0:\n logger.addHandler(create_console_handler(verbose_level))\n logger.setLevel(logging.DEBUG)\n\n # Connecting on local adb server\n try:\n client = AdbClient(host=\"127.0.0.1\", port=5037)\n except:\n logger.error(\"Unable to connect to adb server\")\n logger.error(\"Please check your configuration and run ``adb start-server''\")\n sys.exit(1)\n \n if len(client.devices()) < 2:\n logger.error(\"This program needs 2 phones connected with ADB\")\n \n # instanciate 2 pogo games\n dev_id1 = client.device(config['app1']['device_id'])\n dev_id1.name = 'app1'\n dev_id2 = client.device(config['app2']['device_id'])\n dev_id2.name = 'app2'\n\n\n # trading\n p = Pool(2)\n for trade in range(args.stop_after):\n logger.warning(\"Trade num {}/{} engaged\".format(str(trade+1),str(args.stop_after)))\n if not do_trade(trade, p):\n 
sys.exit(0)\n\n","sub_path":"trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"393246918","text":"#!/usr/bin/env python3\r\n#\r\n\r\n\r\n'''\r\nBind a callback so that the task's result can be read when it finishes; the callback's last argument is the future object, through which the coroutine's return value is obtained.\r\n'''\r\n\r\nimport time\r\nimport asyncio\r\nimport functools\r\n\r\nnow = lambda: time.time()\r\n\r\n\r\nasync def do_some_work(x):\r\n    print('waiting: ', x)\r\n    return 'Done after {}s'.format(x)\r\n\r\n\r\n'''\r\nAttach a callback to the task with the add_done_callback method; when the task (which is also the coroutine) finishes, the callback is invoked,\r\nand the coroutine's result is obtained through the future parameter. The task we create here and the future object inside the callback are actually the same object.\r\nWhat the callback does is read task.result(); Task is a subclass of Future.\r\n'''\r\n\r\n\r\n# callback without argv\r\ndef callback(future: asyncio.Future):\r\n    print('callback: ', future.result())\r\n\r\n\r\nstart = now()\r\ncoroutine = do_some_work(2)\r\nloop = asyncio.get_event_loop()\r\ntask = asyncio.ensure_future(coroutine)\r\nprint(\"\\ncallback without argv:\")\r\nprint(task)\r\ntask.add_done_callback(callback)\r\nprint(task)\r\nloop.run_until_complete(task)\r\nprint('time:', now() - start)\r\n\r\n\r\n# callback with argv\r\ndef callback(argv, future: asyncio.Future):\r\n    print('argv: {}, callback: {}'.format(argv, future.result()))\r\n\r\n\r\nstart = now()\r\ncoroutine = do_some_work(2)\r\nloop = asyncio.get_event_loop()\r\ntask = asyncio.ensure_future(coroutine)\r\nprint(\"\\ncallback with argv:\")\r\nprint(task)\r\ntask.add_done_callback(functools.partial(callback, 123))\r\nprint(task)\r\nloop.run_until_complete(task)\r\nprint('time:', now() - start)\r\n\r\nloop.close()\r\n","sub_path":"basic_/asyncio_/03_asyncio_callback.py","file_name":"03_asyncio_callback.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"119948999","text":"\n\nimport json\nimport qiskit as q\nimport qiskit_aqua\nfrom math import ceil, log2\nfrom typing import Dict, Tuple\n\nfrom .grover import Grover\nfrom .simulator import Simulator\nfrom .load_graph import load_graph\nfrom .get_logger import get_logger\n\nfrom .encode_graph import *\n\n\nlogger = get_logger(__name__) \n\n\nclass EdgeFinder():\n\n    def __init__(self):\n        logger.info(\"#\"*100)\n        self.__name__ = str(self.__class__).split(\".\")[-1][:-2]\n        logger.info(\"Starting workflow of %s\"%(self.__name__))\n        self.graph = load_graph()\n        self.n_of_qbits = ceil(log2(max((e[0] for e in self.graph))))\n        logger.info(\"To encode a vertex we need %d qbits \"%self.n_of_qbits)\n        self._setup_registers()\n        self._setup_circuit()\n\n    def _setup_registers(self):\n\n        # Quantum registers\n        self.start = q.QuantumRegister(self.n_of_qbits, name=\"start\")\n        self.end = q.QuantumRegister(self.n_of_qbits, name =\"end\")\n        self.weight= q.QuantumRegister(self.n_of_qbits, name =\"weight\")\n        self.flags = q.QuantumRegister(3, name =\"flags\")\n\n        self.ancillas_dim = (3 * self.n_of_qbits) - 1\n        self.ancillas = q.QuantumRegister(self.ancillas_dim, name=\"ancillas\")\n\n        self.qregisters = [self.start, self.end, self.weight, self.flags, self.ancillas]\n\n        logger.info(\"The simulation needs {} qbits\".format(sum((register.size for register in self.qregisters))))\n\n        # Setup the classical registers to save the result of the measurements\n        self.classical_start = q.ClassicalRegister(self.n_of_qbits, 'classical_start')\n        self.classical_end = q.ClassicalRegister(self.n_of_qbits, 'classical_end')\n        
self.classical_weight= q.ClassicalRegister(self.n_of_qbits, 'classical_weight')\n self.classical_flags = q.ClassicalRegister(3, 'classical_flags')\n \n self.cregisters = [self.classical_start, self.classical_end, self.classical_weight, self.classical_flags]\n\n\n def _setup_circuit(self):\n self.circuit = q.QuantumCircuit(*self.qregisters)\n\n def _add_measure_gates(self):\n self.circuit.add(*self.cregisters)\n logger.info(\"Adding measure gates\")\n for q, c in zip(self.qregisters, self.cregisters):\n self.circuit.measure(q, c)\n\n def initialize_circuit(self):\n \"\"\"Prepare the initial superposition\"\"\"\n for register in [self.start, self.end, self.weight]:\n self.circuit.h(register)\n\n\n def setup_oracle(self):\n self.oracle = None\n raise NotImplementedError(\"This method is met to be overvritten by a subclass therfore is not callable.\")\n\n def get_MLE(self, counts : Dict[int, int]) -> Tuple[int, int, int]:\n \"\"\"Find the most frequent result with the flags setted to 1 (MLE)\"\"\"\n logger.info(\"Finding the Most Likley result\")\n \n # Convert it to a list\n result_list = [list(reversed(encoding.split(\" \"))) + [times] for encoding, times in counts.items()]\n\n result_list.sort(key=lambda x: x[-1], reverse=False)\n\n logger.debug(\"The results are: \")\n\n result_list = [(int(values[0],base=2),int(values[1],base=2),int(values[2],base=2),int(values[3],base=2),int(values[5])) for values in result_list]\n\n for values in result_list:\n logger.debug(\"\\t{:d} -> {:d} w: {} flags: {:02b} times: {:d}\".format(*values))\n\n MLE = max(result_list,key=lambda x: x[-1]) # if x[-2] == 1 else 0\n\n logger.info(\"The Most Likley result is {} -> {} w: {} flags: {:02b} times: {}\".format(*MLE))\n return MLE\n\n def run(self, n_of_shots : int = 100*(2**7), local : bool = False):\n\n self.initialize_circuit()\n\n self.setup_oracle()\n oracle_circuit_path = self.__name__ + \"_oracle.qasm\"\n logger.info(\"Saving the oracle circuit to %s\"%oracle_circuit_path)\n with open(oracle_circuit_path, \"w\") as f:\n f.write(self.oracle.qasm())\n\n image_path = self.__name__ + \".png\"\n logger.info(\"Saving the oracle as an image at %s\"%(image_path))\n self.oracle.draw(filename=image_path, output=\"mpl\")\n\n logger.info(\"Checking if the oracle is correct:\")\n simulator = Simulator()\n oracle_sym_results = simulator.symbolic_simulation(self.circuit + self.oracle)\n\n oracle_sym_path = self.__name__ + \"_oracle_sym.log\"\n logger.info(\"Saving the oracle symbolic symulation at %s\"%(oracle_sym_path))\n with open(oracle_sym_path, \"w\") as f:\n for value in oracle_sym_results:\n f.write(value + \"\\n\")\n\n self.circuit = Grover([self.start, self.end, self.flags], self.ancillas).run(self.circuit, self.oracle, number_of_expected_results=4)\n\n self._add_measure_gates()\n\n logger.info(\"The final circuit has {} gates and is {} depth.\".format(self.circuit.size, self.circuit.depth))\n\n circuit_path = self.__name__ + \".qasm\"\n logger.info(\"Saving the circuit to %s\"%circuit_path)\n with open(circuit_path, \"w\") as f:\n f.write(self.circuit.qasm())\n\n logger.info(\"Starting a batch of %d simulations.\"%n_of_shots)\n\n if local:\n results = simulator.parallel_simulation(self.circuit, n_of_shots)\n else:\n results = simulator.distribuited_simulation(self.circuit, n_of_shots, [(\"::1\",10000,4,False), (\"169.254.75.69\",10001,8,True)])\n\n results_path = self.__name__ + \".json\"\n logger.info(\"Saving the results to %s\"%results_path)\n simulator.save_results(results_path)\n\n return 
self.get_MLE(results)\n","sub_path":"Minimum_Spanning_Tree/qiskit_implementation/weight_encoding/edge_finder/edge_finder.py","file_name":"edge_finder.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"96712061","text":"from setuptools import setup, Extension\n\nimport os\nimport sys\nimport setuptools\nimport glob\n\n__version__ = '0.2.0'\n\nextra_compile_args_dict = {\n 'linux' : ['-w', '-ftemplate-backtrace-limit=0', '-std=c++14'],\n 'linux2' : ['-w', '-ftemplate-backtrace-limit=0', '-std=c++14'],\n 'darwin' : ['-w', '-ftemplate-backtrace-limit=0', '-std=c++14', '-stdlib=libc++'],\n}\n\next_modules = [\n Extension(\n \"_sparsepy\",\n glob.glob('src/*.cpp'),\n include_dirs = ['lib/parallel-hashmap', 'lib/pybind11/include', 'lib/cereal/include'],\n language = 'c++',\n extra_compile_args = extra_compile_args_dict[sys.platform],\n extra_link_args = ['-lz'],\n define_macros = [('DOCTEST_CONFIG_DISABLE', None)]\n )\n]\n\nsetup(\n name = 'sparsepy',\n version = __version__,\n author = 'Adam Moyer',\n author_email = 'atom.moyer@gmail.com',\n url = None,\n description = 'A Fast and Memory Efficient Hash Map for Python',\n packages = ['sparsepy'],\n package_dir={'sparsepy': 'sparsepy'},\n package_data={},\n ext_modules = ext_modules,\n install_requires = ['pytest', 'pytest-timeout', 'pytest-memprof', 'pybind11'],\n include_package_data=True,\n zip_safe = False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"372926632","text":"class Solution:\n def fill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:\n neighbours=[[0,-1],[-1,0],[0,1],[1,0]]\n prevColor=image[sr][sc]\n image[sr][sc]=newColor\n cell=[sr,sc]\n for i in neighbours:\n r=cell[0]+i[0]\n c=cell[1]+i[1]\n if(r>=0 and r=0 and c List[List[int]]:\n if(image[sr][sc]==newColor):\n return image\n self.fill(image,sr,sc,newColor)\n return image\n \n \n \n","sub_path":"FloodFill.py","file_name":"FloodFill.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"450136482","text":"#!/usr/bin/python\nimport logging\nimport os.path\nimport sys\nfrom optparse import OptionParser\nimport pdb\n\n\npathname = os.path.dirname(sys.argv[0])\nsys.path.append(pathname)\n\nimport callMPsfunctions\n\n\n \ninOptions = OptionParser()\ninOptions.add_option(\"-i\", \"--vcffile\", dest=\"bsFile\", help=\"VCF file for methylation calls\", type=\"string\")\ninOptions.add_option(\"-o\", \"--output\", dest=\"outFile\", help=\"Output file with the probability scores\", type=\"string\")\ninOptions.add_option(\"-v\", \"--verbose\", dest=\"logDebug\", help=\"show verbose debugging output\", action=\"store_true\", default=False)\n(options, args) = inOptions.parse_args()\n\n\n\ncallMPsfunctions.setLog(options.logDebug)\n\n\nif not options.bsFile:\n callMPsfunctions.die(\"input file not given!\")\nif not options.outFile:\n callMPsfunctions.die(\"output file not given!\")\nif not os.path.isfile(options.bsFile):\n callMPsfunctions.die(\"input bs vcf file does not exist: \" + options.bsFile)\n\n\ncallMPsfunctions.getMPsVCF(options.bsFile, options.outFile, 
options.logDebug)\n\n\n\n\n\n\n\n","sub_path":"004.bsseq_pipeline/002.callMPs/01.callMPs_fromVCF.py","file_name":"01.callMPs_fromVCF.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"136328484","text":"from matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n\r\ndef plot_rectangles(long_x, x_labels, resources, y_label, title, legends, save_direction=None):\r\n    fig, ax = plt.subplots()\r\n    l_s = len(resources)\r\n    width = ((100/l_s)/100)-0.005 # the -0.005 is an adjustment so the bars do not touch each other\r\n    ind = np.arange(long_x)\r\n    rects = []\r\n    w = 0\r\n    colors = ['r', 'g', 'b', 'y', 'c', 'm']\r\n    for i in range(l_s):\r\n        rects.append(ax.bar(ind + w, resources[i], width, color=colors[i]))\r\n        w += width\r\n    ax.set_ylabel(y_label)\r\n    ax.set_title(title)\r\n    ax.set_xticks((ind + (width*l_s)/2))\r\n    ax.set_xticklabels(x_labels)\r\n    lab = []\r\n    for c in rects:\r\n        lab.append(c[0])\r\n    ax.legend((lab), legends)\r\n\r\n    def autolabel(rect):\r\n        # attach some text labels\r\n        for r in rect:\r\n            height = r.get_height()\r\n            ax.text(r.get_x() + r.get_width() / 2., 0.05 * height,\r\n                    '%.3f' % height,\r\n                    ha='center', va='bottom')\r\n\r\n    for rec in rects:\r\n        autolabel(rec)\r\n    plt.show() if save_direction is None else plt.savefig(save_direction+'/means.png')\r\n\r\n\r\ndef plot_single_rec(long_x, x_labels, resources, y_label, title):\r\n    fig, ax = plt.subplots()\r\n    width = 0.25\r\n    ind = np.arange(long_x)\r\n    rects = ax.bar(ind, resources, width, color='r')\r\n    ax.set_ylabel(y_label)\r\n    ax.set_title(title)\r\n    ax.set_xticks(ind + width)\r\n    ax.set_xticklabels(x_labels)\r\n\r\n    def autolabel(rect):\r\n        # attach some text labels\r\n        for r in rect:\r\n            height = r.get_height()\r\n            ax.text(r.get_x() + r.get_width() / 2., 1.05 * height,\r\n                    '%i' % height,\r\n                    ha='center', va='bottom')\r\n\r\n    autolabel(rects)\r\n    plt.show()","sub_path":"AnalysisCV/auxiliar/Plotters.py","file_name":"Plotters.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"67849664","text":"from basehandler import BaseHandler\nfrom storybooklib import MAX_PLAYERS\n\nclass WaitingToStart(BaseHandler):\n    def get(self):\n        if not self.user:\n            self.render(u'login_screen')\n        else:\n            game_id = self.request.get('game_id')\n            user_id = self.user.user_id\n            self.render(u'waiting_to_start', game_id=game_id, MAX_PLAYERS=MAX_PLAYERS, user_id=user_id)\n\n        return\n","sub_path":"waitingtostart.py","file_name":"waitingtostart.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"35007009","text":"class Phone():\n    def __init__(self,brand,color,phone_number):\n        self.brand=brand\n        self.color=color\n        self.number=phone_number\n        self.on=False\n        self.sended_messages=0\n    def turn_on(self):\n        self.on=True\n    def turn_off(self):\n        self.on=False\n    def send_sms(self,other_number,sms):\n        if self.on and len(other_number)>=9:\n            print(\"The message has been sent\")\n            self.sended_messages+=1\n        else:\n            print(\"Turn the phone on or enter a valid phone number\") \n    def __str__(self):\n        if self.on:\n            kom=\"turned on\"\n        else:\n            kom=\"turned off\" \n        return (f\"The {self.brand} phone with the number {self.number} is {kom} , {self.sended_messages} SMS sent \\n\") \ntel1=Phone(\"Samsung\",\"white\",\"123456789\") 
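\n# A short illustrative check of the send_sms guard above (this extra handset is made up for the sketch): with the phone still off and a number shorter than 9 digits, nothing is counted.\ntel_demo=Phone(\"Nokia\",\"blue\",\"111222333\")\ntel_demo.send_sms(\"12345\",\"Hi\")\nassert tel_demo.sended_messages==0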
\ntel1.turn_on()\ntel1.send_sms(\"789456123\",\"Hi\")\nprint(tel1) \ntel1.turn_off()\ntel1=Phone(\"Apple\",\"black\",\"789456123\") \ntel1.turn_on()\ntel1.send_sms(\"78945\",\"Hi\")\nprint(tel1) \ntel1.turn_off()\n \n","sub_path":"07-ObjectOrientedProgramming/14_phone.py","file_name":"14_phone.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"298800611","text":"import numpy as np\nimport torch\nfrom ax.service.managed_loop import optimize\n\nfrom model import AutoEncoder\nfrom main import load_dataset\n\ndef train(dataloader, parameters, device):\n model = AutoEncoder(input_dim=1900, nlayers=parameters.get('nlayers', 5), latent=100)\n model = model.to(device)\n\n model.train()\n train_loss = 0\n\n optimizer = torch.optim.Adam(model.parameters(), lr=parameters.get('lr', 1e-5), \n weight_decay=parameters.get('weight_decay', 0.))\n loss_func = torch.nn.MSELoss()\n\n for epoch in range(parameters.get('epochs', 1000)):\n for index, (data, ) in enumerate(dataloader, 1):\n optimizer.zero_grad()\n output = model(data)\n loss = loss_func(output, data)\n train_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n return model\n\ndef test(dataloader, model):\n model.eval()\n test_loss = 0\n\n loss_func = torch.nn.MSELoss()\n\n for index, (data, ) in enumerate(dataloader, 1):\n with torch.no_grad():\n output = model(data)\n loss = loss_func(output, data)\n test_loss += loss.item()\n\n return test_loss / index\n\ndef train_test(parameterization):\n dtype = torch.float\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n batch_size = 100\n train_dataloader, test_dataloader = load_dataset('data/aponc_sda.npz', batch_size, device)\n\n net = train(train_dataloader, parameterization, device)\n\n return test(test_dataloader, net)\n\ndef tune():\n best_parameters, values, experiment, model = optimize(\n parameters=[\n {'name': 'lr', 'type': 'range', 'bounds': [1e-6, 0.4], 'log_scale': True},\n {'name': 'weight_decay', 'type': 'range', 'bounds': [0.0, 1.0], 'log_scale': False},\n {'name': 'nlayers', 'type': 'range', 'bounds': [2, 6], 'log_scale': False},\n #{'name': 'momentum', 'type': 'range', 'bounds': [0.0, 1.0]},\n ],\n evaluation_function=train_test,\n objective_name='mse_loss',\n )\n\n print(best_parameters)\n print('means, covariances', values)\n\n return experiment\n\ndef best(experiment):\n df = experiment.fetch_data().df\n best_arm_name = df.arm_name[df['mean'] == df['mean'].min()].values[0]\n best_arm = experiment.arms_by_name[best_arm_name]\n\n print(best_arm)\n\n dtype = torch.float\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n batch_size = 100\n train_dataloader, test_dataloader = load_dataset('data/aponc_sda.npz', batch_size, device)\n\n combined_train_test_set = torch.utils.data.ConcatDataset([\n train_dataloader.dataset, \n test_dataloader.dataset,\n ])\n\n combined_train_test_loader = torch.utils.data.DataLoader(\n combined_train_test_set, \n batch_size=batch_size, \n shuffle=True,\n )\n\n net = train(train_dataloader, best_arm.parameters, device)\n\n test_mse_loss = test(test_dataloader, net)\n\n print('MSE loss (test set): %f' % (test_mse_loss))\n\ndef main():\n torch.manual_seed(123)\n\n experiment = tune()\n best(experiment)\n\nif __name__ == '__main__':\n 
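# Flow note for main() below, assuming ax's documented defaults (which can vary by version): optimize() drives a managed Bayesian-optimization loop -- Sobol exploration followed by a GP-based strategy, 20 trials in total by default -- calling train_test() once per suggested parameterization; best() then retrains on the best arm and reports the held-out MSE loss.\n    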
main()\n","sub_path":"tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"407409006","text":"\"\"\"Each ListNode holds a reference to its previous node\nas well as its next node in the List.\"\"\"\nclass ListNode:\n def __init__(self, value, prev=None, next=None):\n self.value = value\n self.prev = prev\n self.next = next\n\n \"\"\"Wrap the given value in a ListNode and insert it\n after this node. Note that this node could already\n have a next node it is point to.\"\"\"\n def insert_after(self, value):\n current_next = self.next\n self.next = ListNode(value, self, current_next)\n if current_next:\n current_next.prev = self.next\n\n \"\"\"Wrap the given value in a ListNode and insert it\n before this node. Note that this node could already\n have a previous node it is point to.\"\"\"\n def insert_before(self, value):\n current_prev = self.prev\n self.prev = ListNode(value, current_prev, self)\n if current_prev:\n current_prev.next = self.prev\n\n \"\"\"Rearranges this ListNode's previous and next pointers\n accordingly, effectively deleting this ListNode.\"\"\"\n def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev\n\n\n\"\"\"Our doubly-linked list class. It holds references to\nthe list's head and tail nodes.\"\"\"\nclass DoublyLinkedList:\n def __init__(self, node=None):\n self.head = node\n self.tail = node\n self.length = 1 if node is not None else 0\n\n def __len__(self):\n return self.length\n\n \"\"\"Wraps the given value in a ListNode and inserts it \n as the new head of the list. Don't forget to handle \n the old head node's previous pointer accordingly.\"\"\"\n def add_to_head(self, value):\n # creating node\n new_node = ListNode(value)\n # add value reflecting # of items in LL\n self.length += 1\n # link node to head if list is empty. Becomes both head and tail\n if not self.head and not self.tail:\n self.head = new_node\n self.tail = new_node\n # list is populated\n else:\n # update the locations of head and tail\n # current head is being linked to new_head\n new_node.next = self.head\n # updating old head to have a prev link\n self.head.prev = new_node\n # updating new head to new-node\n self.head = new_node\n\n \"\"\"Removes the List's current head node, making the\n current head's next node the new head of the List.\n Returns the value of the removed Node.\"\"\"\n def remove_from_head(self):\n # store value before we delete the node, so it can be returned\n value = self.head.value\n # delete head\n self.delete(self.head)\n # return value of deleted node\n return value\n\n \"\"\"Wraps the given value in a ListNode and inserts it \n as the new tail of the list. Don't forget to handle \n the old tail node's next pointer accordingly.\"\"\"\n def add_to_tail(self, value):\n new_node = ListNode(value)\n # add value reflecting # of items in LL\n self.length += 1\n # link node to head if list is empty. 
Becomes both head and tail\n if not self.head and not self.tail:\n self.head = new_node\n self.tail = new_node\n # list is populated\n else:\n # update the locations of head and tail\n # opposite of add_new_head\n # the prev-prop of new node will be linking to previous tail\n new_node.prev = self.tail\n # current tail's next-prop will link to new node being added\n self.tail.next = new_node\n # updating new tail to new-node\n self.tail = new_node\n\n \"\"\"Removes the List's current tail node, making the \n current tail's previous node the new tail of the List.\n Returns the value of the removed Node.\"\"\"\n def remove_from_tail(self):\n # opposite of remove from head\n value = self.tail.value\n self.delete(self.tail)\n return value\n\n \"\"\"Removes the input node from its current spot in the \n List and inserts it as the new head node of the List.\"\"\"\n def move_to_front(self, node):\n # self refers to list\n # node is value passed in as arg\n # -----\n # don't perform if node already is head\n if node is self.head:\n return\n # store node value so is safe to delete and available for adding to head\n value = node.value\n # delete node from current location\n self.delete(node)\n # add to head\n self.add_to_head(value)\n\n \"\"\"Removes the input node from its current spot in the \n List and inserts it as the new tail node of the List.\"\"\"\n def move_to_end(self, node):\n # opposite of move_to_front \n if node is self.tail:\n return\n value = node.value\n self.delete(node)\n self.add_to_tail(value)\n\n \"\"\"Removes a node from the list and handles cases where\n the node was the head or the tail\"\"\"\n def delete(self, node):\n # TODO: Catch errors if list is empty or node is not in list\n\n # assuming node is in list\n # reduce # of items in DLL\n self.length -= 1\n # if head & tail, ass\n if self.head is self.tail:\n self.head = None\n self.tail = None\n # if head\n elif node is self.head:\n # next node after head becomes new head\n self.head = self.head.next\n node.delete()\n\n # if tail\n # opposite of head\n elif node is self.tail: \n self.tail = self.tail.prev\n node.delete()\n\n # if regular node, call existing delete function\n else:\n node.delete()\n \"\"\"Returns the highest value currently in the list\"\"\"\n def get_max(self):\n # start at head\n if not self.head:\n return None\n # store the head.value in max_val\n max_value = self.head.value\n # create a var for iteration, beginning with first value (head node)\n current = self.head\n # iterate through each node and compare\n while current:\n if current.value > max_value:\n max_value = current.value\n current = current.next\n\n return max_value\n \n # if i > max_val, max_val = i\n # return max_val\n\n\n","sub_path":"doubly_linked_list/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"236785941","text":"\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt \n\nimport sys\nimport time\nimport os \nimport pathlib\nimport pickle\n\nCWD = pathlib.Path.cwd()\n\nsys.path.append('../../ml4seas/')\n\n# scipy \nimport numpy as np \nimport pandas as pd \nimport xarray as xr\n\n# tensorflow and keras \nimport tensorflow as tf\n\ntf.debugging.set_log_device_placement(True)\n\nimport tensorflow.keras as keras\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import Model\nimport tensorflow.keras.backend as K\n\n# import NN utilities 
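\n# The NN module imported below is not shown in this file; purely as an illustrative sketch (the real PeriodicPadding2D and ResizeLayer may be implemented differently), a wrap-around padding layer for the longitude axis could look roughly like this:\nclass _PeriodicPaddingSketch(tf.keras.layers.Layer):\n    \"\"\"Illustrative only: wraps pad longitude columns around each side.\"\"\"\n    def __init__(self, pad, **kwargs):\n        super().__init__(**kwargs)\n        self.pad = pad\n    def call(self, x):\n        # x is (batch, lat, lon, channels): copy the last pad longitude columns in\n        # front of the grid and the first pad behind it, so that convolutions see\n        # periodic boundary conditions in longitude.\n        return tf.concat([x[:, :, -self.pad:, :], x, x[:, :, :self.pad, :]], axis=2)\n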
\nfrom NN import *\n\n# checks if the GPU is available \nif len(tf.config.list_physical_devices('GPU')) >= 1: \n compute = 'GPU'\nelse: \n compute = 'CPU'\n\nprint(f\"using the {compute}\")\n\n# ----------------------------------------------------------------------------\n# define parameters here \nbatch_size=32\npadd = 8\ninput_shape = (181, 360, 1) # last for the number of channels \nresize_shape = (176, 360) # to be evenly divided by the padd\nn_epochs = 20 # number of epochs \ndpath = pathlib.Path('/media/nicolasf/END19101/data/GCMs/processed/hindcasts/CDS/ECMWF/T2M/')\n# ----------------------------------------------------------------------------\n\n### list the files \nlfiles = list(dpath.glob(\"ECMWF_T2M_seasonal_anomalies_????_??.nc\"))\nlfiles.sort()\n\n# opens the dataset \ndset = xr.open_mfdataset(lfiles, concat_dim='time', combine='nested', parallel=True)\n\n### selects the training set \ndset_train = dset.sel(time=slice('1993','2010'))\n\n### selects the validation set \ndset_val = dset.sel(time=slice('2011',None))\n\n### select the correct lead time (3 = e.g. SON for A initialisation)\ndset_train = dset_train[['t2m']].sel(step=3)\ndset_val = dset_val[['t2m']].sel(step=3)\n\ndset_train = dset_train.stack(instance=('time','member'))\ndset_val = dset_val.stack(instance=('time','member'))\n\n### get the repeated datetimes (will be useful to sample repeatedly in Yds)\n\nrdatetimes_train = dset_train.indexes[\"instance\"].get_level_values(0)\nrdatetimes_val = dset_val.indexes[\"instance\"].get_level_values(0)\n\n# transpose \ndset_train = dset_train.transpose('instance','lat','lon')\ndset_val = dset_val.transpose('instance','lat','lon')\n\n### Generate data for tensorflow \ndata_train = XrDataGenerator(dset_train, dset_train, {'t2m':None}, 't2m', norm=True, batch_size=batch_size, mean=None, std=None, shuffle=True, load=False)\ndata_val = XrDataGenerator(dset_val, dset_val, {'t2m':None}, 't2m', norm=True, batch_size=batch_size, mean=data_train.mean, std=data_train.std, shuffle=True, load=False)\n\n# ----------------------------------------------------------------------------\n### build the model \n\n# encoder \n\n# Input placeholder\noriginal = Input(shape=input_shape)\n\n# Resize to have dimensions divisible by 8\nresized = ResizeLayer(newsize=(176,360))(original)\n\n# # Wrap-around in longitude for periodic boundary conditions\n\npadded = PeriodicPadding2D(padd)(resized)\n\n# Encoding layers\nx = Conv2D(16, (3, 3), padding='same')(padded)\nx = LeakyReLU()(x)\nx = MaxPooling2D((2, 2), padding='same')(x)\nx = Conv2D(8, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\nx = MaxPooling2D((2, 2), padding='same')(x)\nx = Conv2D(8, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\n\nencoded = MaxPooling2D((2, 2), padding='same')(x)\n\n### decoder \n\n# Decoding layers\nx = Conv2D(8, (3, 3), padding='same')(encoded)\nx = LeakyReLU()(x)\nx = UpSampling2D((2, 2))(x)\nx = Conv2D(8, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\nx = UpSampling2D((2, 2))(x)\nx = Conv2D(16, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\nx = UpSampling2D((2, 2))(x)\ndecoded = Conv2D(1, (3, 3), padding='same')(x)\n\n# Strip the longitude wrap-around\npruned = PrunePeriodicPadding2D(padd)(decoded)\n\noutsize = ResizeLayer(newsize=input_shape[:2])(pruned)\n\nautoencoder = Model(original,outsize)\n\n### run ID \nrun_id = time.strftime(\"run_%Y_%m_%d-%H_%M_%S\")\n\n### ----------------------------------------------------------------------------\n### callbacks \n\n# checkpoints \n\ncheckpoint_cb = 
keras.callbacks.ModelCheckpoint(f\"./autoencoder_checkpoint_{run_id}_{compute}.h5\", save_best_only=True)\n\n# early stopping \n\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)\n\nroot_logdir = os.path.join(os.curdir, \"my_logs\")\n\ndef get_run_logdir(run_id):\n return os.path.join(root_logdir, run_id)\n\nrun_logdir = get_run_logdir(run_id)\n\nkeras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# tensorboard callback \ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir, profile_batch=0)\n\n### ---------------------------------------------------------------------------------------------------------------------\n### compile \n\nautoencoder.compile(optimizer='adam', loss='mean_squared_error')\n\nautoencoder.summary(line_length=120)\n\n### ---------------------------------------------------------------------------------------------------------------------\n### fit \n\n# ### Note: run tensorboard with: \n# \n# ```\n# tensorboard --logdir=./my_logs --port=6006\n# ```\n\n\nhistory = autoencoder.fit(data_train, validation_data=data_val, epochs=n_epochs, callbacks=[checkpoint_cb, early_stopping_cb, tensorboard_cb])\n\n### ---------------------------------------------------------------------------------------------------------------------\n### save model \n\nsaved_model = CWD / f\"saved_autoencoder_{run_id}_{n_epochs}_epochs_{compute}\" \n\nkeras.models.save_model(autoencoder, saved_model)\n\n### save history \nsaved_history = CWD / f\"saved_history_{run_id}_{n_epochs}_epochs_{compute}.pkl\" \n\npickle.dump(autoencoder.history.history, open(saved_history, \"wb\"))\n\n### ---------------------------------------------------------------------------------------------------------------------\n### Some plots \n\ni = 10\n\nX = data_val[0][0][i:i+1,:,:,:]\n\npred = autoencoder.predict(X)\n\npred = pred.squeeze()\n\nf, axes = plt.subplots(nrows=2, figsize=(10,16))\n\naxes = axes.flatten() \n\nax = axes[0]\n\nim = ax.imshow(data_val[0][0][i,::-1,:,0], vmin=-5, vmax=5, cmap=plt.cm.RdBu_r)\n\nax = axes[1]\n\nim = ax.imshow(pred[::-1,:], vmin=-5, vmax=5, cmap=plt.cm.RdBu_r) \n\nf.savefig(f'./preds_vs_inputs_{run_id}.png', dpi=200, bbox_inches='tight')\n\nf, ax = plt.subplots()\npd.DataFrame(history.history).plot(ax=ax, marker='o')\nax.grid(ls=':')\n\nf.savefig(f'./history_{run_id}.png', dpi=200, bbox_inches='tight')","sub_path":"features-extractors/dimensionality_reduction/CONV_AE/Convolutional_AE.py","file_name":"Convolutional_AE.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"78666593","text":"# -*- coding: utf-8 -*-\nfrom openerp import api, fields, models\n\nclass WebsiteSupportTicketInheritTimesheets(models.Model):\n\n _inherit = \"website.support.ticket\"\n \n timesheet_ids = fields.One2many('website.support.ticket.timesheet', 'wst_id', string=\"Timesheet\")\n\n @api.multi\n def invoice_client(self):\n self.ensure_one()\n\n invoiced_state = self.env['ir.model.data'].sudo().get_object('website_support_timesheets', 'website_ticket_state_invoiced')\n self.state = invoiced_state\n \n invoice_account = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1).id\n new_invoice = self.env['account.invoice'].create({'name': '', 'type': 'out_invoice', 'partner_id': self.partner_id.id, 'account_id': invoice_account, 'comment': 'Support Ticket #' + str(self.id) + \" \" + 
self.subject.encode(\"UTF-8\") })\n \n for timesheet in self.timesheet_ids:\n time_string = \"\"\n \n if timesheet.hours == 1:\n time_string += \"1 hour\"\n else:\n time_string += str(timesheet.hours) + \" hours\"\n\n time_string += \" and \"\n\n if timesheet.minutes == 1:\n time_string += \"1 minute\"\n else:\n time_string += str(timesheet.minutes) + \" minutes\" \n \n new_invoice.invoice_line_ids.create({'invoice_id': new_invoice.id, 'name': 'Support Ticket Service (' + time_string + ')', 'account_id': invoice_account, 'price_unit': '0'})\n \n return {\n\t 'name':\"Support Ticket Invoice\",\n\t 'view_mode': 'form',\n\t 'view_type': 'form',\n\t 'res_model': 'account.invoice',\n\t 'type': 'ir.actions.act_window',\n\t 'res_id': new_invoice.id,\n\t }\n \nclass WebsiteSupportTicketTimesheet(models.Model):\n\n _name = \"website.support.ticket.timesheet\"\n \n wst_id = fields.Many2one('website.support.ticket', string=\"Support Ticket\")\n hours = fields.Integer(string=\"Hours\")\n minutes = fields.Integer(sring=\"Minutes\")\n project_id = fields.Many2one('project.project', string=\"Project\")","sub_path":"website_support_timesheets/models/website_support_ticket.py","file_name":"website_support_ticket.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"532619402","text":"import glob\nimport os\nimport cv2\nimport torch\nfrom albumentations import Compose, Resize,Lambda\nimport segmentation_models_pytorch as smp\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\n\n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nLOAD_MODEL_DEPLOY_PATH = \"./model_ear/best_model_ear_v1_43.pth\"\nENCODER = 'resnet18'\nENCODER_WEIGHTS = 'imagenet'\nCLASSES = ['ear']\nACTIVATION = 'sigmoid'\nDEVICE = \"cpu\"\n\ndef get_validation_augmentation():\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\n test_transform = [\n Resize(height=320, width=480, always_apply=True),\n ]\n return Compose(test_transform)\n\ndef to_tensor(x, **kwargs):\n return x.transpose(2, 0, 1).astype('float32')\n\ndef get_preprocessing(preprocessing_fn):\n \"\"\"Construct preprocessing transform\n Args:\n preprocessing_fn (callbale): data normalization function \n (can be specific for each pretrained neural network)\n Return:\n transform: albumentations.Compose\n \"\"\"\n _transform = [\n Lambda(image=preprocessing_fn),\n Lambda(image=to_tensor),\n ]\n return Compose(_transform)\n\nif __name__ == \"__main__\":\n\n # create segmentation model with pretrained encoder\n model = smp.Unet(\n encoder_name=ENCODER,\n encoder_weights=ENCODER_WEIGHTS,\n classes=len(CLASSES),\n activation=ACTIVATION,\n )\n\n model = torch.load(LOAD_MODEL_DEPLOY_PATH, map_location=DEVICE)\n model.eval()\n model.to(DEVICE)\n \n preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)\n preprocessing = get_preprocessing(preprocessing_fn)\n\n data_dir = [\"./test-images\"]\n\n data_samples = []\n for _dir in data_dir:\n # JPEG\n _list_tif = glob.glob(_dir + '/*.jpg')\n data_samples += _list_tif\n\n for path in data_samples:\n\n img = cv2.imread(path)\n img = cv2.resize(img, (480,320))\n h, w = img.shape[:2]\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n with torch.no_grad():\n \n #tensor_img = my_transforms(image=image)['image'].unsqueeze(0)\n #predictions = model.forward(tensor_img.to(DEVICE))\n \n sample = preprocessing(image=image)\n image = sample['image']\n\n x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)\n 
\n            pr_mask = model.predict(x_tensor)\n            pr_mask = (pr_mask.squeeze().cpu().numpy().round())\n            \n            cv2.imshow('Ear Mask',pr_mask)\n            cv2.imshow('Ear Image',img)\n            cv2.waitKey(0)\n\n    cv2.destroyAllWindows()\n","sub_path":"Deploy_ear_segmentation_image.py","file_name":"Deploy_ear_segmentation_image.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"262767637","text":"from io import BytesIO\nfrom PIL import Image\n\n\nclass CropShrinkImageMixin:\n\n    def shrink_image(self, field_name, resize_shape):\n        img: Image = Image.open(getattr(self, field_name))\n        img.thumbnail(self.get_shrinked_size(field_name, resize_shape), Image.ANTIALIAS)\n        image_file = BytesIO()\n        img.save(image_file, 'png')\n        getattr(self, field_name).file = image_file\n\n    def get_shrinked_size(self, field_name, resize_shape):\n        actual_img_width, actual_img_height = getattr(self, field_name).width, getattr(self, field_name).height\n        ratio = min(resize_shape[0] / actual_img_width, resize_shape[1] / actual_img_height)\n        return int(actual_img_width * ratio), int(actual_img_height * ratio)\n\n    def crop_image(self, field_name, resize_shape):\n        img: Image = Image.open(getattr(self, field_name))\n        new_width = resize_shape[0]\n        new_height = resize_shape[1]\n        left = (img.width - new_width) / 2\n        top = (img.height - new_height) / 2\n        right = (img.width + new_width) / 2\n        bottom = (img.height + new_height) / 2\n        img = img.crop((left, top, right, bottom))\n        image_file = BytesIO()\n        img.save(image_file, format='png')\n        getattr(self, field_name).file = image_file\n","sub_path":"blog/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"641573321","text":"from __future__ import unicode_literals\n\nfrom uuid import uuid4\n\nfrom carteblanche.mixins import NounView\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.list import ListView\n\n# from core.verbs import NounView\nimport core.models as cm\nimport core.forms as cf\nfrom actstream import action\nimport actstream.models as am\nfrom django.contrib.auth import login, logout\nimport decimal\nimport forms_builder.forms.models as fm\nfrom django.views.generic.edit import CreateView, UpdateView\nimport datetime, time\nfrom dateutil.relativedelta import relativedelta\nfrom django.utils import timezone\n\nfrom utils.utils import retrieve_leaf_dimensions\n\n# do weird stuff to make full names, not usernames, show up\n\n\ndef user_new_unicode(self):\n    return self.get_full_name()\n\n\n# Replace the __unicode__ method in the User class with our new implementation\nUser.__unicode__ = user_new_unicode\n\n\ndef decimal_default(obj):\n    if isinstance(obj, decimal.Decimal):\n        return float(obj)\n\n\nclass SiteRootView(NounView):\n    def get_noun(self, **kwargs):\n        siteroot = cm.SiteRoot()\n        return siteroot\n\n\nclass MessageView(SiteRootView, TemplateView):\n    template_name = 'base/messages.html'\n    message = 'Message goes here.'\n\n    def get_context_data(self, **kwargs):\n        # Call the base implementation first to get a context\n        context = super(MessageView, self).get_context_data(**kwargs)\n        # Add the message to the context\n        context['message'] = self.message\n        return context\n\n\nclass LandingView(SiteRootView, TemplateView):\n    
template_name = 'base/bootstrap.html'\n\n def get(self, request, **kwargs):\n # if the user has no payment methods, redirect to the view where one can be created\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse(viewname='location_list', current_app='core'))\n else:\n return super(LandingView, self).get(request, **kwargs)\n\n logout(self.request)\n\n\nclass BootstrapView(TemplateView):\n template_name = 'grid.html'\n\n\nclass AjaxableResponseMixin(object):\n \"\"\"\n Mixin to add AJAX support to a form.\n Must be used with an object-based FormView (e.g. CreateView)\n \"\"\"\n\n def render_to_json_response(self, context, **response_kwargs):\n data = json.dumps(context)\n response_kwargs['content_type'] = 'application/json'\n return HttpResponse(data, **response_kwargs)\n\n def form_invalid(self, form):\n response = super(AjaxableResponseMixin, self).form_invalid(form)\n if self.request.is_ajax():\n return self.render_to_json_response(form.errors, status=400)\n else:\n return response\n\n def form_valid(self, form):\n # We make sure to call the parent's form_valid() method because\n # it might do some processing (in the case of CreateView, it will\n # call form.save() for example).\n response = super(AjaxableResponseMixin, self).form_valid(form)\n if self.request.is_ajax():\n data = {\n 'pk': self.noun.pk,\n }\n return self.render_to_json_response(data)\n else:\n return response\n\n\nclass UserCreateView(SiteRootView, FormView):\n model = User\n template_name = 'base/form.html'\n form_class = cf.RegistrationForm\n\n def form_valid(self, form):\n user = User.objects.create_user(uuid4().hex[:30], form.cleaned_data['email'],\n form.cleaned_data['password1'])\n user.first_name = form.cleaned_data['first_name']\n user.last_name = form.cleaned_data['last_name']\n user.save()\n self.object = user\n locations = form.cleaned_data['locations']\n for l in locations:\n l.members.add(user)\n l.save()\n return super(UserCreateView, self).form_valid(form)\n\n def get_success_url(self):\n action.send(self.request.user, verb='created user', action_object=self.object,\n target=self.request.user)\n return reverse(viewname='make_new_user', current_app='core')\n\n def get_success_message(self, cleaned_data):\n first_name = cleaned_data['first_name']\n last_name = cleaned_data['last_name']\n locations = list(cleaned_data['locations'])\n location_names = \"\"\n if len(locations) > 0:\n for l in locations[:-1]:\n location_names += l.title + \", \"\n location_names += \" and \" + locations[-1].title + \".\"\n else:\n location_names = \"no locations.\"\n return first_name + \" \" + last_name + \" now has an account. 
They are assigned to \" + location_names + \" Make another new user or return to the indicator.\"\n\n\nclass ProgressListView(SiteRootView, TemplateView):\n template_name = 'base/progress.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(ProgressListView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['locations'] = cm.Location.objects.filter(\n title__icontains=self.kwargs['tag']).order_by('title')\n return context\n\n\nclass UserLoginView(SiteRootView, FormView):\n template_name = 'base/form.html'\n form_class = cf.LoginForm\n success_url = '/'\n\n def form_valid(self, form):\n user = form.user_cache\n login(self.request, user)\n form.instance = user\n\n if self.request.is_ajax():\n context = {\n 'status': 'success',\n 'userid': user.id,\n 'sessionid': self.request.session.session_key\n }\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n else:\n return super(UserLoginView, self).form_valid(form)\n\n def form_invalid(self, form):\n response = super(UserLoginView, self).form_invalid(form)\n if self.request.is_ajax():\n return self.render_to_json_response({\"errors\": form.errors, \"status\": \"failure\",})\n else:\n return response\n\n def render_to_json_response(self, context, **response_kwargs):\n data = json.dumps(context)\n response_kwargs['content_type'] = 'application/json'\n return HttpResponse(data, **response_kwargs)\n\n\nclass UserView(NounView):\n def get_noun(self, **kwargs):\n user = User.objects.get(id=self.kwargs['pk'])\n coreuser = cm.CoreUser(user)\n user.required_verbs = coreuser.verb_classes\n user.get_verbs = coreuser.get_verbs\n user.get_available_verbs = coreuser.get_available_verbs\n user.conditions = coreuser.conditions\n return user\n\n\nclass UserDetailView(UserView, TemplateView):\n model = User\n template_name = 'base/bootstrap.html'\n\n\nclass UserPasswordResetView(UserView, FormView):\n model = User\n template_name = 'base/form.html'\n form_class = cf.PasswordResetForm\n\n def form_valid(self, form):\n user = User.objects.get(id=self.kwargs['pk'])\n password = form.cleaned_data['password1']\n user.set_password(password)\n user.save()\n return super(UserPasswordResetView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse(viewname='user_list', current_app='core')\n\n def get_success_message(self, cleaned_data):\n return \"Password reset.\"\n\n\nclass UserLogoutView(SiteRootView, TemplateView):\n template_name = 'bootstrap.html'\n\n def get(self, request, **kwargs):\n # if the user has no payment methods, redirect to the view where one can be created\n logout(self.request)\n return HttpResponseRedirect(reverse(viewname='location_list', current_app='core'))\n\n\nclass UserListView(SiteRootView, TemplateView):\n template_name = 'user/list.html'\n\n def get_context_data(self, **kwargs):\n context = super(UserListView, self).get_context_data(**kwargs)\n users = User.objects.filter(is_active=True).order_by('first_name', 'last_name')\n context['users'] = users\n locationusers = []\n for u in users:\n u.locations_volatile = u.location_set.all().order_by('title')\n locationusers.append(u)\n context['locationusers'] = locationusers\n return context\n\n\nfrom django.views.generic.edit import DeleteView\nfrom django.core.urlresolvers import reverse_lazy\n\n\nclass UserDeactivateView(UserView, DeleteView):\n model = User\n template_name = 
'user/deactivate.html'\n success_url = reverse_lazy('user_list')\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n Replaces the delete() method, deactivates the user instead\n \"\"\"\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.is_active = False\n self.object.save()\n return HttpResponseRedirect(success_url)\n\n\nclass UserUpdateView(UserView, UpdateView):\n model = User\n template_name = 'base/form.html'\n\n def get_form_class(self):\n return cf.get_user_form_class(self.get_noun())\n\n def form_valid(self, form):\n user = self.get_noun()\n new_locations = form.cleaned_data['locations']\n current_locations = user.location_set.all()\n for l in current_locations:\n if l not in new_locations:\n l.members.remove(user)\n for l in new_locations:\n if l not in current_locations:\n l.members.add(user)\n return super(UserUpdateView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse('user_list')\n\n\nclass LocationCreateView(SiteRootView, CreateView):\n model = cm.Location\n template_name = 'base/form.html'\n fields = '__all__'\n form_class = cf.LocationForm\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationCreateView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n def get_success_url(self):\n action.send(self.request.user, verb='created location', action_object=self.object)\n return reverse(viewname='location_detail', args=(self.object.id,), current_app='core')\n\n\nclass LocationListView(SiteRootView, TemplateView):\n model = cm.Location\n template_name = 'overview/map.html'\n\n def get_context_data(self, **kwargs):\n context = super(LocationListView, self).get_context_data(**kwargs)\n\n output = []\n if self.request.user.is_staff:\n locations = cm.Location.objects.all().order_by('title')\n else:\n locations = self.request.user.location_set.all()\n\n if self.request.is_ajax():\n for l in locations:\n blob = {\n 'id': l.id,\n 'lattitude': l.position.latitude,\n 'longitude': l.position.longitude,\n 'title': l.title,\n 'indicator_ids': l.get_indicator_ids()\n }\n output.append(blob)\n context['locations'] = output\n else:\n context['locations'] = locations\n dimensions_qs = cm.Dimension.objects.select_related('parent') \\\n .all().order_by('name')\n # '#' stands for no parent(root) in jstree plugin\n context['dimensions'] = map(lambda obj: dict(\n id=obj.id,\n parent=obj.parent.id if obj.parent else '#',\n text=obj.name,\n name=obj.name, # alias\n icon='no-icon' # class for avoiding icon\n ), dimensions_qs)\n context['stream'] = []\n # context['stream'] = am.Action.objects.all()[:40]\n return context\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n context = self.get_context_data(**kwargs)\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return super(LocationListView, self).get(request, *args, **kwargs)\n\n\nclass PlainLocationListView(SiteRootView, TemplateView):\n model = cm.Location\n template_name = 'overview/map.html'\n\n def get_context_data(self, **kwargs):\n context = super(PlainLocationListView, self).get_context_data(**kwargs)\n dimension_id = self.request.GET.get('dimension', None)\n output = []\n if self.request.user.is_staff:\n locations = 
cm.Location.objects.all()\n else:\n locations = self.request.user.location_set.all()\n\n if dimension_id:\n locations = locations.filter(dimensionpath__dimension=dimension_id)\n\n locations = locations.order_by('title')\n\n if self.request.is_ajax():\n for l in locations:\n blob = {\n 'id': l.id,\n 'lattitude': l.position.latitude if l.position else '0',\n 'longitude': l.position.longitude if l.position else '0',\n 'title': l.title,\n }\n output.append(blob)\n context['locations'] = output\n else:\n context['locations'] = locations\n context['stream'] = []\n # context['stream'] = am.Action.objects.all()[:40]\n return context\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n context = self.get_context_data(**kwargs)\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return super(PlainLocationListView, self).get(request, *args, **kwargs)\n\n\nclass LocationListStreamView(SiteRootView, ListView):\n model = am.Action\n template_name = 'overview/map.html'\n paginate_by = 10\n context_object_name = 'stream'\n queryset = am.Action.objects.all().select_related('actor', 'action_object', 'target')\n\n\nclass LocationView(NounView):\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['pk'])\n\n def get_context_data(self, **kwargs):\n context = super(LocationView, self).get_context_data(**kwargs)\n context[\"background_image_url\"] = self.get_noun().get_background_image_url()\n return context\n\n\nclass LocationUpdateView(LocationView, UpdateView):\n model = cm.Location\n template_name = 'base/form.html'\n success_url = '/'\n form_class = cf.LocationForm\n\n def get_success_url(self):\n action.send(self.request.user, verb='updated location', action_object=self.get_noun())\n return reverse(viewname='location_detail', args=(self.noun.id,), current_app='core')\n\n\nclass LocationDetailView(LocationView, TemplateView):\n model = cm.Location\n template_name = 'location/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(LocationDetailView, self).get_context_data(**kwargs)\n most_recent_image = self.noun.get_most_recent_image()\n if most_recent_image != None:\n context[\"most_recent_image_url\"] = most_recent_image.get_file_url()\n # context[\"stream\"] = self.noun.get_action_stream()[:40]\n context['stream'] = []\n return context\n\n\nclass LocationDetailStreamView(LocationView, ListView):\n model = am.Action\n template_name = 'location/detail.html'\n paginate_by = 10\n context_object_name = 'stream'\n\n def get_queryset(self, **kwargs):\n return self.noun.get_action_stream().select_related('actor', 'action_object', 'target')\n\n\nclass LocationPhotoListView(LocationView, ListView):\n template_name = 'location/photos.html'\n model = cm.Image\n paginate_by = 5\n\n def get_queryset(self):\n return self.get_noun().images.all()\n\n\nclass LocationIndicatorListlView(LocationView, TemplateView):\n model = cm.Location\n template_name = 'location/indicators.html'\n\n def get_context_data(self, **kwargs):\n context = super(LocationIndicatorListlView, self).get_context_data(**kwargs)\n # context['stream'] = self.noun.get_action_stream()[:40]\n context['stream'] = []\n context['indicators'] = self.noun.indicators.all().order_by('form_number', 'title')\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n return context\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationIndicatorListlView, self).get(request, *args, 
**kwargs)\n        context = self.get_context_data(**kwargs)\n        if self.request.is_ajax():\n            data = json.dumps(context, default=decimal_default)\n            out_kwargs = {'content_type': 'application/json'}\n            return HttpResponse(data, **out_kwargs)\n        return supes\n\n\ntry:\n    import xlwt\n\n    XLWT_INSTALLED = True\n    XLWT_DATETIME_STYLE = xlwt.easyxf(num_format_str='MM/YYYY')\nexcept ImportError:\n    XLWT_INSTALLED = False\nfrom io import BytesIO\nfrom forms_builder.forms.utils import slugify\n\nimport re\n\n\nclass EntriesFilterView(SiteRootView, FormView):\n    model = cm.Location\n    template_name = 'base/form.html'\n    form_class = cf.SavedFilterForm\n    worksheet_names = {}\n\n    def sanitize_worksheet_name(self, incoming):\n        stripped_name = re.sub(r'[\\W_]+', ' ', incoming[:31])\n        if stripped_name in self.worksheet_names:\n            self.worksheet_names[stripped_name] += 1\n            return stripped_name[:25] + \" \" + str(self.worksheet_names[stripped_name])\n        else:\n            self.worksheet_names[stripped_name] = 1\n            return stripped_name\n\n    def add_indicator_to_workbook(self, indicator, workbook, columns, saved_filter):\n        sheet = workbook.add_sheet(self.sanitize_worksheet_name(indicator.get_title()))\n        for c, col in enumerate(columns):\n            sheet.write(0, c, col)\n        for r, row in enumerate(indicator.get_filtered_entries(saved_filter, csv=True)):\n            for c, item in enumerate(row):\n                if isinstance(item, datetime.datetime):\n                    item = item.replace(tzinfo=None)\n                    sheet.write(r + 2, c, item, XLWT_DATETIME_STYLE)\n                else:\n                    sheet.write(r + 2, c, item)\n\n        return workbook\n\n    def form_valid(self, form):\n        try:\n            show_hidden = form.cleaned_data['show_hidden']\n        except Exception as e:\n            show_hidden = False\n\n        try:\n            indicator = form.cleaned_data['indicator']\n            columns = indicator.get_column_headers(show_hidden=show_hidden)\n        except Exception as e:\n            indicator = None\n\n        if form.cleaned_data['export'] == True:\n            response = HttpResponse(mimetype=\"application/vnd.ms-excel\")\n            fname = \"%s-%s.xls\" % (\"QI Data Export\", slugify(now().ctime()))\n            attachment = \"attachment; filename=%s\" % fname\n            response[\"Content-Disposition\"] = attachment\n            queue = BytesIO()\n            workbook = xlwt.Workbook(encoding='utf8')\n            if indicator == None:\n                for i in cm.Indicator.objects.all().order_by(\"form_number\"):\n                    columns = i.get_column_headers(show_hidden=show_hidden)\n                    workbook = self.add_indicator_to_workbook(i, workbook, columns,\n                                                              form.cleaned_data)\n            else:\n                workbook = self.add_indicator_to_workbook(indicator, workbook, columns,\n                                                          form.cleaned_data)\n            workbook.save(queue)\n            data = queue.getvalue()\n            response.write(data)\n            return response\n        else:\n            context = {\n                \"columns\": columns,\n                \"entries\": indicator.get_filtered_entries(form.cleaned_data, csv=False,\n                                                          show_hidden=show_hidden),\n                \"available_verbs\": self.noun.get_available_verbs(self.request.user),\n                \"filter\": form.cleaned_data\n            }\n            return render_to_response('indicator/entries.html',\n                                      context,\n                                      context_instance=RequestContext(self.request))\n\n    def get_form_kwargs(self):\n        kwargs = super(EntriesFilterView, self).get_form_kwargs()\n        kwargs['ajax_location'] = True\n        return kwargs\n\n\nclass ScoresDetailView(SiteRootView, FormView):\n    template_name = 'overview/scores.html'\n    form_class = cf.DateForm\n\n    def form_valid(self, form):\n        the_date = form.cleaned_data['date']\n\n        return HttpResponseRedirect(reverse(viewname='scores_date_list',\n                                            kwargs={'month': the_date.month, 'year': the_date.year},\n                                            current_app='core'))\n\n    def get_context_data(self, **kwargs):\n        context = super(ScoresDetailView, 
self).get_context_data(**kwargs)\n NOT_ASSIGNED_STRING = \"N/A\"\n NO_DATA_STRING = \"N/D\"\n try:\n month = int(self.kwargs['month'])\n year = int(self.kwargs['year'])\n except Exception as e:\n d = datetime.datetime.now()\n month = d.month\n year = d.year\n queryset = cm.Indicator.objects.all().order_by(\"form_number\")\n columns = list(queryset.values_list('title', flat=True))\n indicator_ids = list(queryset.values_list('id', flat=True))\n # get all scores for this month\n rows = {}\n for l in cm.Location.objects.select_related('indicators').all():\n l_assignments = l.get_indicator_ids()\n # rows[l.id] = [l.title]+([NOT_ASSIGNED_STRING]*len(columns))\n l_cols = []\n for lc in indicator_ids:\n # if the column is assigned to this l, fill it with N/D\n if lc in l_assignments:\n l_cols.append(NO_DATA_STRING)\n else:\n # else fill it with N/A\n l_cols.append(NOT_ASSIGNED_STRING)\n rows[l.id] = [l.title] + (l_cols)\n # add space to the begininbg of columns for the location names\n columns = [\"Location\"] + columns\n for s in cm.Score.objects.filter(month=str(month), year=year):\n # add the score object to the table if it exists\n indicator_index = indicator_ids.index(s.indicator.id) + 1\n if type(rows[s.location.id][indicator_index]) == unicode:\n rows[s.location.id][indicator_index] = s\n else:\n rows[s.location.id][indicator_index].merge(s)\n\n this_month = datetime.date(year, month, 1)\n\n # raise Exception(rows)\n context['this_month'] = this_month\n context['last_month'] = this_month - relativedelta(months=1)\n context['next_month'] = this_month + relativedelta(months=1)\n context['columns'] = columns\n context['entries'] = rows.values()\n return context\n\n\nclass LocationImageCreateView(LocationView, CreateView):\n model = cm.Image\n template_name = 'base/form.html'\n fields = ['original_file']\n\n def get_form(self, form_class):\n return cf.ImageForm(self.request.POST or None, self.request.FILES or None,\n initial=self.get_initial())\n\n def form_valid(self, form):\n return super(LocationImageCreateView, self).form_valid(form)\n\n def get_success_url(self):\n self.noun.images.add(self.object)\n action.send(self.request.user, verb='uploaded image', action_object=self.object,\n target=self.noun)\n return reverse(viewname='location_detail', args=(self.noun.id,), current_app='core')\n\n\nclass IndicatorCreateView(SiteRootView, CreateView):\n model = cm.Indicator\n template_name = 'base/form.html'\n form_class = cf.IndicatorForm\n\n def form_valid(self, form):\n new_form = fm.Form.objects.create(title=form.cleaned_data['title'][0:50])\n location_field = fm.Field.objects.create(form=new_form, field_type=1, label=\"Location\",\n visible=False, order=-2)\n location_field = fm.Field.objects.create(form=new_form, field_type=1, label=\"User\",\n visible=False, order=-1)\n location_field = fm.Field.objects.create(form=new_form, field_type=13, label=\"Score\",\n visible=False, order=0)\n form.instance.form = new_form\n self.instance = form.instance\n # action.send(self.request.user, verb='created', action_object=self.object, target=self.object)\n return super(IndicatorCreateView, self).form_valid(form)\n\n def get_success_url(self):\n action.send(self.request.user, verb='created indicator', action_object=self.instance)\n return reverse(viewname='field_create', args=(self.instance.id,), current_app='core')\n\n\nclass IndicatorView(NounView):\n def get_noun(self, **kwargs):\n return cm.Indicator.objects.get(id=self.kwargs['pk'])\n\n\nclass IndicatorUpdateView(IndicatorView, UpdateView):\n model = 
cm.Indicator\n template_name = 'base/form.html'\n success_url = '/'\n form_class = cf.IndicatorForm\n\n def get_success_url(self):\n self.get_noun().updated_at = datetime.datetime.now()\n action.send(self.request.user, verb='updated indicator', action_object=self.get_noun())\n return reverse(viewname='indicator_detail', args=(self.noun.id,), current_app='core')\n\n\nclass IndicatorDetailView(IndicatorView, TemplateView):\n model = cm.Indicator\n template_name = 'indicator/list.html'\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorDetailView, self).get_context_data(**kwargs)\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n # context['stream'] = self.noun.get_action_stream()[:40]\n context['stream'] = []\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n return context\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorDetailView, self).get_context_data(**kwargs)\n indicators = []\n indicators.append(self.noun.get_serialized())\n context['indicators'] = indicators\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n\n return context\n\n def get(self, request, *args, **kwargs):\n supes = super(IndicatorDetailView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass IndicatorListView(SiteRootView, TemplateView):\n model = cm.Indicator\n template_name = 'indicator/list.html'\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorListView, self).get_context_data(**kwargs)\n indicators = []\n for l in cm.Indicator.objects.all().order_by('form_number'):\n blob = l.get_serialized()\n indicators.append(blob)\n context['indicators'] = indicators\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n return context\n\n def get(self, request, *args, **kwargs):\n supes = super(IndicatorListView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass FieldCreateView(IndicatorView, FormView):\n model = fm.Field\n template_name = 'base/form.html'\n\n def get_form(self, form_class):\n return cf.FieldForm(self.request.POST or None, self.request.FILES or None,\n initial=self.get_initial())\n\n def form_valid(self, form):\n form.instance.form = self.noun.form\n form.instance.required = False\n self.object = form.instance.save()\n self.instance = form.instance\n return super(FieldCreateView, self).form_valid(form)\n\n def get_success_url(self):\n action.send(self.request.user, verb='created field', action_object=self.instance,\n target=self.noun)\n return reverse(viewname='field_create', args=(self.noun.id,), current_app='core')\n\n def get_success_message(self, cleaned_data):\n return \"Your field was created. 
Make another new field or return to the indicator.\"\n\n\nclass FieldUpdateView(IndicatorView, UpdateView):\n model = fm.Field\n template_name = 'base/form.html'\n success_url = '/'\n\n def get_noun(self, **kwargs):\n return cm.Indicator.objects.get(id=self.kwargs['indicator_pk'])\n\n def get_object(self):\n output = get_object_or_404(fm.Field, id=self.kwargs[\"pk\"])\n return output\n\n def get_form(self, form_class):\n return cf.FieldForm(self.request.POST or None, self.request.FILES or None,\n initial=self.get_initial(), instance=self.get_object())\n\n def get_success_url(self):\n action.send(self.request.user, verb='updated field', action_object=self.get_object(),\n target=self.noun)\n return reverse(viewname='indicator_detail', args=(self.noun.id,), current_app='core')\n\n\nimport json\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.views.generic.base import TemplateView\n\nfrom forms_builder.forms.forms import FormForForm\nfrom forms_builder.forms.models import Form\n\nfrom forms_builder.forms.signals import form_invalid, form_valid\n\nfrom django.contrib import messages\n\n\nclass IndicatorRecordCreateView(LocationView, TemplateView):\n template_name = \"base/form.html\"\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def prep_form(self, form):\n # form.fields.__delitem__('location')\n # form.fields.__delitem__('user')\n return form\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorRecordCreateView, self).get_context_data(**kwargs)\n published = Form.objects.published(for_user=self.request.user)\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n form = indicator.get_form()\n form = self.prep_form(form)\n context[\"form\"] = form\n context[\"indicator\"] = indicator\n return context\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n # throw an error if this user is not authorized\n # TODO: find a way to do this with carteblanche\n if (self.request.user.is_staff != True) and (\n self.noun.members.filter(id=self.request.user.id).count() == 0):\n raise Exception(\n \"You tried to create a record with a location you're not assigned to. 
You must be an Admin or a member of \" + self.noun.title + \" to create a new record.\")\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n builder_form_object = indicator.get_builder_form_object()\n form = FormForForm(builder_form_object, RequestContext(request),\n request.POST or None,\n request.FILES or None)\n if not form.is_valid():\n form_invalid.send(sender=request, form=self.form_for_form)\n else:\n # Attachments read must occur before model save,\n # or seek() will fail on large uploads.\n attachments = []\n for f in form.files.values():\n f.seek(0)\n attachments.append((f.name, f.read()))\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n location = get_object_or_404(cm.Location, id=kwargs[\"location_pk\"])\n form.cleaned_data[\"user\"] = request.user.get_full_name()\n form.cleaned_data[\"location\"] = location.__str__()\n entry = form.save()\n form_valid.send(sender=request, form=form, entry=entry)\n form = self.prep_form(form)\n score = indicator.score_entry(entry)\n context = self.get_context_data(**kwargs)\n if score >= indicator.passing_percentage:\n messages.success(request, 'Passing score of ' + str(score))\n action.send(self.request.user, verb='entered passing record',\n action_object=context.get(\"indicator\"), target=self.noun)\n else:\n messages.error(request, 'Not passing score of ' + str(score))\n action.send(self.request.user, verb='entered failing record',\n action_object=context.get(\"indicator\"), target=self.noun)\n return HttpResponseRedirect(reverse(viewname='indicator_record_create',\n args=(kwargs['location_pk'], kwargs['pk'],),\n current_app='core'))\n\n context = {\"builder_form_object\": builder_form_object, \"form\": form}\n return self.render_to_response(context)\n\n def render_to_response(self, context, **kwargs):\n if self.request.is_ajax():\n json_context = json.dumps({\n \"errors\": context[\"form_for_form\"].errors,\n \"form\": context[\"form_for_form\"].as_p(),\n \"message\": context[\"form\"].response,\n })\n return HttpResponse(json_context, content_type=\"application/json\")\n return super(IndicatorRecordCreateView, self).render_to_response(context, **kwargs)\n\n\nform_detail = IndicatorRecordCreateView.as_view()\n\nfrom forms_builder.forms.utils import now\n\n\nclass IndicatorRecordUploadView(LocationView, FormView):\n template_name = 'base/form.html'\n form_class = cf.JSONUploadForm\n success_url = '/'\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def form_valid(self, form):\n try:\n # throw an error if this user is not authorized\n # TODO: find a way to do this with carteblanche\n if (self.request.user.is_staff != True) and (\n self.noun.members.filter(id=self.request.user.id).count() == 0):\n raise Exception(\n \"You tried to synchronize a record with a location you're not assigned to. You must be an Admin or a member of \" + self.noun.title + \" to upload a new record.\")\n json_string = form.cleaned_data['json']\n\n data = json.loads(json_string, parse_float=decimal.Decimal)\n day = 1\n try:\n day = int(data.get(\"day\"))\n except Exception as e:\n pass\n new_entry_time = timezone.datetime(year=int(data.get(\"year\")),\n month=int(data.get(\"month\")), day=day)\n\n # create field entries for incoming data. 
Don't save them until we're done\n fieldEntries = []\n for f in data.get(\"values\"):\n field_id = f.get(\"field_id\")\n new_value = f.get(\"value\")\n if new_value == True:\n new_value = u\"True\"\n elif new_value == False:\n new_value = u\"False\"\n new_fieldEntry = fm.FieldEntry(value=new_value, field_id=field_id)\n fieldEntries.append(new_fieldEntry)\n if fieldEntries.__len__() > 0:\n # if there are entries, create a new record\n form_id = fm.Field.objects.get(id=field_id).form_id\n new_record = fm.FormEntry(entry_time=new_entry_time, form_id=form_id)\n new_record.save()\n for f in fieldEntries:\n # connect the entries to the record\n f.entry_id = new_record.id\n f.save()\n # create entries for location and user data\n score = float(data.get(\"score\"))\n builder_form = fm.Form.objects.get(id=form_id)\n new_locationEntry = fm.FieldEntry(value=self.get_noun().__str__(),\n field_id=builder_form.fields.get(\n label=\"Location\").id, entry_id=new_record.id)\n new_locationEntry.save()\n new_userEntry = fm.FieldEntry(value=self.request.user.get_full_name(),\n field_id=builder_form.fields.get(label=\"User\").id,\n entry_id=new_record.id)\n new_userEntry.save()\n new_scoreEntry = fm.FieldEntry(value=score,\n field_id=builder_form.fields.get(label=\"Score\").id,\n entry_id=new_record.id)\n new_scoreEntry.save()\n\n # take the score from the json and create an action\n indicator = cm.Indicator.objects.get(form__id=form_id)\n if score == 100:\n messages.success(self.request, 'Passing score of ' + str(score))\n action.send(self.request.user, verb='PASS ' + str(score),\n action_object=indicator, target=self.noun)\n else:\n messages.error(self.request, 'Not passing score of ' + str(score))\n action.send(self.request.user, verb='FAIL ' + str(score),\n action_object=indicator, target=self.noun)\n context = {\n \"status\": \"success\",\n \"record_id\": new_record.id\n }\n except Exception as e:\n context = {\n \"status\": \"failure\",\n \"error\": e\n }\n messages.error(self.request, e)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n else:\n\n return super(IndicatorRecordUploadView, self).form_valid(form)\n\n def get(self, request, *args, **kwargs):\n\n supes = super(IndicatorRecordUploadView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\n'''\nincoming json looks like:\n{\n \"title\":\"blah blah blah\",\n \"scores\":[\n {\n \"percentage\":100.00,\n \"indicator_id\":0,\n \"location_id\":0,\n \"passing\":true,\n \"total_record_count\":0,\n \"passing_record_count\":0\n }\n ]\n}\n'''\n\n\nclass LocationScoreUploadView(LocationView, FormView):\n template_name = 'base/form.html'\n form_class = cf.JSONUploadForm\n success_url = '/'\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def form_valid(self, form):\n json_string = form.cleaned_data['json']\n\n try:\n data = json.loads(json_string, parse_float=decimal.Decimal)\n new_scores = []\n for s in data.get(\"scores\"):\n # print type(s)\n # check to make sure the location matches\n\n if int(s.get(\"location_id\")) != self.noun.id:\n raise Exception(\"wrong score for this location\")\n indicator_id = s.get(\"indicator_id\")\n indicator = 
cm.Indicator.objects.get(id=indicator_id)\n # create but don't save untill all are created\n t = datetime.datetime(year=s.get(\"year\"), month=s.get(\"month\"), day=1)\n new_score = cm.Score(indicator=indicator, passing=s.get(\"passing\"),\n entry_count=s.get(\"total_record_count\"),\n passing_entry_count=s.get(\"passing_record_count\"),\n month=str(s.get(\"month\")), year=s.get(\"year\"),\n score=s.get(\"percentage\"), location=self.noun,\n user=self.request.user, datetime=t)\n new_scores.append(new_score)\n if settings.CACHING:\n self.noun.invalidate_cached_series(indicator)\n # if nothing blew up, lets save these and invalidate the cached series data\n for s in new_scores:\n s.save()\n context = {\n \"status\": \"success\",\n \"score_id\": 0\n }\n except Exception as e:\n context = {\n \"status\": \"failure\",\n \"error\": e\n }\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n else:\n return super(LocationScoreUploadView, self).form_valid(form)\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationScoreUploadView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass LocationIndicatorVisualize(LocationView, TemplateView):\n template_name = \"location/visualize.html\"\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationIndicatorVisualize, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n t = datetime.datetime.now()\n year_ago = t - relativedelta(months=12)\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n # get all scores for this location/indicator from the last year\n scores = cm.Score.objects.filter(indicator__id=kwargs[\"pk\"],\n location__id=kwargs['location_pk'],\n datetime__gte=year_ago).order_by('datetime')\n # iterate over scores averaging them if there are more than one per month\n\n data = []\n for s in scores:\n # multiplied by 1000 because apparently js doesn't understand utc\n blob = [time.mktime(s.datetime.timetuple()) * 1000, s.score]\n data.append(blob)\n output = {\n \"name\": self.noun.title,\n \"id\": self.noun.id,\n \"data\": data\n }\n context = self.get_context_data(**kwargs)\n context[\"series\"] = [output]\n context[\"noun\"] = {\"title\": self.noun.title}\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass LocationVisualize(LocationView, TemplateView):\n template_name = \"location/visualize.html\"\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n # store these results in a new series\n # add the series to\n context = self.get_context_data(**kwargs)\n context[\"series\"] = self.noun.get_all_series()\n context[\"noun_title\"] = self.noun.title\n context[\"location_id\"] = self.noun.id\n context[\"noun\"] = {\"title\": self.noun.title}\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return super(LocationVisualize, self).get(request, *args, 
**kwargs)\n\n\nclass LocationListVisualizeView(SiteRootView, TemplateView):\n template_name = \"overview/visualize.html\"\n\n def get_context_data(self, **kwargs):\n\n # Call the base implementation first to get a context\n context = super(LocationListVisualizeView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['indicators'] = cm.Indicator.objects.all().order_by('form_number')\n return context\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n all_series = []\n # for every location, get all_series\n for l in cm.Location.objects.filter(id=21).prefetch_related('indicators'):\n all_series.append(l.get_all_series())\n # store these results in a new series\n # add the series to\n context = self.get_context_data(**kwargs)\n context[\"series\"] = all_series\n context[\"noun_title\"] = \"Overview\"\n context[\"location_id\"] = \"-2\"\n context[\"noun\"] = {\"title\": \"Overview\"}\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n dimension_qs = retrieve_leaf_dimensions().order_by('name')\n kwargs['dimensions'] = dimension_qs\n\n return super(LocationListVisualizeView, self).get(request, *args, **kwargs)\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":46507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"39822189","text":"def modify(arr, n): \n \n # Nothing to do when array size is 1 \n if n <= 1: \n return\n \n # store current value of arr[0] and update it \n prev = arr[0] \n arr[0] = arr[0] * arr[1] \n \n # Update rest of the array elements \n for i in range(1, n-1): \n \n # Store current value of next interation \n curr = arr[i]; \n \n # Update current value using previos value \n arr[i] = prev * arr[i+1] \n \n # Update previous value \n prev = curr \n \n \n # Update last array element \n arr[n-1] = prev * arr[n-1] \n \n \n# Driver program \narr = [2, 3, 4, 5, 6] \nn = len(arr) \nmodify(arr, n) \nfor i in range (0, n): \n print(arr[i],end=\" \") ","sub_path":"data/docker-generated-data/code_clones/p28.py","file_name":"p28.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"409446656","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport os.path\nimport urllib\nfrom requests.sessions import Session\n\nfrom bottle import redirect\nfrom cony.utils import force_str\nfrom xml.etree import ElementTree as ET\n\nxhtml = '{http://www.w3.org/1999/xhtml}'\n\n\ndef cmd_translate(term):\n \"\"\"Translates the text using Google Translate.\"\"\"\n if len(term) < len(term.encode('utf-8')):\n direction = 'ru|en'\n else:\n direction = 'en|ru'\n redirect('http://translate.google.com/#%s|%s' % (direction, term.encode('utf-8')))\n\ncmd_tr = cmd_translate\n\n\ndef cmd_save_word(term):\n \"\"\"Saves word and it's translation into the ~/.words.csv\n\n These files could be used to import words into the FlashCards ToGo.\n \"\"\"\n if ';' not in term:\n return cmd_search_word(term)\n\n filename = '~/.words.csv'\n\n template = \"\"\"\n
Translation \"{{ word }}\" was saved to %s
\n %%rebase layout title='Translation saved'\n \"\"\" % filename\n\n filename = os.path.expanduser(filename)\n dirname = os.path.dirname(filename)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n with open(filename, 'a+') as f:\n f.write(term.encode('utf-8'))\n f.write('\\n')\n return dict(template=template, word=term)\n\n\ndef cmd_search_word(term):\n \"\"\"Searches word translations at the http://slovari.yandex.ru.\n\n This command requires `simplejson` module to be installed.\n \"\"\"\n import simplejson\n\n template = \"\"\"\n
    %for v in variants:\n      • {{ v['en'] }}\n      %if v['transcript']:\n        ({{ v['transcript'] }})\n      %end\n      %if v['has_audio']:\n        (audio available)\n      %end\n      — {{ v['ru'] }}\n    %end\n
\n %rebase layout title='Word translation'\n \"\"\"\n\n variants = {}\n\n internet = Session()\n\n for i in reversed(range((len(term) + 1) / 2, len(term) + 1)):\n url = 'http://suggest-slovari.yandex.ru/suggest-lingvo?v=2&lang=en&' + \\\n urllib.urlencode(dict(part=term[:i].encode('utf-8')))\n response = internet.get(url)\n data = simplejson.loads(response.content)\n\n if data[0]:\n for trans, link in zip(*data[1:]):\n en, ru = trans.split(' - ', 1)\n variants[en] = dict(en=en, ru=ru, link=link)\n if len(variants) > 5:\n break\n\n\n def get_spelling(value):\n url = 'http://lingvo.yandex.ru/' + force_str(value['en']).replace(' ', '%20') + '/%D1%81%20%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%BE%D0%B3%D0%BE/'\n data = internet.get(url).content\n\n xml = ET.fromstring(force_str(data))\n transcript = xml.find('*//{x}span[@class=\"b-translate__tr\"]'.format(x=xhtml))\n\n if transcript is None:\n value['transcript'] = ''\n else:\n value['transcript'] = transcript.text\n\n has_audio = xml.find('*//{x}h1[@class=\"b-translate__word\"]//{x}span[@class=\"b-audio g-js\"]'.format(x=xhtml))\n value['has_audio'] = has_audio is not None\n return value\n\n variants = dict((key, get_spelling(value)) for key, value in variants.iteritems())\n\n return dict(template=template, variants=sorted(variants.values()))\n\ncmd_wo = cmd_search_word\n","sub_path":"cony/repo/lang.py","file_name":"lang.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"76560985","text":"import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nfrom WeatherDL.data_maker import dataset_maker\nfrom WeatherDL.model_maker import model_3\n\n# Extract data from data_maker\nX, y = dataset_maker(window=5, forecast_day=1)\n(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)\n\n# Open model from model_maker\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\n\n# Fit model, and extract training & validation metrics\nhistory = model.fit(X_train, y_train,\n validation_data=(X_test, y_test),\n batch_size=5,\n epochs=30,\n verbose=2,\n shuffle=False)\n\n# Prediction\ny_pred = model.predict(X_test)\n\n# Data Visualization\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n","sub_path":"WeatherDL/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"399683933","text":"# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n\"\"\"\n======================\nPlot cluster centroids\n======================\n\nThis example shows how to plot centroids of the clusters of rhythmic patterns.\n\"\"\"\n\n# Code source: Martín Rocamora\n# License: MIT\n\n##############################################\n# Imports\n# - matplotlib for visualization\n#\nimport matplotlib.pyplot as plt\nfrom carat import audio, util, annotations, features, clustering, 
display\n\n##############################################\n# We group rhythmic patterns into clusters and plot their centroids.\n#\n# First, we'll load one of the audio files included in `carat`.\naudio_path = util.example(\"ansina_audio\")\n\ny, sr = audio.load(audio_path)\n\n##############################################\n# Next, we'll load the annotations provided for the example audio file.\nannotations_path = util.example(\"ansina_beats\")\n\nbeats, beat_labs = annotations.load_beats(annotations_path)\ndownbeats, downbeat_labs = annotations.load_downbeats(annotations_path)\n\n##############################################\n# Then, we'll compute the accentuation feature.\n#\n# **Note:** This example is tailored towards the rhythmic patterns of the lowest\n# sounding of the three drum types taking part in the recording, so the analysis\n# focuses on the low frequencies (20 to 200 Hz).\nacce, times, _ = features.accentuation_feature(y, sr, minfreq=20, maxfreq=200)\n\n##############################################\n# Next, we'll compute the feature map.\nn_beats = int(round(beats.size/downbeats.size))\nn_tatums = 4\n\nmap_acce, _, _, _ = features.feature_map(acce, times, beats, downbeats, n_beats=n_beats,\n n_tatums=n_tatums)\n\n##############################################\n# Then, we'll group rhythmic patterns into clusters. This is done using the classical\n# K-means method with Euclidean distance (but other clustering methods and distance\n# measures can be used too).\n#\n# **Note:** The number of clusters n_clusters has to be specified as an input parameter.\nn_clusters = 4\n\ncluster_labs, centroids, _ = clustering.rhythmic_patterns(map_acce, n_clusters=n_clusters)\n\n##############################################\n# Finally we plot the centroids of the clusters of rhythmic patterns.\n\nfig = plt.figure(figsize=(8, 8))\ndisplay.centroids_plot(centroids, n_tatums=n_tatums)\n\nplt.tight_layout()\n\nplt.show()\n","sub_path":"docs/source/examples/plot_cluster_centroids.py","file_name":"plot_cluster_centroids.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"433486854","text":"\"\"\"\nReport generator\n\nThis module provides the following report generation features\n 1. Load and categorize transactions\n 2. Build latency distribution histograms for each category of transactions\n 3. Build html report with (stats, flots, transaction list) for each category, route combination\n 4. 
Generate environment reports\n\nAuthor: Manikandan Dhamodharan, Morgan Stanley\n\"\"\"\n\nimport time\nimport numpy\nimport logging\nfrom xpedite.report.reportbuilder import ReportBuilder\nfrom xpedite.report.env import EnvReportBuilder\nfrom xpedite.report.histogram import (\n formatLegend, formatBuckets, buildFlotHistograms,\n buildBuckets, buildDistribution, Flot\n )\nfrom xpedite.util import timeAction, formatHumanReadable\nfrom xpedite.containers import ProbeMap\nfrom xpedite.report.profile import Profiles, Profile\nfrom xpedite.analytics import Analytics, CURRENT_RUN\n\nLOGGER = logging.getLogger(__name__)\n\nclass ReportGenerator(object):\n \"\"\"Generates reports for the current profile session\"\"\"\n\n def __init__(self, reportName):\n \"\"\"\n Constructs an instance of report generator\n\n :param reporName: Name of the generated report\n\n \"\"\"\n self.reportName = reportName\n self.analytics = Analytics()\n\n def generateFlots(self, repo, classifier, runId):\n \"\"\"\n Generates latency distribuion histograms for each category/route combination\n\n :param repo: Repository of transaction collection\n :type repo: xpedite.transaction.TransactionRepo\n :param classifier: Classifier to categorize transactions into various types\n :param runId: Epoch time stamp to uniquely identify a profiling session\n\n \"\"\"\n flots = {}\n transactionCollections = [repo.getCurrent()] + repo.getBenchmarks().values()\n if not transactionCollections[0].isCurrent() or transactionCollections[0].name != CURRENT_RUN:\n from xpedite.types import InvariantViloation\n raise InvariantViloation(\n 'expecing transactions for current run at index 0 in the repository. '\n 'instead found {}'.format(transactionCollections[0].name)\n )\n\n elapsedTimeBundles = self.analytics.buildElapsedTimeBundles(transactionCollections, classifier)\n\n for category, elaspsedTimeBundle in elapsedTimeBundles.iteritems():\n buckets = buildBuckets(elaspsedTimeBundle[0], 35)\n if not buckets:\n LOGGER.debug('category %s has not enough data points to generate flot', category)\n continue\n\n LOGGER.debug('Buckets:\\n%s', buckets)\n\n yaxis = []\n conflatedCounts = []\n LOGGER.debug('Bucket values:')\n for i, elapsedTimeList in enumerate(elaspsedTimeBundle):\n bucketValues, conflatedCountersCount = timeAction('building counter distribution',\n lambda bkts=buckets, etl=elapsedTimeList: buildDistribution(bkts, etl)\n )\n conflatedCounts.append(conflatedCountersCount)\n LOGGER.debug('%s', bucketValues)\n title = transactionCollections[i].name\n legend = formatLegend(\n title, min(elapsedTimeList), max(elapsedTimeList), numpy.mean(elapsedTimeList), numpy.median(elapsedTimeList),\n numpy.percentile(elapsedTimeList, 95), numpy.percentile(elapsedTimeList, 99)\n )\n yaxis.append((legend, bucketValues))\n\n benchmarkConflatedCounts = sum(conflatedCounts, 1)\n if conflatedCounts[0] + benchmarkConflatedCounts > 0:\n LOGGER.debug(\n 'conflation - due to narrow bucket range [%s to %s] - (%d) in current run and (%d) in all '\n 'bencmark counter values are conflated',\n buckets[0], buckets[len(buckets)-1],\n conflatedCounts[0], benchmarkConflatedCounts\n )\n\n buckets = formatBuckets(buckets)\n options, data = buildFlotHistograms(buckets, yaxis, False)\n title = '{} - latency distribution benchmark'.format(category)\n description = 'Latency distribution (current run ID #{} vs chosen benchmarks)'.format(runId)\n flots.update({category: Flot(title, description, data, options)})\n return flots\n\n\n @staticmethod\n def getReportProbes(route, 
userProbes):\n \"\"\"\n Creates probes with human friendly name for reporting\n\n :param userProbes: List of probes enabled for a profiling session\n\n \"\"\"\n reportProbes = []\n userProbeMap = ProbeMap(userProbes)\n for probe in route.probes:\n if probe in userProbeMap:\n reportProbes.append(userProbeMap[probe])\n else:\n reportProbes.append(probe)\n return reportProbes\n\n @staticmethod\n def generateEnvironmentReport(app, result, repo, resultOrder, classifier, txnFilter, benchmarkPaths):\n \"\"\"\n Generates report with environment details\n\n :param app: an instance of xpedite app, to interact with target application\n :param result: Handle to gather and store profiling results\n :param repo: Repository of loaded transactions\n :param resultOrder: Sort order of transactions in latency constituent reports\n :param classifier: Predicate to classify transactions into different categories\n :param txnFilter: Lambda to filter transactions prior to report generation\n :param benchmarkPaths: List of stored reports from previous runs, for benchmarking\n\n \"\"\"\n envReport = EnvReportBuilder().buildEnvironmentReportFile(\n app, repo, resultOrder, classifier, txnFilter, benchmarkPaths\n )\n description = \"\"\"\n Test environment report (cpu clock frequency, kernel configuration etc.)\n \"\"\"\n envReportTitle = 'Test Environment Report'\n if envReport:\n result.attachXpediteReport(envReportTitle, envReportTitle, description, envReport)\n\n @staticmethod\n def addTestResult(reportName, result, timelineStats, benchmarkTlsMap):\n \"\"\"\n Adds report on perfromance regressions to profile results\n\n :param reportName: Name of the generated report\n :param result: Handle to gather and store profiling results\n :param timelineStats: Time line statistics for the current run\n :param benchmarkTlsMap: Time line statistics collection for benchmarks\n\n \"\"\"\n currentRunMedian = timelineStats.getTotalDurationSeries().getMedian()\n for benchmarkName, benchmarkTls in benchmarkTlsMap.iteritems():\n benchmarkMedian = benchmarkTls.getTotalDurationSeries().getMedian()\n threshold = max(benchmarkMedian * .05, .9)\n result.le(benchmarkMedian + threshold)(\n currentRunMedian, '{} Median latency threshold for current run vs benchmark {}'.format(\n reportName, benchmarkName\n )\n )\n\n def generateProfiles(self, transactionRepo, classifier):\n \"\"\"\n Generates profiles for the current profile session\n\n :param transactionRepo: Repository of loaded transactions\n :param classifier: Predicate to classify transactions into different categories\n\n \"\"\"\n transactionTree, benchmarkCompositeTree = self.analytics.buildTransactionTree(transactionRepo, classifier)\n profiles = Profiles(transactionRepo)\n\n for category, categoryNode in transactionTree.getChildren().iteritems():\n i = 1\n for route, transactionNode in categoryNode.children.iteritems():\n routeName = ' [route - {}]'.format(i) if len(categoryNode.children) > 1 else ''\n profileName = '{} - {}{}'.format(self.reportName, category, routeName)\n begin = time.time()\n LOGGER.info('generating profile %s (txns - %d) -> ', profileName, len(transactionNode.collection))\n\n benchmarkTransactionsMap = benchmarkCompositeTree.getCollectionMap([category, route])\n reportProbes = self.getReportProbes(route, transactionRepo.getCurrent().probes)\n timelineStats, benchmarkTimelineStats = self.analytics.computeStats(\n transactionRepo, category, route, reportProbes, transactionNode.collection, benchmarkTransactionsMap\n )\n profiles.addProfile(Profile(profileName, 
timelineStats, benchmarkTimelineStats))\n elapsed = time.time() - begin\n LOGGER.completed('completed in %0.2f sec.', elapsed)\n i += 1\n return profiles\n\n def generateLatencyReports(self, profiles, flots, result, resultOrder, reportThreshold):\n \"\"\"\n Generates latency breakup reports for a list of profiles\n\n :param profiles: Profile data for the current profile session\n :param flots: Latency distribuion histograms for each category/route combination\n :param result: Handle to gather and store profiling results\n :param resultOrder: Sort order of transactions in latency constituent reports\n :param reportThreshold: Threshold for number of transactions rendered in html reports.\n\n \"\"\"\n flotTracker = set()\n for profile in profiles:\n begin = time.time()\n reportTitle = '{} latency statistics [{} transactions]'.format(profile.name, len(profile.current))\n LOGGER.info('generating report %s -> ', reportTitle)\n\n category = profile.category\n if category not in flotTracker and category in flots:\n flots[category].attach(result)\n flotTracker.add(category)\n self.addTestResult(profile.name, result, profile.current, profile.benchmarks)\n report = ReportBuilder().buildReport(profile.current, profile.benchmarks, profile.reportProbes, profile.name,\n resultOrder, reportThreshold)\n reportSize = formatHumanReadable(len(report))\n reportTitle = '{} - ({})'.format(reportTitle, reportSize)\n description = '\\n\\t{}\\n\\t'.format(reportTitle)\n elapsed = time.time() - begin\n LOGGER.completed('completed %s in %0.2f sec.', reportSize, elapsed)\n result.attachXpediteReport(profile.name, reportTitle, description, report)\n\n def generateReport(self, app, repo, result, classifier, resultOrder, reportThreshold, txnFilter, benchmarkPaths):\n \"\"\"\n Generates statistics for the current profile session and attaches reports to the given result object\n\n :param app: An instance of xpedite app, to interact with target application\n :param repo: Repository of transaction collection\n :type repo: xpedite.transaction.TransactionRepo\n :param result: Handle to gather and store profiling results\n :param classifier: Predicate to classify transactions into different categories (Default value = DefaultClassifier()\n :param resultOrder: Sort order of transactions in latency constituent reports\n :param reportThreshold: Threshold for number of transactions rendered in html reports.\n :param txnFilter: Lambda to filter transactions prior to report generation\n :param benchmarkPaths: List of stored reports from previous runs, for benchmarking\n\n \"\"\"\n try:\n if txnFilter:\n self.analytics.filterTransactions(repo, txnFilter)\n flots = self.generateFlots(repo, classifier, app.runId)\n profiles = self.generateProfiles(repo, classifier)\n self.generateLatencyReports(profiles, flots, result, resultOrder, reportThreshold)\n self.generateEnvironmentReport(app, result, repo, resultOrder, classifier, txnFilter, benchmarkPaths)\n LOGGER.info('\\nTo recreate the report run - \"xpedite report -p profileInfo.py -r %s\"\\n', app.runId)\n result.commitXpediteReport(app, profiles, self.reportName)\n return profiles\n except Exception as ex:\n LOGGER.exception('failed to generate report')\n raise ex\n","sub_path":"scripts/lib/xpedite/reportgenerator.py","file_name":"reportgenerator.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"639603949","text":"\"\"\"\nP024 Lexicographic permutations\n\nA permutation is an ordered 
arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:\n\n012 021 102 120 201 210\n\nWhat is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n\"\"\"\n\nfrom itertools import permutations\n\n\ndef lexi_perm(str_int, nth):\n \"\"\"return the nth lexicographic permutation of a string of digits\"\"\"\n all = permutations(str_int)\n for i in range(nth):\n target = next(all)\n return int(\"\".join(target))\n\n\nif __name__ == \"__main__\":\n assert lexi_perm(\"012\", 4) == 120\n print(lexi_perm(\"0123456789\", 1000000))\n # >>> 2783915460\n # passed\n","sub_path":"AlgorithmTraining/ProjectEuler/p024_lexicographic_permutations.py","file_name":"p024_lexicographic_permutations.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"8243458","text":"#saving the following data into a set com{}\n#----------\n# 'Google'\n# 'Tmall'\n# 'Facebook'\n#---------\nS = {'Google','Tmall','Facebool'}\n#define a empty set com{}\n#s = set()\n#insert above data\n\n#1.insert a data ('Tencent')\n#2.remove a data ('Tmall')\n#3.clear the set\n#4.determine whether the str('Google') is in the set\n#s.add('Tecent')\n#s.remove('Tmall')\n#s.clear()\nif 'Google' in S:\n\tprint(\"1\")\nelse:\n\tprint('0')\n\n","sub_path":"practice/practice5/2set.py","file_name":"2set.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"340941071","text":"MODE = \"TRAIN\"\n#MODE = \"SUMBIT\"\n#MODE = \"TEST\"\n\nfrom keras.layers import Input\nfrom keras.layers.core import Activation, Flatten, Reshape\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.utils import np_utils\n\nimport os\nimport sys\nif(MODE == \"TRAIN\" or MODE == \"TEST\"):\n\t#\thttps://github.com/qubvel/segmentation_models\n\t#\thttps://github.com/aleju/imgaug\n\t#\thttps://www.github.com/keras-team/keras-contrib\n\tos.system('pip install --upgrade imgaug')\n\tos.system('pip install -U segmentation-models==0.2.1')\n\tos.system('pip install git+https://www.github.com/keras-team/keras-contrib.git')\n\tos.system('pip install -U scikit-learn')\n\timport imgaug as ia\n\tfrom imgaug import augmenters as iaa\n\tfrom imgaug.augmentables.segmaps import SegmentationMapOnImage\n\timport imgaug.imgaug\n\tfrom sklearn.model_selection import train_test_split\n\tfrom sklearn import preprocessing\nelse:\n\tPATH = \"/kaggle/input/efficientnet/\"\n\tsys.path.insert(0, PATH)\n\tPATH = \"/kaggle/input/segmentation-models/\"\n\tsys.path.insert(0, PATH)\n\tPATH = \"/kaggle/input/classification-models/\"\n\tsys.path.insert(0, PATH)\n\t\nfrom keras import metrics\nimport segmentation_models as sm\nfrom glob import glob\nfrom PIL import Image\nimport numpy as np \nimport pandas as pd \nimport random\nimport time\nimport gc\nfrom keras.models import *\nimport keras\n\n#\thttps://www.kaggle.com/aleksandradeis/steel-defect-detection-eda\ndef rle_2_mask_resize(rle, defined=False):\n\t# CONVERT RLE TO MASK \n\tif (pd.isnull(rle))|(rle=='')|(rle=='-1'): \n\t\tif not defined:\n\t\t\treturn np.zeros((256,1600) ,dtype=np.uint8)\n\t\treturn 
False\n\tif(defined):\n\t\treturn True\n\n\theight = 256\n\twidth = 1600\n\tmask = np.zeros( width*height ,dtype=np.uint8)\n\n\tarray = np.asarray([int(x) for x in rle.split()])\n\tstarts = array[0::2]-1\n\tlengths = array[1::2] \n\tfor index, start in enumerate(starts):\n\t\tmask[int(start):int(start+lengths[index])] = 1\n\treturn mask.reshape( (height,width), order='F' )\n\ndef smallest_length(rle):\n\tif (pd.isnull(rle))|(rle=='')|(rle=='-1'): \n\t\treturn None, None\n\n\tarray = np.asarray([int(x) for x in rle.split()])\n\tlengths = array[1::2]\n\tmin_val = np.amin(lengths) \n\treturn min_val, np.count_nonzero(lengths == min_val)\n#\treturn lengths.sum()/len(lengths)\t\n\n#\thttps://github.com/catalyst-team/mlcomp/blob/85a8849c87040d19a5aed61e72cfd7ad518d8c9b/mlcomp/contrib/transform/rle.py\ndef mask_2_rle(img):\n\t\"\"\"\n\timg: numpy array, 1 - mask, 0 - background\n\tReturns run length as string formatted\n\t\"\"\"\n\tpixels = img.T.flatten()\n\tpixels = np.concatenate([[0], pixels, [0]])\n\truns = np.where(pixels[1:] != pixels[:-1])[0] + 1\n\truns[1::2] -= runs[::2]\n\treturn ' '.join(str(x) for x in runs)\n\n\n\nclass DataGenerator(keras.utils.Sequence):\n\tdef __init__(self, batch_size=6, model=None, single_input=False):\n\t\tself.TRAIN_PATH = '/kaggle/input/severstal-steel-defect-detection/train_images/'\n\t\tself.TEST_PATH = '/kaggle/input/severstal-steel-defect-detection/test_images/'\n\n\t\tself.train_df = pd.read_csv('/kaggle/input/severstal-steel-defect-detection/train.csv')\n\t\tself.train_fns = sorted(glob(self.TRAIN_PATH + '*.jpg'))\n\t\tself.test_fns = sorted(glob(self.TEST_PATH + '*.jpg'))\n\n\t\tsplit_df = self.train_df[\"ImageId_ClassId\"].str.split(\"_\", n = 1, expand = True)\n\t\tself.train_df['Image'] = split_df[0]\n\t\tself.train_df['Label'] = split_df[1]\n\n\t\tself.samples = 6 * 30\n\t\tself.samples_index = 0\n\t\tself.roll_over = False\n\t\tself.last_shared_index = 0\n\n\t\tself.batch_size = batch_size\n\n\t\tself.single_input = single_input\n\n\t\tself.on_epoch_end()\n\n\n\t\tself.model = model\n\n\t\tself.shared = [\n\t\t\t[],\n\t\t\t[],\n\t\t\t[],\n\t\t\t[],\n\t\t\t[],\n\t\t\t[]\n\t\t]\t\n\n\t\tself.round = 0\n\t\tself.most_elements = 0\n\t\tfor index in range(0, len(self.train_df), 4):\n\t\t\tmask_1_def = rle_2_mask_resize(self.train_df.iloc[index]['EncodedPixels'], defined=True)\n\t\t\tmask_2_def = rle_2_mask_resize(self.train_df.iloc[index + 1]['EncodedPixels'], defined=True)\n\t\t\tmask_3_def = rle_2_mask_resize(self.train_df.iloc[index + 2]['EncodedPixels'], defined=True)\n\t\t\tmask_4_def = rle_2_mask_resize(self.train_df.iloc[index + 3]['EncodedPixels'], defined=True)\n\n\t\t\tmoved = []\n\t\t\tif mask_1_def:\n\t\t\t\tmoved.append(0)\n\t\t\tif mask_2_def:\n\t\t\t\tmoved.append(1)\n\t\t\tif mask_3_def:\n\t\t\t\tmoved.append(2)\n\t\t\tif mask_4_def:\n\t\t\t\tmoved.append(3)\n\t\t\n\t\t\tif(len(moved) == 0):\n\t\t\t\tself.shared[4].append(index)\n\t\t\t\tself.most_elements = max(self.most_elements, len(self.shared[4]))\n\t\t\telif(1 < len(moved)):\n\t\t\t\tself.shared[5].append(index)\n\t\t\t\tself.most_elements = max(self.most_elements, len(self.shared[5]))\n\t\t\telse:\n\t\t\t\tself.shared[moved[0]].append(index)\n\t\t\t\tself.most_elements = max(self.most_elements, len(self.shared[moved[0]]))\n\n\tdef __len__(self):\n\t\treturn int(self.most_elements // self.batch_size)\n\t\t#return np.floot(self.train_df.shape[0]//len(shared))\n\t\t#return int(np.floor(self.train_df.shape[0] / self.batch_size))\n\n\n\tdef __getitem__(self, 
index):\n\t\tif(self.model == None):\n\t\t\tX, y = self.__data_generation()\n\n\t\t\t#X = (X - np.min(X))/np.ptp(X)\n\t\t\t#X = (X - np.mean(X)) / np.std(X)\n\t\t\tif not self.single_input:\n\t\t\t\treturn [X, X], y\n\t\t\treturn X, y\n\t\telse:\n\t\t\tX, y_2 = self.__data_generation()\n\t\t\tY = self.model.predict(X)\n\t\t\tY = Y.reshape(Y.shape[:-1])\n\t\t\ty = np.zeros(Y.shape + (3, ))\n#\t\t\tprint(y.shape)\n#\t\t\tprint(Y.shape)\n\t\t\ty[:, :, :, 0] = Y \n\t\t\ty[:, :, :, 1] = Y\n\t\t\ty[:, :, :, 2] = Y\n\t\t#\ty = resolve_model_one_input(y)\n\t\t\treturn y, y_2\n\n\t\t\t#Y, y#y, Y\n\n\tdef on_epoch_end(self):\n\t\tpass\n\n\tdef __data_generation(self):\n\t\tself.round += 1\n\n\t\tX = np.zeros((self.batch_size, 256, 1600, 3), dtype=np.float64)\n\t\ty = np.zeros((self.batch_size, 256, 1600, 4), dtype=np.float64)\n\n\t\telements_per_class = max((self.batch_size//len(self.shared)), 1)\n\t\tcurrent_index = 0\n\t\tself.samples_index += elements_per_class\n\t\tif(self.train_df.shape[0] < (self.samples_index * len(self.shared))):\n\t\t\tself.samples_index = 0\n\t\t\tself.roll_over = True\n\n\t\twhile current_index < self.batch_size:\n\t\t\tfor i in range(self.last_shared_index, len(self.shared)):\n\t\t\t\tfor j in range(elements_per_class):\n\t\t\t\t\tif not current_index < self.batch_size:\n\t\t\t\t\t\tbreak\n\t\t\t\t\ttry:\n\t\t\t\t\t\tindex = (self.samples_index + j) % len(self.shared[i])\n\t\t\t\t\t\t#\tbased off https://www.kaggle.com/aleksandradeis/steel-defect-detection-eda\n\t\t\t\t\t\tseq = iaa.Sequential([\n\t\t\t\t\t\t\tiaa.Sometimes(0.5,\n\t\t\t\t\t\t\t\tiaa.GaussianBlur(sigma=(0, 0.5))\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t#iaa.ContrastNormalization((0.75, 1.5)),\n\t\t\t\t\t\t\tiaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n\t\t\t\t\t\t\tiaa.Multiply((0.8, 1.2), per_channel=0.2),\n\t\t\t\t\t\t], random_order=True) \n\n\t\t\t\t\t\timage = np.asarray(Image.open(self.TRAIN_PATH + self.train_df.iloc[index]['Image']))\n\n\t\t\t\t\t\tmask_1 = rle_2_mask_resize(self.train_df.iloc[index]['EncodedPixels'])\n\t\t\t\t\t\tmask_2 = rle_2_mask_resize(self.train_df.iloc[index + 1]['EncodedPixels'])\n\t\t\t\t\t\tmask_3 = rle_2_mask_resize(self.train_df.iloc[index + 2]['EncodedPixels'])\n\t\t\t\t\t\tmask_4 = rle_2_mask_resize(self.train_df.iloc[index + 3]['EncodedPixels'])\n\n\t\t\t\t\t\tsegmap = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.int32)\t\t\t\n\t\t\t\t\t\tsegmap[1 < mask_1] = 1\n\t\t\t\t\t\tsegmap[2 < mask_2] = 2\n\t\t\t\t\t\tsegmap[3 < mask_3] = 3\n\t\t\t\t\t\tsegmap[4 < mask_4] = 4\n\n\n\t#\t\t\t\t\tsegmap = np.zeros((4, image.shape[0], image.shape[1], 1), dtype=np.int32)\t\t\t\n\t#\t\t\t\t\tsegmap[0, 1 < mask_1] = 1\n\t#\t\t\t\t\tsegmap[1, 2 < mask_2] = 1\n\t#\t\t\t\t\tsegmap[2, 3 < mask_3] = 1\n\t#\t\t\t\t\tsegmap[3, 4 < mask_4] = 1\n\n\t\t\t\t#\t\tprint(segmap.shape)\n\t\t\t\t#\t\tprint(segmap.shape)\n\t#\t\t\t\t\tsegmap = np.swapaxes(segmap, 0, -1) \n\t#\t\t\t\t\tsegmap = segmap.reshape(segmap.shape[1:])\n\t\t\t\t#\t\tprint(segmap.shape)\n\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 0]) == self.train_df.iloc[index]['EncodedPixels'])\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 1]) == self.train_df.iloc[index + 1]['EncodedPixels'])\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 2]) == self.train_df.iloc[index + 2]['EncodedPixels'])\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 3]) == self.train_df.iloc[index + 3]['EncodedPixels'])\n\n#\t\t\t\t\t\tdouble_check_map = np.zeros((image.shape[0], image.shape[1], 
1))\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 0] == 1] = 1\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 1] == 2] = 2\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 2] == 3] = 3\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 3] == 4] = 4\n\n#\t\t\t\t\t\tmask_2_rle(segmap[:, :, 1])\n#\t\t\t\t\t\tmask_2_rle(segmap[:, :, 2])\n#\t\t\t\t\t\tmask_2_rle(segmap[:, :, 3])\n\n\t\t\t\t\t\tsegmap_on_image = SegmentationMapOnImage(segmap, shape=image.shape)\n\t\t\t\t\t\timage_aug = image\n\t\t\t\t\t\tif(random.randint(0, 3) == 2):\n\t\t\t\t\t\t\timage_aug, _ = seq(image=image, segmentation_maps=segmap_on_image)\n\n\t\t\t\t\t\tX[current_index, :] = image_aug\n\t\t\t\t\t\ty[current_index, :] = segmap\n\n\t\t\t\t\t\tcurrent_index += 1\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\t\tcurrent_index += 1\n\t\t\t\tself.last_shared_index += 1\n\t\t\tself.last_shared_index = 0\n\t\treturn X, y\n\ndef save_model_own(model, name=\"model\"):\n\tmodel_json = model.to_json()\n\twith open(\"{}.json\".format(name), \"w\") as json_file:\n\t\tjson_file.write(model_json)\n\tmodel.save_weights(\"{}.h5\".format(name))\n\n\n'''\n\thttps://www.tensorflow.org/guide/keras/custom_callback\n'''\nclass EarlyStoppingAtMinLoss(keras.callbacks.Callback):\n\tdef __init__(self, patience=10, timeout=5):\n\t\tsuper(EarlyStoppingAtMinLoss, self).__init__()\n\n\t\tself.start = time.time()\n\t\tself.traning_time = 60 * timeout #60 * 1#0\n\t\tself.round = 0\n\n\t#\tself.prediction = tf.Variable(0., validate_shape=False)\n\n\t\tself.patience = patience\n\t\tself.best = np.Inf\n\t\tself.best_weights = None\n\t\tself.wait = 0\n\n\tdef on_batch_end(self, batch, logs={}):\n#\t\tprint(self.model.outputs[0].eval(session=K.get_session()))\n\t\tif(self.traning_time < (time.time() - self.start)):\n\t\t\tprint(\"tiden er ute...\")\n\t\t\tself.model.stop_training = True\n\n\t\tif(self.round % 5 == 0):\n\t\t\tprint(logs)\n\t\t\tprint(\"accuracy : {} mae:{}\".format(logs.get(\"accuracy\", \"None?\"), logs.get(\"mean_absolute_error\", \"None?\")))\n\n\t\tcurrent = logs.get('loss')\n\t\tif np.less(current, self.best):\n\t\t\tself.best = current\n\t\t\tself.wait = 0\n\t\t\tself.best_weights = self.model.get_weights()\n\t\telif self.patience <= self.wait and self.best_weights != None:\n\t\t\tself.model.set_weights(self.best_weights)\n\t\tself.wait += 1\n\t\tself.round += 1\n\nclass LearningRateScheduler(keras.callbacks.Callback):\n\tdef __init__(self):\n\t\tsuper(LearningRateScheduler, self).__init__()\n\t\t#self.schedule = schedule\n\t\t\n\t\tself.og_lr = 0.01 #1e-4\n\t\tself.decay_factor = 0.75\n\t\tself.step_size = 2\n\n\t\tself.lr = self.og_lr\n\t\tself.iteration = 0\n\t\tself.decay = 0.01\n\n\t\tself.best = np.Inf\n\n\t#\thttps://www.jeremyjordan.me/nn-learning-rate/\n\tdef calc_new_lr(self):\n#\t\treturn self.og_lr * (self.decay_factor ** np.floor(self.iteration/self.step_size))\n\t\t return self.og_lr * (1 / (1 + self.decay * self.iteration))\n\n\tdef on_batch_start(self, batch, logs={}):\n\t\tkeras.backend.set_value(self.model.optimizer.lr, self.calc_new_lr())\t\n\n\tdef on_batch_end(self, batch, logs={}):\n\t\tif(0 < self.iteration and self.iteration % 10 == 0):\n\t\t\tkeras.backend.set_value(self.model.optimizer.lr, self.calc_new_lr())\t\n\t\tself.iteration += 1\n\nif __name__ == \"__main__\":\n\tnp.random.seed(0)\n\trandom.seed( 0 )\n\n\t#data = dataloader()\n\tgc.collect() \n\n\tmodel = None\n\tif(MODE == \"TEST\"):\n\t#\tX, y = data.get_traning()\n\t#\tprint(data.tested_all_data())\n\t\tfor i in range(3):\n\t\t\tmodel = 
None\n\t\t\thistory = None\n\t\t\tif(i == 0):\n\t\t\t\tmodel = sm.Unet(encoder_weights='imagenet')\n\t\t\t\tmodel.compile(\n\t\t\t\t\t'Adam',\n\t\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t\t\t)\n\t\t\t\thistory = model.fit(X, y, epochs=1, batch_size=5, \n\t\t\t\t\tvalidation_split=0.1,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=0)\n\t\t\t\tsave_model_own(model, name=\"model\" + str(i))\n\t\t\telif(i == 1):\n\t\t\t\tmodel = sm.FPN(encoder_weights='imagenet')\n\t\t\t\tmodel.compile(\n\t\t\t\t\t'Adam',\n\t\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t\t\t)\n\t\t\t\thistory = model.fit(X, y, epochs=1, batch_size=5, \n\t\t\t\t\tvalidation_split=0.1,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=0)\n\t\t\t\tsave_model_own(model, name=\"model\" + str(i))\n\t\t\telif(i == 2):\n\t\t\t\tmodel = sm.Linknet(encoder_weights='imagenet')\n\t\t\t\tmodel.compile(\n\t\t\t\t\t'Adam',\n\t\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t\t\t)\n\t\t\t\thistory = model.fit(X, y, epochs=1, batch_size=5, \n\t\t\t\t\tvalidation_split=0.1,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=0)\n\t\t\t\tsave_model_own(model, name=\"model\" + str(i))\n\telif(MODE == \"TRAIN\"):\n\t\tgc.collect() \n\n\t\n\t\tmodel = sm.Unet(encoder_weights='imagenet', classes=1, activation=\"relu\")\n\t\tmodel.compile(\n\t\t\t\t'Adam',\n\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t)\n\t\t\n\t\tearly = EarlyStoppingAtMinLoss(timeout=15)\n\t\ttraining_generator = DataGenerator(batch_size=3, single_input=True)\n\t\tmodel.fit_generator(generator=training_generator,\n\t\t\t\t\t\t\t\t\tuse_multiprocessing=True,\n\t\t\t\t\t\t\t\t\tworkers=1,\n\t\t\t\t\t\t\t\t\tverbose=0,\n\t\t\t\t\t\t\t\t\tmax_queue_size=2,\n\t\t\t\t\t\t\t\t\tcallbacks=[early, \n\t\t\t\t\t\t\t\t\t\t\t\tLearningRateScheduler()])\n\t\t\n\telif(MODE == \"SUMBIT\"):\n\t\tmodel = model_from_json(open('/kaggle/input/steel/model.json', 'r').read())\n\t\tmodel.load_weights(\"/kaggle/input/steel/model.h5\")\n\n\t#\textra_model = model_from_json(open('/kaggle/input/steel/extra_model.json', 'r').read())\n\t#\textra_model.load_weights(\"/kaggle/input/steel/extra_model.h5\")\n\t\t\n\n\t\tinput_size = 32 \n\t\tdata_frame_data = {\n\t\t\t'ImageId_ClassId':[],\n\t\t\t'EncodedPixels':[]\n\t\t}\n\t\tprint(\"Model loaded\")\n\t\troundd = 0\n\t\tfor size_jump in range(0, len(data.test_fns), input_size):\n\t\t\tmovment_step = (len(data.test_fns)-size_jump) if ((len(data.test_fns)-size_jump) < input_size) else input_size\n\n\t\t\tindex = 0\n\t\t\ttest_input = np.zeros((movment_step, 256, 1600, 3))\n\t\t\tfor i in range(size_jump, size_jump + movment_step): \n\t\t\t\timage = np.asarray(Image.open(data.test_fns[i]))\n\t\t\t\ttest_input[index, :] = image\n\t\t\t\tindex += 1\n\n\n\t\t\t'''\n\t\t\tY = model.predict(test_input)\n\t\t\tY = Y.reshape(Y.shape[:-1])\n\t\t\ty = np.zeros(Y.shape + (3, ))\n#\t\t\tprint(y.shape)\n#\t\t\tprint(Y.shape)\n\t\t\ty[:, :, :, 0] = Y \n\t\t\ty[:, :, :, 1] = Y\n\t\t\ty[:, :, :, 2] = Y\n\n\t\t\toutput = extra_model.predict(y)\n\t\t\t'''\n\t\t\toutput = model.predict(test_input)\n#\t\t\tnew_output = extra_model.predict(output)\n#\t\t\tnew_output = connect_back_input(new_output)\n\n\t\t\tprint(\"Done new prediction\")\n\t\t\tmask_1 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\t\t\n\t\t\tmask_2 = np.zeros((movment_step, output.shape[1], 
output.shape[2], 1), dtype=np.int32)\n\t\t\tmask_3 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\n\t\t\tmask_4 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\n\t\t\tmask_1[output == 1] = 1\n\t\t\tmask_2[output == 2] = 1\n\t\t\tmask_3[output == 3] = 1\n\t\t\tmask_4[output == 4] = 1\n\n\t\t\tfor i in range(0, output.shape[0]):\n\t\t\t\tname = data.test_fns[size_jump + i].split(\"/\")[-1]\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_1\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_1[i, :]))\n\t\t\t\t\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_2\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_2[i, :]))\n\t\t\t\t\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_3\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_3[i, :]))\n\t\t\t\t\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_4\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_4[i, :]))\n\n\t\tpd.DataFrame(data_frame_data).to_csv(\"submission.csv\", encoding='utf-8', index=False)\n\t","sub_path":"kaggle/severstal-steel-defect-detection/steel.py","file_name":"steel.py","file_ext":"py","file_size_in_byte":15283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"88376821","text":"#!/usr/bin/python3\n\"\"\"\nContains the class DBStorage\n\"\"\"\n\nimport models\nfrom models.base_model import BaseModel, Base\nfrom models.product import Product\nfrom models.customer import Customer\nfrom models.customer_product_mapping import CustomerProductMapping\nfrom os import getenv\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nclasses = {\"Customer\": Customer, \"Product\": Product,\n \"CustomerProductMapping\": CustomerProductMapping}\n\n\nclass DBStorage:\n \"\"\"interaacts with the MySQL database\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n \"\"\"Instantiate a DBStorage object\"\"\"\n GRAPHIT_MYSQL_USER = getenv('GRAPHIT_MYSQL_USER')\n GRAPHIT_MYSQL_PWD = getenv('GRAPHIT_MYSQL_PWD')\n GRAPHIT_MYSQL_HOST = getenv('GRAPHIT_MYSQL_HOST')\n GRAPHIT_MYSQL_DB = getenv('GRAPHIT_MYSQL_DB')\n GRAPHIT_ENV = getenv('GRAPHIT_ENV')\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.\n format(GRAPHIT_MYSQL_USER,\n GRAPHIT_MYSQL_PWD,\n GRAPHIT_MYSQL_HOST,\n GRAPHIT_MYSQL_DB))\n\n def all(self, cls=None):\n \"\"\"query on the current database session\"\"\"\n new_dict = {}\n for clss in classes:\n if cls is None or cls is classes[clss] or cls is clss:\n objs = self.__session.query(classes[clss]).all()\n for obj in objs:\n key = obj.__class__.__name__ + '.' 
+ obj.id\n new_dict[key] = obj\n return (new_dict)\n\n def new(self, obj):\n \"\"\"add the object to the current database session\"\"\"\n self.__session.add(obj)\n\n def save(self):\n \"\"\"commit all changes of the current database session\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete from the current database session obj if not None\"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\"reloads data from the database\"\"\"\n Base.metadata.create_all(self.__engine)\n sess_factory = sessionmaker(bind=self.__engine, expire_on_commit=False)\n Session = scoped_session(sess_factory)\n self.__session = Session\n\n def close(self):\n \"\"\"call remove() method on the private session attribute\"\"\"\n self.__session.remove()\n\n def get(self, cls, id):\n \"\"\"get object based on class and id\"\"\"\n objs = self.__session.query(classes[cls]).all()\n for obj in objs:\n if obj.__class__.__name__ == cls and obj.id == id:\n return obj\n return None\n\n def count(self, cls=None):\n \"\"\"get count of all objects or objects of a specific class\"\"\"\n object_count = 0\n object_list = []\n if cls is None:\n for value in classes.values():\n object_count += self.__session.query(value).count()\n else:\n if cls in classes:\n object_count += self.__session.query(classes[cls]).count()\n return object_count\n\n def get_session(self):\n \"\"\"returns a session to query\"\"\"\n return self.__session\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"338460095","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nwith open('requirements.txt', 'r') as req:\n requirements_install = [l.strip() for l in req.readlines() if l.strip() != '']\n\nwith open('requirements_test.txt', 'r') as req:\n requirements_test = [l.strip() for l in req.readlines() if l.strip() != '']\n\nwith open('requirements_links.txt', 'r') as req:\n dependency_links = [l.strip() for l in req.readlines() if l.strip() != '']\n\nextras = {\n 'test': requirements_test\n}\n\n\nentry_points = '[console_scripts]\\ndatafs=datafs.datafs:cli'\n\nsetup(\n name='datafs',\n version='0.6.5',\n description=\"DataFS is an abstraction layer for data storage systems. 
It manages file versions and metadata using a json-like storage system like AWS's DynamoDB and relies on PyFilesystem to abstract file storage, allowing you to store files locally and on the cloud in a seamless interface.\",\n long_description=readme + '\\n\\n' + history,\n author=\"Climate Impact Lab\",\n url='https://github.com/ClimateImpactLab/datafs',\n packages=find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests', 'docs', 'examples']),\n package_dir={'datafs':\n 'datafs'},\n include_package_data=True,\n install_requires=requirements_install,\n entry_points=entry_points,\n license=\"MIT license\",\n zip_safe=False,\n keywords='datafs',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7'\n ],\n test_suite='tests',\n tests_require=requirements_test,\n extras_require=extras,\n dependency_links = dependency_links\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"19141907","text":"from idlelib import statusbar\nfrom tkinter import *\nimport tkinter.messagebox\nfrom pygame import mixer\n\n# Window\nroot = Tk()\nmixer.init()\nroot.geometry('500x550')\nroot.title(\"Settings\")\nroot.iconbitmap(r'Art/settings.png')\n\n\n# About Credits\ndef about_game():\n tkinter.messagebox.showinfo('Credits', 'Sound from:\\n~Zapsplat.com, '\n '\\n~PlayOnLoop.com, '\n '\\n~http://www.freesfx.co.uk')\n\n\n# How to play\ndef rules():\n tkinter.messagebox.showinfo('Rules', 'Two players are trying to score a goal in the opponents net with horizontal, '\n 'vertical and diagonal moves.\\nAlready used points can be re-used for a double'\n 'move')\n\n\n# menubar\nmenubar = Menu(root)\nroot.config(menu=menubar)\n\n# submenu\nsubMenu = Menu(menubar, tearoff=0)\nmenubar.add_cascade(label=\"About\", menu=subMenu)\nsubMenu.add_command(label=\"Credits\", command=about_game)\nsubMenu.add_command(label=\"How to play\", command=rules)\n\n# text\ntext = Label(root, text='Game Settings')\ntext.pack(pady=10)\n\n\n# button functions\nmuted = FALSE\n\n\ndef mute_music():\n global muted\n if muted: # unmute music\n mixer.music.set_volume(0.5)\n volume1Btn.configure(image=volume1Photo)\n scale.set(50)\n muted = FALSE\n else: # mute\n mixer.music.set_volume(0)\n volume1Btn.configure(image=mutePhoto)\n scale.set(0)\n muted = TRUE\n\n\ndef play_btn():\n mixer.music.load('background.wav')\n mixer.music.play()\n\n\ndef pause_btn():\n mixer.music.load('../Sound/background.wav')\n mixer.music.stop()\n statusbar['text'] = \"Paper football: music paused\"\n\n\ndef set_vol(val):\n volume = int(val) / 100\n mixer.music.set_volume(volume)\n\n\ndef exit_btn():\n root.destroy()\n\n\n# frame\nmiddleframe = Frame(root, relief=RAISED, borderwidth=0)\nmiddleframe.pack()\n\n# Volume 1\nvolume1Photo = PhotoImage(file='../Art/sound.png')\nvolume1Btn = Button(image=volume1Photo, command=mute_music)\nvolume1Btn.pack()\nmutePhoto = PhotoImage(file='../Art/sound_off.png')\n\n# Volume button\nvolumePhoto = PhotoImage(file='../Art/sound.png')\nplay_btn = Button(middleframe, image=volumePhoto, command=play_btn)\nplay_btn.pack(pady=5, padx=10)\n\n\n\n# Mixer\nscale = Scale(root, from_=0, to=100, orient=HORIZONTAL, command=set_vol)\nscale.set(50) # default 
value\nmixer.music.set_volume(0.5)\nscale.pack()\n\n# Exit button\nexitPhoto = PhotoImage(file='../Art/exit.png')\nexit_btn = Button(middleframe, image=exitPhoto, command=exit_btn)\nexit_btn.pack(pady=5, padx=10)\n\n\n# status bar\nstatusbar = Label(root, text=\"Paper Football\", relief=SUNKEN, anchor=W)\nstatusbar.pack(side=BOTTOM, fill=X)\n\n# loop\nroot.mainloop()\n","sub_path":"View/SettingsUI.py","file_name":"SettingsUI.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"313325779","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport calendar\nfrom dateutil.relativedelta import relativedelta\n# from ChryslerMTD.sql.current_month_data import current_month_sql\n\n\ndef get_pre_day():\n date = datetime.date.today()\n dates = str(date).split('-')\n print(dates)\n month_range = calendar.monthrange(int(dates[0]), int(dates[1]))\n if int(dates[1]) == 1:\n year = int(dates[0]) - 1\n fact_date = str(year) + '-' + '12' + '-' + str(month_range[1])\n else:\n if int(dates[1])-1 < 10:\n fact_date = str(dates[0]) + '-0' + str(int(dates[1])-1) + '-' + str(dates[2])\n else:\n fact_date = str(dates[0]) + '-' + str(int(dates[1]) - 1) + '-' + str(dates[2])\n return fact_date\n\n\ndef get_days(n_day):\n date = datetime.date.today()\n tar_day = date.today() + relativedelta(days=-n_day)\n return str(tar_day)\n# a = get_pre_day()\n# print(a)\n\n\ndef get_pre_month(mon):\n # takes an int n and returns the month n months ago (as 'YYYY-MM')\n date = datetime.date.today()\n month = date.today() + relativedelta(months=-mon)\n return str(month)[0:7]\n\n\ndef wrap_month_condition(mon):\n cond_temp = 'CalendarMonth=\\'' + get_pre_month(mon) + '\\''\n return cond_temp\n\n\ndef wrap_date_condition(day):\n cond_temp = 'Date=\\'' + get_days(day) + '\\''\n return cond_temp\n\n# sql = current_month_sql.format(date_condition=wrap_month_condition(0)+' and '+wrap_date_condition(0))\n# print(wrap_date_condition(1))\n\n\n","sub_path":"ChryslerMTD/get_date.py","file_name":"get_date.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"438700006","text":"#!/usr/bin/env python\n\nimport json # Used when TRACE=jsonp\nimport os # Used to get the TRACE environment variable\nimport re # Used when TRACE=jsonp\nimport sys # Used to smooth over the range / xrange issue.\n\n# Python 3 doesn't have xrange, and range behaves like xrange.\nif sys.version_info >= (3,):\n xrange = range\n\n# Circuit verification library.\n\nclass Wire(object):\n \"\"\"A wire in an on-chip circuit.\n \n Wires are immutable, and are either horizontal or vertical.\n \"\"\"\n \n def __init__(self, name, x1, y1, x2, y2):\n \"\"\"Creates a wire.\n \n Raises a ValueError if the coordinates don't make up a horizontal wire\n or a vertical wire.\n \n Args:\n name: the wire's user-visible name\n x1: the X coordinate of the wire's first endpoint\n y1: the Y coordinate of the wire's first endpoint\n x2: the X coordinate of the wire's last endpoint\n y2: the Y coordinate of the wire's last endpoint\n \"\"\"\n # Normalize the coordinates.\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n \n self.name = name\n self.x1, self.y1 = x1, y1\n self.x2, self.y2 = x2, y2\n self.object_id = Wire.next_object_id()\n \n if not (self.is_horizontal() or self.is_vertical()):\n raise ValueError(str(self) + ' is neither horizontal nor vertical')\n \n def is_horizontal(self):\n \"\"\"True if the wire's endpoints have the same Y 
coordinates.\"\"\"\n return self.y1 == self.y2\n \n def is_vertical(self):\n \"\"\"True if the wire's endpoints have the same X coordinates.\"\"\"\n return self.x1 == self.x2\n \n def intersects(self, other_wire):\n \"\"\"True if this wire intersects another wire.\"\"\"\n # NOTE: we assume that wires can only cross, but not overlap.\n if self.is_horizontal() == other_wire.is_horizontal():\n return False \n \n if self.is_horizontal():\n h = self\n v = other_wire\n else:\n h = other_wire\n v = self\n return v.y1 <= h.y1 and h.y1 <= v.y2 and h.x1 <= v.x1 and v.x1 <= h.x2\n \n def __repr__(self):\n # :nodoc: nicer formatting to help with debugging\n return('')\n \n def as_json(self):\n \"\"\"Dict that obeys the JSON format restrictions, representing the wire.\"\"\"\n return {'id': self.name, 'x': [self.x1, self.x2], 'y': [self.y1, self.y2]}\n\n # Next number handed out by Wire.next_object_id()\n _next_id = 0\n \n @staticmethod\n def next_object_id():\n \"\"\"Returns a unique numerical ID to be used as a Wire's object_id.\"\"\"\n id = Wire._next_id\n Wire._next_id += 1\n return id\n\nclass WireLayer(object):\n \"\"\"The layout of one layer of wires in a chip.\"\"\"\n \n def __init__(self):\n \"\"\"Creates a layer layout with no wires.\"\"\"\n self.wires = {}\n \n def wires(self):\n \"\"\"The wires in the layout.\"\"\"\n self.wires.values()\n \n def add_wire(self, name, x1, y1, x2, y2):\n \"\"\"Adds a wire to a layer layout.\n \n Args:\n name: the wire's unique name\n x1: the X coordinate of the wire's first endpoint\n y1: the Y coordinate of the wire's first endpoint\n x2: the X coordinate of the wire's last endpoint\n y2: the Y coordinate of the wire's last endpoint\n \n Raises an exception if the wire isn't perfectly horizontal (y1 = y2) or\n perfectly vertical (x1 = x2).\"\"\"\n if name in self.wires:\n raise ValueError('Wire name ' + name + ' not unique')\n self.wires[name] = Wire(name, x1, y1, x2, y2)\n \n def as_json(self):\n \"\"\"Dict that obeys the JSON format restrictions, representing the layout.\"\"\"\n return { 'wires': [wire.as_json() for wire in self.wires.values()] }\n \n @staticmethod\n def from_file(file):\n \"\"\"Builds a wire layer layout by reading a textual description from a file.\n \n Args:\n file: a File object supplying the input\n \n Returns a new Simulation instance.\"\"\"\n\n layer = WireLayer()\n \n while True:\n command = file.readline().split()\n if command[0] == 'wire':\n coordinates = [float(token) for token in command[2:6]]\n layer.add_wire(command[1], *coordinates)\n elif command[0] == 'done':\n break\n \n return layer\n\ndef nodeNum(node):\n if(node is None):\n return 0\n return node.nodeNum\n\ndef height(node):\n if(node is None):\n return -1\n else:\n return node.height\n\nclass Node(object):\n def __init__(self,key,patient):\n self.left=None\n self.right=None\n self.key=key\n self.height=0\n self.patient=patient\n self.nodeNum=1\n def updateHeight(self):\n self.height=1+max(height(self.right),height(self.left))\n def updateNodeNum(self):\n self.nodeNum=nodeNum(self.right)+nodeNum(self.left)+1\n\n def rRotate(self):\n lNode=self.left\n patient=self.patient\n self.left=lNode.right\n if(self.left!=None):\n self.left.patient=self\n lNode.right=self\n self.patient=lNode\n if(self == patient.right):\n patient.right=lNode\n else: \n patient.left=lNode\n lNode.patient=patient\n self.updateHeight()\n self.updateNodeNum()\n lNode.updateHeight()\n lNode.updateNodeNum()\n \n def lRotate(self):\n rNode=self.right\n patient=self.patient\n self.right=rNode.left\n 
if(self.right != None):\n self.right.patient=self\n rNode.left=self\n self.patient=rNode\n if(self==patient.right):\n patient.right=rNode\n else:\n patient.left=rNode\n rNode.patient=patient \n self.updateHeight()\n self.updateNodeNum()\n rNode.updateHeight()\n rNode.updateNodeNum()\n\n def rlRotate(self):\n self.right.rRotate()\n self.lRotate()\n def lrRotate(self):\n self.left.lRotate()\n self.rRotate()\n def checkAndFixRightInsert(self):\n if(height(self.right)-height(self.left)<=1):\n self.updateHeight()\n self.updateNodeNum()\n return\n if(height(self.right.left)-height(self.right.right)>=1):\n self.rlRotate()\n else:\n self.lRotate()\n\n def checkAndFixLeftInsert(self):\n if(height(self.left)-height(self.right)<=1):\n self.updateHeight()\n self.updateNodeNum()\n return\n if(height(self.left.right)-height(self.left.left)>=1):\n self.lrRotate()\n else:\n self.rRotate()\n def checkAndFixRightDelete(self):\n self.checkAndFixLeftInsert()\n def checkAndFixLeftDelete(self):\n self.checkAndFixRightInsert()\n \n \n#klevel=1\n\nclass RangeIndex(object):\n \"\"\"\n Post: Array-based range index implementation.\n Now: AVL-tree-based range index implementation\n \"\"\"\n \n def __init__(self):\n \"\"\"Initially empty range index.\"\"\"\n self.head=Node(0,None)\n \n def add(self, key):\n \"\"\"Inserts a key in the range index.\"\"\"\n \n if key is None:\n raise ValueError('Cannot insert nil in the index')\n if(self.head.right is None):\n self.head.right=Node(key,self.head)\n else:\n self.__add__(self.head.right,key)\n \n def __add__(self,toBeInserted,key):\n if(toBeInserted is None):\n if(self.head.right is None):\n raise Exception(\"Algorithm error: insertion reached a None node while the tree is empty!\")\n else:\n raise Exception(\"Algorithm error: insertion should never reach a None node!\")\n if(key>toBeInserted.key):\n if(toBeInserted.right is None):\n toBeInserted.right=Node(key,toBeInserted)\n toBeInserted.updateHeight()\n toBeInserted.updateNodeNum()\n else:\n self.__add__(toBeInserted.right,key)\n toBeInserted.updateHeight()\n toBeInserted.updateNodeNum()\n toBeInserted.checkAndFixRightInsert()\n \n elif(keycurrentNode.key):\n self.__remove__(currentNode.right,key)\n currentNode.checkAndFixRightDelete()\n elif(key=tree.key)):\n break\n if(l=l):\n# klevel+=1\n self.__nodeList__(node.left,l,h,result)\n# klevel-=1\n if(node.key<=h):\n# klevel+=1\n self.__nodeList__(node.right,l,h,result)\n# klevel-=1\n def __rank__(self,currentNode,key):\n if(currentNode is None):\n return False,0\n if(key>currentNode.key):\n hit,count=self.__rank__(currentNode.right,key)\n return hit,count+1+nodeNum(currentNode.left)\n if(key other.key or\n (self.key == other.key and self.wire_id > other.wire_id))\n \n def __ge__(self, other):\n # :nodoc: Delegate comparison to keys.\n return (self.key > other.key or\n (self.key == other.key and self.wire_id >= other.wire_id))\n\n def __eq__(self, other):\n # :nodoc: Delegate comparison to keys.\n return self.key == other.key and self.wire_id == other.wire_id\n \n def __ne__(self, other):\n # :nodoc: Delegate comparison to keys.\n return self.key != other.key or self.wire_id != other.wire_id\n\n def __hash__(self):\n # :nodoc: Delegate hashing to the key pair (lists aren't hashable).\n return hash((self.key, self.wire_id))\n\n def __repr__(self):\n # :nodoc: nicer formatting to help with debugging\n return ''\n\nclass KeyWirePairL(KeyWirePair):\n \"\"\"A KeyWirePair that is used as the low end of a range query.\n \n This KeyWirePair is smaller than all other KeyWirePairs with the same key.\"\"\"\n def __init__(self, 
key):\n self.key = key\n self.wire = None\n self.wire_id = -1000000000\n\nclass KeyWirePairH(KeyWirePair):\n \"\"\"A KeyWirePair that is used as the high end of a range query.\n \n This KeyWirePair is larger than all other KeyWirePairs with the same key.\"\"\"\n def __init__(self, key):\n self.key = key\n self.wire = None\n # HACK(pwnall): assuming 1 billion objects won't fit into RAM.\n self.wire_id = 1000000000\n\nclass CrossVerifier(object):\n \"\"\"Checks whether a wire network has any crossing wires.\"\"\"\n \n def __init__(self, layer):\n \"\"\"Verifier for a layer of wires.\n \n Once created, the verifier can list the crossings between wires (the \n wire_crossings method) or count the crossings (count_crossings).\"\"\"\n\n self.events = []\n self._events_from_layer(layer)\n self.events.sort()\n \n self.index = RangeIndex()\n self.result_set = ResultSet()\n self.performed = False\n \n def count_crossings(self):\n \"\"\"Returns the number of pairs of wires that cross each other.\"\"\"\n if self.performed:\n raise \n self.performed = True\n return self._compute_crossings(True)\n\n def wire_crossings(self):\n \"\"\"An array of pairs of wires that cross each other.\"\"\"\n if self.performed:\n raise \n self.performed = True\n return self._compute_crossings(False)\n\n def _events_from_layer(self, layer):\n \"\"\"Populates the sweep line events from the wire layer.\"\"\"\n for wire in layer.wires.values():\n if wire.is_horizontal():\n self.events.append([wire.x1, 0, wire.object_id, 'add', wire])\n self.events.append([wire.x2,3,wire.object_id, 'delete', wire])\n else: \n self.events.append([wire.x1, 1, wire.object_id, 'query', wire])\n\n def _compute_crossings(self, count_only):\n \"\"\"Implements count_crossings and wire_crossings.\"\"\"\n if count_only:\n result = 0\n else:\n result = self.result_set\n for event in self.events:\n event_x, event_type, wire = event[0], event[3], event[4]\n \n if event_type == 'add':\n self.trace_sweep_line(event_x)\n self.index.add(KeyWirePair(wire.y1, wire))\n\n elif event_type == 'query':\n self.trace_sweep_line(event_x)\n if count_only:\n result += self.index.count(KeyWirePairL(wire.y1),\n KeyWirePairH(wire.y2))\n else:\n cross_wires=self.index.list(KeyWirePairL(wire.y1),\n KeyWirePairH(wire.y2))\n for cross_wire in cross_wires:\n result.add_crossing(wire, cross_wire.wire)\n elif event_type=='delete':\n self.trace_sweep_line(event_x)\n self.index.remove(KeyWirePair(wire.y1, wire))\n \n return result\n \n def trace_sweep_line(self, x):\n \"\"\"When tracing is enabled, adds info about where the sweep line is.\n \n Args:\n x: the coordinate of the vertical sweep line\n \"\"\"\n # NOTE: this is overridden in TracedCrossVerifier\n pass\n\nclass TracedCrossVerifier(CrossVerifier):\n \"\"\"Augments CrossVerifier to build a trace for the visualizer.\"\"\"\n \n def __init__(self, layer):\n CrossVerifier.__init__(self, layer)\n self.trace = []\n self.index = TracedRangeIndex(self.trace)\n self.result_set = TracedResultSet(self.trace)\n \n def trace_sweep_line(self, x):\n self.trace.append({'type': 'sweep', 'x': x})\n \n def trace_as_json(self):\n \"\"\"List that obeys the JSON format restrictions with the verifier trace.\"\"\"\n return self.trace\n\n# Command-line controller.\nif __name__ == '__main__':\n import sys\n layer = WireLayer.from_file(sys.stdin)\n verifier = CrossVerifier(layer)\n \n if os.environ.get('TRACE') == 'jsonp':\n verifier = TracedCrossVerifier(layer)\n result = verifier.wire_crossings()\n json_obj = {'layer': layer.as_json(), 'trace': 
verifier.trace_as_json()}\n sys.stdout.write('onJsonp(')\n json.dump(json_obj, sys.stdout)\n sys.stdout.write(');\\n')\n elif os.environ.get('TRACE') == 'list':\n verifier.wire_crossings().write_to_file(sys.stdout)\n else:\n sys.stdout.write(str(verifier.count_crossings()) + \"\\n\")\n","sub_path":"6006/A3/circuit2/circuit2.py","file_name":"circuit2.py","file_ext":"py","file_size_in_byte":20778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"638839702","text":"from flask import Flask\nimport sqlite3\n\n\nNO_RESULT = 'No result from local server'\nconn = sqlite3.connect('resultsDB.sqlite')\ncur = conn.cursor()\n\nhost = '0.0.0.0'\nportNumber = 12223\napp = Flask(__name__)\n\n\ndef getResult(usn, retry=False):\n usn = usn[0:5] + usn[5:7].upper() + usn[7:]\n print(usn)\n cur.execute('SELECT result FROM Results WHERE usn = ?', (usn,))\n htmlRes = cur.fetchone()\n if(htmlRes != None):\n htmlRes, = htmlRes\n return htmlRes\n else:\n return None\n\n@app.route('/', methods=['GET'])\ndef keyGet(usn):\n res = getResult(usn)\n if res is None:\n return NO_RESULT\n else:\n return res\n\n\nif __name__ == '__main__':\n app.run(host=host, port=portNumber, threaded=False)\n","sub_path":"local_result_server.py","file_name":"local_result_server.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"383419355","text":"import os\n\nfrom classify import show_trained_data\n\n\ndef test_file_codec():\n rootdir = os.getcwd() + '/DataSet'\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n path = os.path.join(subdir, file)\n\n try:\n f = open(path)\n sentence = f.read()\n except:\n print(path.split('/')[-1])\n\n\nshow_trained_data()\n\n# test_file_codec()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"177957609","text":"import io\nimport sys\n\nclass GeneralizedSuffixTree(object):\n \"\"\"\n Represents a generalized suffix tree for string matching.\n\n Uses a list of words.\n \"\"\"\n\n WORD_DELIMITER_CHAR = '$'\n\n class Node():\n \"\"\"\n Represents a node in the generalized suffix tree.\n \"\"\"\n\n def __init__(self, parent, words=set()):\n \"\"\"\n Initializes.\n \"\"\"\n\n self._parent = parent\n self._children = []\n self._words = words\n\n @property\n def parent(self):\n \"\"\"\n Returns the parent node ID.\n \"\"\"\n\n return self._parent\n\n @property\n def children(self):\n \"\"\"\n Returns the IDs of the child nodes.\n \"\"\"\n\n return self._children\n\n @property\n def words(self):\n \"\"\"\n Returns the set of the indices of the words that pass through\n this node.\n \"\"\"\n\n return self._words\n\n def __repr__(self):\n \"\"\"\n Returns the string representation.\n \"\"\"\n\n return 'parent={}, children={}, words={}'.format(self._parent,\n self._children,\n self._words)\n\n class Edge():\n \"\"\"\n Represents an edge in the generalized suffix tree.\n \"\"\"\n\n def __init__(self, word_index, start_index, stop_index):\n \"\"\"\n Initializes.\n \"\"\"\n\n self._word_index = word_index\n self._start_index = start_index\n self._stop_index = stop_index\n\n def __repr__(self):\n \"\"\"\n Returns the string representation.\n \"\"\"\n\n return 'word={}, start_index={}, stop_index={}'.format(\n self._word_index,\n self._start_index,\n self._stop_index)\n\n def __init__(self, words):\n \"\"\"\n 
Initializes.\n \"\"\"\n\n self._words = []\n root = GeneralizedSuffixTree.Node(-1)\n self._nodes = [ root ]\n self._edges = {}\n\n for word in words:\n self._add_word(word, len(words) > 1)\n\n def _add_word(self, word, multiple=True):\n \"\"\"\n Adds the word to the tree.\n\n Algorithm:\n\n - Append $ to the word (so that all the words are delimited\n by $0, ..., and $(N - 1).\n\n - Add each suffix of the word to the tree.\n\n - Find the insertion point and corresponding suffix.\n\n - Create a new node and add it as a child to its parent node.\n\n - Create an edge from the insertion point to the new node.\n \"\"\"\n\n word += GeneralizedSuffixTree.WORD_DELIMITER_CHAR\n if multiple:\n word += str(len(self._words))\n self._words.append(word)\n\n end_index = word.index(GeneralizedSuffixTree.WORD_DELIMITER_CHAR)\n for i in range(end_index + 1):\n suffix = word[i:]\n insertion_suffix, insertion_parent_id = self._insert_node(suffix)\n\n new_word_index = len(self._words) - 1\n node = GeneralizedSuffixTree.Node(insertion_parent_id,\n { new_word_index })\n self._nodes.append(node)\n new_child_id = len(self._nodes) - 1\n self._nodes[insertion_parent_id]._children.append(new_child_id)\n\n end_index = len(word)\n start_index = end_index - len(insertion_suffix)\n edge = GeneralizedSuffixTree.Edge(new_word_index,\n start_index,\n end_index)\n self._edges[insertion_parent_id, new_child_id] = edge\n\n def _insert_node(self, suffix, current_node=0):\n \"\"\"\n Traverses the tree to determine the insertion point of the given suffix.\n\n Algorithm:\n\n - Update the current node's word indices to include the last word's\n index.\n\n - If the first character of the given suffix is the delimiter,\n then return (suffix, current node).\n\n - Consider each child edge leading from the current node.\n\n - If the entire edge is a prefix of the suffix, make a\n recursive call to move to the child node and traverse\n further down the tree.\n\n - Otherwise, if the edge partially overlaps in the prefix of\n the current suffix, split the edge and insert a new node\n at the split point (which is at the end of the overlap).\n Return (offset suffix, new node ID).\n \"\"\"\n\n new_word_index = len(self._words) - 1\n self._nodes[current_node]._words.add(new_word_index)\n\n if suffix[0] == GeneralizedSuffixTree.WORD_DELIMITER_CHAR:\n return suffix, current_node\n\n for child_id in self._nodes[current_node]._children:\n edge = self._edges[current_node, child_id]\n edge_word = self.edge_substring(edge)\n\n if suffix[:len(edge_word)] == edge_word:\n suffix = suffix[len(edge_word):]\n return self._insert_node(suffix, child_id)\n elif suffix[0] == edge_word[0]:\n offset = 0\n while (suffix[offset] == edge_word[offset] !=\n GeneralizedSuffixTree.WORD_DELIMITER_CHAR):\n offset += 1\n\n new_node_id = self._split_edge(current_node, child_id, offset)\n\n return suffix[offset:], new_node_id\n\n return suffix, current_node\n\n def _split_edge(self, parent_id, child_id, split_pos):\n \"\"\"\n Splits the edge between the given parent and child nodes at the given\n split position.\n\n Inserts a new node at the split position and returns the index of the\n new node.\n\n Algorithm:\n\n - Create a new node, copying the child node's word indices and\n adding the last word's index. The node's parent is the old edge's\n parent node. 
The node's children is the old edge's child node.\n\n - The old edge's parent node's children are updated to include the\n new node and exclude the old edge's child node.\n\n - The old edge's child node's parent is updated as the new node.\n\n - The tree's edges are updated to remove the old edge and to add\n 2 new edges from the parent node to the new node and from the new\n node to the child node.\n \"\"\"\n\n new_node_id = len(self._nodes)\n new_word_index = len(self._words) - 1\n word_indices = self._nodes[child_id]._words | { new_word_index }\n new_node = GeneralizedSuffixTree.Node(parent_id,\n words=word_indices)\n self._nodes.append(new_node)\n self._nodes[new_node_id]._children.append(child_id)\n\n self._nodes[parent_id]._children.append(new_node_id)\n self._nodes[parent_id]._children.remove(child_id)\n\n self._nodes[child_id]._parent = new_node_id\n\n old_edge = self._edges[parent_id, child_id]\n parent_to_new_node_edge = GeneralizedSuffixTree.Edge(\n old_edge._word_index,\n old_edge._start_index,\n old_edge._start_index + split_pos)\n self._edges[parent_id, new_node_id] = parent_to_new_node_edge\n new_node_to_child_edge = GeneralizedSuffixTree.Edge(\n old_edge._word_index,\n old_edge._start_index + split_pos,\n old_edge._stop_index)\n self._edges[new_node_id, child_id] = new_node_to_child_edge\n\n del self._edges[parent_id, child_id]\n\n return new_node_id\n\n def edge_substring(self, edge):\n \"\"\"\n Returns the substring that corresponds to the given edge.\n \"\"\"\n\n word = self._words[edge._word_index]\n\n return word[edge._start_index:edge._stop_index]\n\n def node_substring(self, node_id):\n \"\"\"\n Returns the substring that corresponds to a traversal from\n the root to the given node.\n\n Algorithm:\n\n - Traverse the tree from the given node to the root.\n Accumulate characters over the visited edges.\n \"\"\"\n\n word = ''\n while self._nodes[node_id]._parent != -1:\n edge = self._edges[self._nodes[node_id]._parent, node_id]\n word = self.edge_substring(edge) + word\n\n node_id = self._nodes[node_id]._parent\n\n return word\n\n def node_depth(self, node_id):\n \"\"\"\n Returns the node's depth in the tree, which means the length of the\n substring that leads to the given node.\n\n Note: The substring does not include the out-of-alphabet character.\n\n Algorithm:\n\n - If the node ID is that of the root node, return 0.\n\n - Check the first edge for whether it includes the delimited\n character. 
The depth is initialized to the first edge's word's\n length.\n\n - Continue traversing the tree from the given node to the root.\n Increment the depth by each edge's word length.\n \"\"\"\n\n if node_id == 0:\n return 0\n\n edge = self._edges[self._nodes[node_id]._parent, node_id]\n edge_word = self.edge_substring(edge)\n depth = None\n if GeneralizedSuffixTree.WORD_DELIMITER_CHAR not in edge_word:\n depth = len(edge_word)\n else:\n marker_index = edge_word.index(\n GeneralizedSuffixTree.WORD_DELIMITER_CHAR)\n depth = len(edge_word[:marker_index])\n node_id = self._nodes[node_id]._parent\n\n while self._nodes[node_id]._parent != -1:\n edge = self._edges[self._nodes[node_id]._parent, node_id]\n edge_word = self.edge_substring(edge)\n depth += len(edge_word)\n\n node_id = self._nodes[node_id]._parent\n\n return depth\n\n @property\n def nodes(self):\n \"\"\"\n Returns the nodes.\n \"\"\"\n\n return self._nodes\n\n @property\n def edges(self):\n \"\"\"\n Returns the edges.\n \"\"\"\n\n return self._edges\n","sub_path":"course4-strings/practice/suffix_tree/generalized_suffix_tree.py","file_name":"generalized_suffix_tree.py","file_ext":"py","file_size_in_byte":10726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"408234447","text":"#!/bin/python\n\n#importing the porgrams\nimport os\nimport sys\n\npdb_file = None\nglobal file_name\nlines = None\n\n\n### THE MENU FUNCTIONS\ndef print_menu():\n print(\n\"\"\"\n********************************************************************************\n* PDB FILE ANALYZER *\n********************************************************************************\n* Select an option from below: *\n* *\n* 1) Open a PDB File (O) *\n* 2) Information (I) *\n* 3) Show histogram of amino acids (H) *\n* 4) Display Secondary Structure (S) *\n* 5) Export PDB File (X) *\n* 6) Exit (Q) *\n* *\n* Current PDB: %s *\n********************************************************************************\n\"\"\"%pdb_file)\n \n option = input(\": \")\n #return option \n\n # \"\"\"Takes user's option following main menu display, assesses it and calls the respective funtions\"\"\"\n if option.lower() in ('o','i','h','s','x','q'):\n if option.lower() == 'o':\n open_file()\n print_menu()\n \n if option.lower() == 'i':\n print(\"Information\")\n main_info_function(file_name)\n print_menu()\n \n if option.lower() == 'h':\n print(\"Histogram of Amino Acids\")\n main_histogram(file_name)\n print_menu()\n \n if option.lower() == 's':\n print(\"Display Secondary Structure\")\n main_sec_structure(file_name)\n print_menu()\n \n if option.lower() == 'x':\n export_func(file_name)\n print_menu()\n \n if option.lower() == 'q':\n print(\"Exit\")\n main_quit()\n \n else:\n print(\"\\n\" + \"unsupported option: %s, please enter valid option\" %(option))\n print_menu()\n\n\n#------------------------------------------------------------------------------------------------------------------------\n# OPEN SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef path_check(file):\n \"\"\" Checks if file exists. if it does, then call function that checks whether file is open \"\"\"\n if os.path.exists(file): \n check_open(file)\n else:\n print(\"file not found\")\n print_menu()\n\ndef check_open(file):\n \"\"\"checks if file is open or not. 
If file is already open, it calls function for replacement enquiry\"\"\"\n if pdb_file:\n file_name = file\n else:\n replacement_check(file)\n\ndef replacement_check(file):\n \"\"\"checks if user wants to replace file or not, incase file was already loaded\"\"\"\n if True:\n x = input(\"Would you like to replace the current file, Y/N: \")\n if x.lower() == 'y':\n file_name = file\n print(\"The File 3AYU.pdb has been successfully loaded\")\n else:\n print(\"proceed to select options to work with\", pdb_file)\n print_menu()\n\ndef read_pdb(pdb_file):\n with open(file_name,\"r\") as myfile:\n lines = myfile.readlines()\n return lines \n#------------------------------------------------------------------------------------------------------------------------\n# INFORMATION SECTION #\n#------------------------------------------------------------------------------------------------------------------------\ndef title_print(lines):\n \"\"\"print title of the protein\"\"\"\n title_string = \"\"\n for line in lines:\n if line.startswith(\"TITLE\"):\n title_string = title_string + line[9:].strip()\n print(\"Title : \" , title_string)\n \ndef extract_chain_sequences(lines):\n \"\"\"extract all the sequence residue lines from the file\"\"\"\n seq = []\n for line in lines:\n if line.startswith('SEQRES'):\n seq.append(line[0:])\n return seq\n\ndef collect_chain_ids(all_sequences):\n \"\"\"identify chains in protein\"\"\"\n chains = []\n for line in all_sequences:\n if line[11] not in chains:\n chains.append(line[11])\n return chains\n\ndef print_chains(chains_in_prot):\n \"\"\"print chains in protein\"\"\"\n x = '' .join(chains_in_prot)\n print(\"- Chains:\", x[0], \"and\", x[1])\n \ndef pdb_info(all_sequences, chains_in_prot, lines):\n title_print(lines)\n for chain in chains_in_prot:\n print(\"- Chain \", chain)\n \n residues = []\n for line in all_sequences:\n if line[11] == chain:\n one_letter_code = {'GLY':'G', 'ALA':'A', 'VAL':'V', 'CYS':'C', 'PRO':'P', 'LEU':'L',\\\n 'ILE':'I', 'MET':'M', 'TRP':'W', 'PHE':'F', 'SER':'S', 'THR':'T',\\\n 'TYR':'Y', 'ASN':'N', 'GLN':'Q', 'LYS':'K', 'ARG':'R', 'HIS':'H',\\\n 'ASP':'D', 'GLU':'E'}\n residues.extend(line[18:].split()) #splits the string into a list of residues after appending to the list of residues\n chain_seq = '' .join([one_letter_code[i] for i in residues])#converts the 3 code residues to their corresponding 1 letter denotation\n \n helix = []\n for line in lines:\n if line.startswith('HELIX') and line[19] == chain:\n helix.append(line[0:])\n numb = len(helix)\n \n sheet = []\n for line in lines:\n if line.startswith('SHEET') and line[21] == chain:\n sheet.append(line[0:])\n num = len(sheet)\n \n print(\"Number of amino acids:\", len(chain_seq))\n print(\"Number of helix: \", numb)\n print(\"Number of sheet: \", num)\n print(\"Sequence:\", '\\n'.join(''.join(chain_seq[i:i+50]) for i in range(0, len(chain_seq), 50)))\n\n \n\n#------------------------------------------------------------------------------------------------------------------------\n# HISTOGRAM SECTION #\n#------------------------------------------------------------------------------------------------------------------------\ndef chain_sequence(all_sequences):\n \"\"\"general print sequences in all chains\"\"\"\n residues = []\n for line in all_sequences:\n one_letter_code = {'GLY':'G', 'ALA':'A', 'VAL':'V', 'CYS':'C', 'PRO':'P', 'LEU':'L', 'ILE':'I',\\\n 'MET':'M', 'TRP':'W', 'PHE':'F', 'SER':'S', 'THR':'T', 'TYR':'Y', 'ASN':'N', \\\n 'GLN':'Q', 'LYS':'K', 'ARG':'R', 'HIS':'H', 'ASP':'D', 
'GLU':'E'}\n residues.extend(line[18:].split()) #splits the string into a list of residues after appending to the list of residues\n chain_seq = '' .join([one_letter_code[i] for i in residues]) #converts the 3 code residues to their corresponding 1 letter denotation\n return chain_seq\n\ndef ordering_option():\n \"\"\"prints the ordering options and gives the user an input cell. returns the input\"\"\"\n print(\"\"\"\n Choose an option to order by:\n number of amino acids - ascending (an)\n number of amino acids - descending (dn)\n alphabetically - ascending (aa)\n alphabetically - descending (da)\n \"\"\")\n choice = input(\"order by: \")\n return choice\n\ndef an_order(chain_seq):\n \"\"\" generates a list of the 20 amino acids ordered in ascending order of their abundance in the sequence\"\"\"\n # availing list of all the possible (20) amino acids\n aa_residues = ['G', 'A', 'V', 'C', 'P', 'L', 'I', 'M', 'W', 'F', 'S', 'T', 'Y', 'N', 'Q', 'K', 'R', 'H', 'D', 'E']\n aa_number = [] # initializing an empty list of count for every amino acid in the sequence\n \n # Create a dictionary of each amino acid paired with its count in the sequence\n for residue in aa_residues:\n aa_number.append(chain_seq.count(residue))\n aa_count_dict = dict((residue,aa) for residue,aa in zip(aa_residues, aa_number))\n \n # create a new list of the 20 amino acids ordered according to user option(an)\n residues_list= []\n for k,v in sorted(aa_count_dict.items(), key=lambda p:p[1]):\n residues_list.append(k)\n return residues_list\n\ndef dn_order(chain_seq):\n \"\"\"generates a list of the 20 amino acids ordered in descending order of their abundance in the sequence\"\"\"\n # availing list of all the possible (20) amino acids\n aa_residues = ['G', 'A', 'V', 'C', 'P', 'L', 'I', 'M', 'W', 'F', 'S', 'T', 'Y', 'N', 'Q', 'K', 'R', 'H', 'D', 'E']\n aa_number = [] # initializing an empty list of count for every amino acid in the sequence\n \n # Create a dictionary of each amino acid paired with its count in the sequence\n for residue in aa_residues:\n aa_number.append(chain_seq.count(residue))\n aa_count_dict = dict((residue,aa) for residue,aa in zip(aa_residues, aa_number))\n \n # create a new list of the 20 amino acids ordered according to user option(dn)\n residues_list = []\n for k,v in sorted(aa_count_dict.items(), key=lambda p:p[1], reverse=True):\n residues_list.append(k)\n \n return residues_list\n\ndef aa_order(chain_seq):\n \"\"\"generates a list of the 20 amino acids ordered in ascending alphabetical order\"\"\"\n residue_list = []\n one_letter_code = {'G':'Gly', 'A':'Ala', 'V':'Val', 'C':'Cys', 'P':'Pro', 'L':'Leu', 'I':'Ile', 'M':'Met', 'W':'Trp', 'F':'Phe', 'S':'Ser', 'T':'Thr', 'Y':'Tyr', 'N':'Asn', 'Q':'Gln', 'K':'Lys', 'R':'Arg', 'H':'His', 'D':'Asp', 'E':'Glu'}\n for k,v in sorted(one_letter_code.items(), key=lambda p:p[1]):\n residue_list.append(k)\n return residue_list\n\n\ndef da_order(chain_seq):\n \"\"\"generates a list of the 20 amino acids ordered in ascending alphabetical order\"\"\"\n residue_list = []\n one_letter_code = {'G':'Gly', 'A':'Ala', 'V':'Val', 'C':'Cys', 'P':'Pro', 'L':'Leu', 'I':'Ile', 'M':'Met', 'W':'Trp', 'F':'Phe', 'S':'Ser', 'T':'Thr', 'Y':'Tyr', 'N':'Asn', 'Q':'Gln', 'K':'Lys', 'R':'Arg', 'H':'His', 'D':'Asp', 'E':'Glu'}\n for k,v in sorted(one_letter_code.items(), key=lambda p:p[1], reverse=True):\n residue_list.append(k)\n return residue_list\n\ndef draw_hist(chain_seq, residues):\n \"\"\"generates a histogram of amino acids in the sequence\"\"\"\n one_letter_code = 
{'G':'Gly', 'A':'Ala', 'V':'Val', 'C':'Cys', 'P':'Pro', 'L':'Leu', 'I':'Ile', 'M':'Met', 'W':'Trp', 'F':'Phe', 'S':'Ser', 'T':'Thr', 'Y':'Tyr', 'N':'Asn', 'Q':'Gln', 'K':'Lys', 'R':'Arg', 'H':'His', 'D':'Asp', 'E':'Glu'}\n #residues = ['G', 'A', 'V', 'C', 'P', 'L', 'I', 'M', 'W', 'F', 'S', 'T', 'Y', 'N', 'Q', 'K', 'R', 'H', 'D', 'E']\n for residue in residues:\n freq = []\n for i in chain_seq:\n if residue == i:\n l = one_letter_code[i] \n freq.append(l)\n amino_acid = \"\".join(set(freq))\n if residue in chain_seq:\n print(amino_acid, \"(\", len(freq),\"):\", \"*\" * len(freq))\n else:\n pass\n\ndef summary_caller(order, chain_seq):\n \"\"\"takes theorder option given by the user and calls appropriate function to excecute task\"\"\"\n \n if order.lower() in ('an','dn','aa','da'):\n if order.lower() == 'an': \n residues = an_order(chain_seq)\n draw_hist(chain_seq, residues)\n \n elif order.lower() == 'dn':\n residues = dn_order(chain_seq)\n draw_hist(chain_seq, residues)\n \n elif order.lower() == 'aa':\n residues = aa_order(chain_seq)\n draw_hist(chain_seq, residues)\n \n else:\n residues = da_order(chain_seq)\n draw_hist(chain_seq, residues)\n else:\n print(\"invalid input\")\n ordering_option() \n \n\n#------------------------------------------------------------------------------------------------------------------------\n# SECONDARY STRUCTURE SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef sec_structure_generate(chains_in_prot, all_sequences,lines):\n for chain in chains_in_prot: # Adress a chain at a time from my unique chains list\n print('\\n' + \"Chain\",chain,\":\")\n \n residues = [] # Initiate empty list for my residues per chain\n count = 0\n structure = [] # Initiate an empty list to create the secondary structure symbols as we move over our residue list once generated\n tag =[]\n for line in all_sequences: # Generate primary sequence of the chain\n if line[11] == chain:\n count = count+1\n one_letter_code = {'GLY':'G', 'ALA':'A', 'VAL':'V', 'CYS':'C', 'PRO':'P', 'LEU':'L', 'ILE':'I',\\\n 'MET':'M', 'TRP':'W', 'PHE':'F', 'SER':'S', 'THR':'T', 'TYR':'Y', 'ASN':'N', \\\n 'GLN':'Q', 'LYS':'K', 'ARG':'R', 'HIS':'H', 'ASP':'D', 'GLU':'E'}\n x = line[18:].split()\n residues.extend(x) #splits the string into a list of residues after appending to the list of residues\n chain_seq = '' .join([one_letter_code[i] for i in residues]) #converts the 3 code residues to their corresponding 1 letter denotation\n for i in residues: #Fill the structure list with dashes ('-') as place holders per residue\n structure.append(\"-\")\n tag.append(\" \")\n for line in lines: # Identify where each structure starts and ends using the secondary structure info in the pdb file\n \n if line.startswith('SHEET'): #Process for sheet part of the chain\n new_line = line.split()\n if new_line[5] == chain:\n start = int(new_line[6])\n stop = int(new_line[9])\n num = (stop - start) +1\n update_structure = num * \"|\" \n \n update_tag = (new_line[1] + new_line[2])\n tag[start-1:start+1] = update_tag\n structure[start - 1 : stop] = update_structure \n \n if line.startswith('HELIX'): #process for helix part of chain\n new_line = line.split()\n if new_line[4] == chain:\n start = int(new_line[5])\n stop = int(new_line[8])\n num = (stop - start) +1\n update_structure = num * \"/\" \n \n update_tag = (new_line[1])\n tag[start-1:start+1] = update_tag\n structure[start - 1 : stop] = update_structure\n \n for i in range(0, 
len(chain_seq),80):\n \n print('\\n' + ''.join(chain_seq[i:i+80]) + \\\n '\\n' + ''.join(structure[i:i+80]) +\\\n '\\n' + ''.join(tag[i:i+80]))\n print(\"(%d)\" %(len(chain_seq)))\n \n#------------------------------------------------------------------------------------------------------------------------\n# EXPORT SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef export_func(file_name):\n out_file_name = file + 'export'\n with open(out_file_name,\"w\") as myfile:\n lines_to_transfer = open(file_name,\"r\")\n for line in lines_to_transfer:\n myfile.write(line)\n file.close()\n\n#------------------------------------------------------------------------------------------------------------------------\n# EXIT SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef quit():\n \"\"\"Takes users input of whether to exit program or go back to the main menu\"\"\"\n option = input(\"Do you want to exit (E) or do you want go back to the menu (M)\")\n return option\n\ndef quit_options(option): \n \"\"\"Executes user option\"\"\"\n if option == \"E\" or option == \"e\":\n sys.exit()\n elif option == \"M\" or option == \"m\":\n print_menu()\n else:\n quit()\n \n \n#------------------------------------------------------------------------------------------------------------------------\n# THE MASTER FUNCTIONS FOR EACH OPTION #\n#------------------------------------------------------------------------------------------------------------------------\ndef open_file():\n \"\"\" Asks the user to put in file name and once received, it calls the function that checks whether file path exists\"\"\"\n file = input(\"Enter a Valid PATH for a PDB File: \")\n path_check(file)\n return file\n\ndef main_info_function(file_name):\n \"\"\"The information summary boss function\"\"\"\n lines = read_pdb(file)\n all_sequences = extract_chain_sequences(lines) #extracts the sequence residues lines\n sequence = chain_sequence(all_sequences) # extracts the sequence of amino acids in the protein\n chains_in_prot = collect_chain_ids(all_sequences)\n pdb_info(all_sequences, chains_in_prot, lines)\n\ndef main_histogram(file_name):\n \"\"\"Main function. 
Calls the rest of the functions within the histogram option\"\"\"\n lines = read_pdb(file) \n all_sequences = extract_chain_sequences(lines) #extracts the sequence residues lines\n sequence = chain_sequence(all_sequences) # extracts the sequence of amino acids in the protein\n order = ordering_option() # displays order options for the user and records the choice input\n summary_caller(order,sequence) # sieves through the order options and calls the appropriate functions based on order selected by user\n\ndef main_sec_structure(file_name):\n \"\"\"This is master secondary structure function\"\"\"\n lines = read_pdb(file)\n all_sequences = extract_chain_sequences(lines)\n chains_in_prot = collect_chain_ids(all_sequences)\n sec_structure_generate(chains_in_prot, all_sequences,lines)\n\ndef main_quit():\n \"\"\"The master quit function\"\"\"\n option = quit()\n quit_options(option)\n \ndef software_funct():\n print_menu()\nsoftware_funct()\n\n","sub_path":".ipynb_checkpoints/mini-project-frame-checkpoint.py","file_name":"mini-project-frame-checkpoint.py","file_ext":"py","file_size_in_byte":19133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"272898876","text":"import pandas as pd\nfrom textblob import TextBlob\nfrom textblob.sentiments import NaiveBayesAnalyzer\nimport seaborn as sns; sns.set(color_codes=True)\nfrom seaborn import kdeplot\nimport matplotlib.pyplot as plt\n\n# load DataFrame\ndf = pd.read_csv(\"CSV-JoeBin.csv\")\n\n# new data frame with only two columns\nDataAnalis = df[[\"usertweet\", \"TweetMsg\"]]\nprint (DataAnalis.head(10))\nprint ('')\n\n# drop rows by index if the text contains any of these patterns\n# we use or (|)\ndf3 = DataAnalis.drop(\n DataAnalis[DataAnalis['TweetMsg'].str.contains('@ONU_es @free_equal @ONU_derechos')].index | \n DataAnalis[DataAnalis['TweetMsg'].str.contains('@BarcelonaSC')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('LigaPro')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('Alineación confirmada')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('¡Nuestra armadura de hoy!')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('¡Hoy juega el Ídolo!')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('PRÓXIMO PARTIDO')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('LDU_Oficial')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('Escoge el diseño que más te guste')].index\n \n )\nprint(df3.head(10))\n\nprint('Total de elemetos Data-Frame(df)', len(df))\nprint('Total de elemetos Data-Frame(df3)', len(df3))\n\n# remove special characters and urls\ndf4 = df3.TweetMsg.str.replace('http\\S+',\"\").str.replace('@',\"\").str.replace('?',\"\").str.replace('!',\"\").str.replace('(',\"\").str.replace(')',\"\").str.replace('#',\"\").str.replace(':',\"\").str.replace('¡',\"\").str.replace('.',\"\").str.replace(',',\"\").str.replace('/',\"\").str.replace('-',\"\").str.replace('_',\"\").str.replace('+',\"\").str.replace('“',\"\").str.replace('\"',\"\").str.replace(\"'\",\"\").str.replace(\"|\",\"\")\nprint (df4.head(10))\nprint ('')\n\n# global variables \nmsg = 1\nitem = []\ntweet = []\nNPLpolarity = []\n\n# sentiment variables\nNeutro=0\nPositivo=0\nMalo=0\n\n# iterate over every element of the data frame\nfor ind in df4.index: \n print (msg)\n try:\n print(df4[ind]) \n t=TextBlob(df4[ind])\n #ten=t.translate(to=\"en\")\n #print (ten)\n polarity= t.polarity\n print (polarity)\n #input()\n except:\n print(\"Error\")\n polarity= \"Error\"\n \n # append results to the lists of objects\n tweet.append(df4[ind])\n NPLpolarity.append(t.polarity)\n item.append(msg)\n\n # group by the polarity of the analyzed message\n if (t.polarity == 0):\n Neutro += 1\n\n if (t.polarity > 0 ):\n Positivo += 1\n\n if (t.polarity < 0 and t.polarity < 1):\n Malo += 1\n \n msg += 1\n \n# new Data-Frame including the polarity\ndfresultado = pd.DataFrame({'tweet': item, 'msg': tweet, 'NPLpolarity': NPLpolarity})\nprint(dfresultado.head(10))\n\n\n# plot lmplot\ng=sns.lmplot(x='tweet', y='NPLpolarity', data=dfresultado.head(150), line_kws={'color': 'red'})\nplt.savefig('data-lmplot.png', dpi=300)\nplt.show()\n\n# plot pie chart\nsizes = [Neutro,Malo, Positivo]\nlabels = ['Neutro','Malo', 'Positivo']\ncols = ['c','b','r']\nexplode=(0,0.1,0)\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\nplt.title('Analisis de Sentimientos')\nplt.savefig('data-Pie.png', dpi=300)\nplt.show()\n\n# save new data frame\ndfresultado.to_csv('CSV-Resultado.csv')\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"246222116","text":"import os\nimport numpy as np\ntry:\n import serial\nexcept:\n os.system(\"pip3 install pyserial\")\ntry:\n import serial.tools.list_ports\nexcept:\n os.system(\"pip3 install pyserial\")\n\nimport serial.tools.list_ports\nimport warnings\nimport time\nfrom deploy import predict\nfor p in serial.tools.list_ports.comports():\n print(\"Dev \",p.description)\n pass\n\ngsm_ports = [\n p.device\n for p in serial.tools.list_ports.comports()\n if 'Generic' in p.description or 'Arduino' in p.description or 'tty' in p.description\n]\n\nif not gsm_ports:\n print(\"No Arduino Device Found\")\nif len(gsm_ports) > 1:\n warnings.warn('Multiple Arduino found - using the first')\n\ntry:\n port = gsm_ports[0]\n device = serial.Serial(port, 9600, timeout=0.5) # /dev/ttyUSB0\n print(\"Communication Established with Device.\")\n time.sleep(3)\n\nexcept:\n\n print(\"Unable to Initialize \")\npath = \"Data/9/\"\ni = 0\nwhile True:\n a = device.readall()\n if len(a) > 5:\n a = a.decode()\n print(a)\n break\n time.sleep(0.1)\n\nwhile True:\n a = device.readall()\n if len(a) > 5:\n a = a.decode()\n # if (input(\"0-discard/1-save: \") == '0'):\n # continue\n data = a.split(\"\\n\\r\\n\")[1]\n parsed = []\n for i in data.split(\"\\n\"):\n i = i.split(\",\")\n try:\n int(i[0])\n i = list(map(int, i))\n parsed.append(i)\n except:\n pass\n parsed = np.array(parsed).flatten()\n print(\"Predicted Shape: \")\n predict(parsed)\n\n time.sleep(0.1)\n","sub_path":"Python_MPU6050.py","file_name":"Python_MPU6050.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"341238593","text":"from django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.generic import (CreateView, DetailView, ListView,\n TemplateView, UpdateView)\nimport datetime as dt\nimport json\n\nfrom . 
import models, forms\nfrom .nvd3 import (duration_chart_data, duration_weekday_chart_data,\n duration_monthly_chart_data, duration_yearly_chart_data)\nfrom common.views import LoginMixin, SiteMixin\n\n\nclass MainView(LoginMixin, SiteMixin, TemplateView):\n template_name = 'tracker/index.html'\n section = 'tracker'\n\n def get_context_data(self):\n context = super().get_context_data()\n events = models.Event.objects.filter(progress=3).annotate_duration()\n context['ongoing_events'] = events.order_by('-duration')\n events = models.Event.objects.filter(progress=4).annotate_duration()\n context['waiting_events'] = events.order_by('-duration')\n entries = models.Entry.objects.filter(duration=None)\n context['pending_entries'] = entries.order_by('-datetime')\n return context\n\n\nclass StatsView(LoginMixin, SiteMixin, TemplateView):\n template_name = 'tracker/stats.html'\n section = 'stats'\n\n def get_context_data(self):\n context = super().get_context_data()\n duration = models.Entry.objects.all().duration_over_time()\n duration_data = duration_weekday_chart_data(duration)\n context['weekday_data'] = json.dumps(duration_data)\n duration_data = duration_monthly_chart_data(duration)\n context['monthly_data'] = json.dumps(duration_data)\n duration_data = duration_yearly_chart_data(duration)\n context['yearly_data'] = json.dumps(duration_data)\n # This time n years ago\n context['n_years_ago'] = []\n today = dt.date.today()\n delta = dt.timedelta(3)\n for year in range(today.year - 1, 2007, -1):\n start = dt.datetime(year, today.month, today.day, 0, 0) - delta\n start = start.replace(tzinfo=timezone.utc)\n end = dt.datetime(year, today.month, today.day, 23, 59) + delta\n end = end.replace(tzinfo=timezone.utc)\n entries = models.Entry.objects.filter(datetime__range=(start, end))\n if entries:\n context['n_years_ago'].append((year, entries))\n return context\n\n\nclass TopView(SiteMixin, ListView):\n template_name = 'tracker/top.html'\n section = 'top'\n queryset = models.Event.objects.all()\\\n .annotate_duration()\\\n .order_by('-rating', '-duration')\n\n\n# Entries\n\nclass EntryMixin(LoginMixin, SiteMixin):\n model = models.Entry\n section = 'entries'\n\n\nclass EntryView(EntryMixin, DetailView):\n pass\n\n\nclass EntryCreateView(EntryMixin, CreateView):\n fields = 'datetime', 'event', 'duration', 'annotation'\n\n def get_initial(self):\n try:\n event_pk = self.request.GET.get('event')\n event = models.Event.objects.get(id=event_pk)\n initial = {'event': event}\n except models.Event.DoesNotExist:\n initial = {}\n finally:\n initial['datetime'] = timezone.now()\n return initial\n\n\nclass EntryUpdateView(EntryMixin, UpdateView):\n fields = 'datetime', 'event', 'duration', 'annotation'\n\n\nclass EntryListView(EntryMixin, ListView):\n queryset = models.Entry.objects.all()\\\n .select_related('event')\\\n .order_by('-datetime')\n paginate_by = 100\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n object_list = context['object_list']\n if object_list is not None:\n duration = object_list.duration_over_time()\n duration_data = duration_chart_data(duration)\n context['duration_data'] = json.dumps(duration_data)\n return context\n\n\nclass EntryCloseView(EntryMixin, UpdateView):\n\n def dispatch(self, request, *args, **kwargs):\n instance = self.get_object()\n delta = timezone.now() - instance.datetime\n instance.duration = int(delta.days * 24 * 60 + delta.seconds / 60)\n instance.save()\n url = reverse('tracker:events:detail', args=[instance.event.id])\n 
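# duration is stored in whole minutes: delta.days*24*60 folds whole days in,\n # delta.seconds/60 adds the remainder, and int() truncates; then send the\n # user back to the parent event's detail page rather than the entry itself.\n 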
return HttpResponseRedirect(url)\n\n\n# Events\n\nclass EventMixin(LoginMixin, SiteMixin):\n model = models.Event\n section = 'events'\n\n\nclass EventView(EventMixin, DetailView):\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n entries = self.object.entries.all()\n context['entry_list'] = entries.order_by('-datetime')\n context['tag_list'] = self.object.tags.all().order_by('value')\n duration = entries.duration_over_time()\n context['duration_data'] = json.dumps(duration_chart_data(duration))\n return context\n\n\nclass EventCreateView(EventMixin, CreateView):\n form_class = forms.EventForm\n\n\nclass EventUpdateView(EventMixin, UpdateView):\n form_class = forms.EventForm\n\n\nclass EventListView(EventMixin, ListView):\n paginate_by = 100\n queryset = models.Event.objects.all()\\\n .annotate_duration()\\\n .order_by('-date')\n\n\n# Tags\n\nclass TagMixin(LoginMixin, SiteMixin):\n model = models.Tag\n section = 'tags'\n\n\nclass TagView(TagMixin, DetailView):\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n events = self.object.events.exclude(rating=None).annotate_duration()\n context['event_list'] = events.order_by('-rating', '-duration')\n entries = models.Entry.objects.filter(event__in=events)\n entries = entries.select_related('event')\n context['entry_list'] = entries.order_by('-datetime')\n duration = entries.duration_over_time()\n duration_data = duration_monthly_chart_data(duration)\n context['duration_data'] = json.dumps(duration_data)\n return context\n\n\nclass TagListView(TagMixin, ListView):\n queryset = models.Tag.objects.all()\\\n .annotate_duration()\\\n .annotate_rating()\\\n .extra(select={'upper': 'upper(value)'})\\\n .order_by('upper')\n paginate_by = 100\n","sub_path":"web/tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"361630249","text":"# U03_EX11_SumofNaturalNumbers.py\n#\n# Author: Will Baschab\n# Course: Coding for OOP\n# Section: A2\n# Date: 29 Sep 2018\n# IDE: PyCharm\n#\n# Assignment Info\n# Exercise: 11\n# Source: Python Programming\n# Chapter: 03\n#\n# Program Description\n# This program determines the sum of the first n natural numbers\n# with n being inputted by the user.\n#\n#\n# Algorithm (pseudocode)\n# 1. print introduction\n# 2. get amount of terms from user input\n# 3. initialize total at 0\n# 4. begin for loop in range of terms\n# 5. total = total + (i + 1)\n# 6. 
print total in complete sentence\n#\n\n\ndef main():\n\n print(\"\\nThis program determines the sum of the first n natural numbers\",\n \"\\nwith n being inputted by the user.\")\n\n terms = int(input(\"\\nEnter the number of first natural numbers to sum: \"))\n\n total = 0\n\n for i in range(terms):\n total = total + (i + 1)\n\n print(\"\\nThe total of the first\", terms, \"natural numbers is\", str(total) + \".\")\n\n\nmain()\n","sub_path":"Chapter03/U03_EX11_SumofNaturalNumbers.py","file_name":"U03_EX11_SumofNaturalNumbers.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"632488924","text":"\"\"\"Search API.\n\nIt's assumed that the API is used with an object stored in a\nRelStorage with a Postgres back end.\n\"\"\"\n\nimport re\nfrom ZODB.utils import p64\n\ndef _try_to_close_cursor(cursor):\n try:\n cursor.close()\n except Exception:\n pass\n\ndef search(conn, query, *args, **kw):\n \"\"\"Search for newt objects using an SQL query.\n\n Query parameters may be provided as either positional\n arguments or keyword arguments. They are inserted into the\n query where there are placeholders of the form ``%s`` for\n positional arguments or ``%(NAME)s`` for keyword arguments.\n\n The query results must contain the columns ``zoid`` and\n ``ghost_pickle``. It's simplest and costs nothing to simply\n select all columns (using ``*``) from the ``newt`` table.\n\n A sequence of newt objects is returned.\n \"\"\"\n if kw:\n if args:\n raise TypeError(\"Only positional or keyword arguments can be used,\"\n \" not both\")\n args = kw\n get = conn.ex_get\n cursor = conn._storage.ex_cursor()\n try:\n cursor.execute(\"select zoid, ghost_pickle from (%s)_\" % query,\n args or kw)\n return [get(p64(zoid), ghost_pickle) for (zoid, ghost_pickle) in cursor]\n finally:\n _try_to_close_cursor(cursor)\n\ndef search_batch(conn, query, args, batch_start, batch_size):\n \"\"\"Query for a batch of newt objects.\n\n Query parameters are provided using the ``args``\n argument, which may be a tuple or a dictionary. They are\n inserted into the query where there are placeholders of the\n form ``%s`` for an arguments tuple or ``%(NAME)s`` for an\n arguments dict.\n\n The ``batch_start`` and ``batch_size`` arguments are used to\n specify the result batch. 
An ``ORDER BY`` clause should be\n used to order results.\n\n The total result count and sequence of batch result objects\n are returned.\n \"\"\"\n query = \"\"\"\n select zoid, ghost_pickle, count(*) over()\n from (%s) _\n offset %s limit %s\n \"\"\" % (query, batch_start, batch_size)\n get = conn.ex_get\n cursor = conn._storage.ex_cursor()\n try:\n cursor.execute(query, args)\n count = 0\n result = []\n for zoid, ghost_pickle, count in cursor:\n result.append(get(p64(zoid), ghost_pickle))\n return count, result\n finally:\n _try_to_close_cursor(cursor)\n\n\ntext_extraction_template = \"\"\"\\\ncreate or replace function %s(state jsonb) returns tsvector as $$\ndeclare\n text text;\n result tsvector;\nbegin\n if state is null then return null; end if;\n\"\"\", \"\"\"\\\n return result;\nend\n$$ language plpgsql immutable;\n\"\"\"\n\ndef _texts(texts, exprs, weight=None):\n if not exprs:\n return\n\n if isinstance(exprs, str):\n exprs = (exprs, )\n\n first_block = not texts\n\n first = True\n for expr in exprs:\n if identifier(expr):\n expr = \"state ->> '%s'\" % expr\n\n text = \"coalesce(%s, '')\" % expr\n if first:\n first = False\n else:\n text = \"text || \" + text\n texts.append(\" text = %s;\" % text)\n\n tsvector = 'to_tsvector(text)'\n if weight:\n tsvector = \"setweight(%s, '%s')\" % (tsvector, weight)\n\n if not first_block:\n tsvector = \"result || \" + tsvector\n\n texts.append(\" result := %s;\\n\" % tsvector)\n\n\nidentifier = re.compile(r'\\w+$').match\ndef create_text_index_sql(fname, D=None, C=None, B=None, A=None):\n \"\"\"Compute and return SQL to set up a newt text index.\n\n The resulting SQL contains a statement to create a\n `PL/pgSQL `_\n function and an index-creation function that uses it.\n\n The first argument is the name of the function to be generated. The\n second argument is a single expression or property name or a\n sequence of expressions or property names. If expressions are\n given, they will be evaluated against the newt JSON ``state``\n column. Values consisting of alphanumeric characters (including\n underscores) are threaded as names, and other values are treated\n as expressions.\n\n Additional arguments, ``C``, ``B``, and ``A`` can be used to\n supply expressions and/or names for text to be extracted with\n different weights for ranking. See:\n https://www.postgresql.org/docs/current/static/textsearch-controls.html#TEXTSEARCH-RANKING\n \"\"\"\n texts = []\n _texts(texts, D)\n _texts(texts, C, 'C')\n _texts(texts, B, 'B')\n _texts(texts, A, 'A')\n\n if not texts:\n raise TypeError(\"No text expressions were specified\")\n\n texts.insert(0, text_extraction_template[0] % fname)\n texts.append(text_extraction_template[1])\n texts.append(\"create index newt_%s_idx on newt using gin (%s(state));\\n\"\n % (fname, fname))\n return '\\n'.join(texts)\n\ndef create_text_index(conn, fname, D, C=None, B=None, A=None):\n \"\"\"Set up a newt full-text index.\n\n The ``create_text_index_sql`` method is used to compute SQL, which\n is then executed to set up the index. 
(This can take a long time\n    on an existing database with many records.)\n\n    The SQL is executed against the database associated with the given\n    connection, but a separate connection is used, so its execution\n    is independent of the current transaction.\n    \"\"\"\n    conn, cursor = conn._storage.ex_connect()\n    sql = create_text_index_sql(fname, D, C, B, A)\n    try:\n        cursor.execute(sql)\n        conn.commit()\n    finally:\n        try:\n            cursor.close()\n        except Exception:\n            pass\n        try:\n            conn.close()\n        except Exception:\n            pass\n","sub_path":"src/newt/db/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"474345234","text":"import ctypes\r\nimport string\r\nimport sys\r\n\r\ndef get_wavelength_information(pSpecCore):\r\n    \"\"\"Get wavelength information\r\n\r\n    This function returns wavelength range information for spectrum data.\r\n\r\n    * \\brief Get wavelength information data.\r\n    *\r\n    * This function returns wavelength range information for spectrum data.\r\n    *\r\n    * \\code\r\n    * double start_wavelength, end_wavelength, wavelength_interval;\r\n    * int ret_val = csGetWavelengthInfo(&start_wavelength, &end_wavelength, &wavelength_interval);\r\n    * \\endcode\r\n    *\r\n    * \\param start_wavelength - double pointer to start wavelength [OUT]\r\n    * \\param end_wavelength - double pointer to end wavelength [OUT]\r\n    * \\param wavelength_interval - double pointer to wavelength interval [OUT]\r\n    *\r\n    * \\return\r\n    * Returns one numeric value: NSP_RETURN_VALUE_SUCCESS or NSP_RETURN_VALUE_FAILURE.\r\n    * - NSP_RETURN_VALUE_SUCCESS (1)\r\n    * - NSP_RETURN_VALUE_FAILURE (-1)\r\n    \"\"\"\r\n\r\n    Start_Wavelength = ctypes.c_double()\r\n    End_Wavelength = ctypes.c_double()\r\n    Interval_Wavelength = ctypes.c_double()\r\n\r\n    ret = pSpecCore.csGetWavelengthInfo(ctypes.byref(Start_Wavelength), ctypes.byref(End_Wavelength), ctypes.byref(Interval_Wavelength))\r\n\r\n    if ret <= 0:\r\n        print (\"[PythonPrismError] Getting Wavelength Information Failed!\")\r\n        return (-1, -1, -1)\r\n    else:\r\n        print (\"[PythonPrism] (StartWL, EndWL , IntervalWL) : (\", Start_Wavelength.value, \" , \", End_Wavelength.value, \" , \", Interval_Wavelength.value, \" )\")\r\n        return (Start_Wavelength, End_Wavelength, Interval_Wavelength)","sub_path":"NSP32_SDK/wrappers/python/wrapper_python3/core/get_wavelength_information.py","file_name":"get_wavelength_information.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"248840827","text":"import netaddr\nfrom io import StringIO\nfrom netaddr import *\nfrom os import listdir\nfrom os.path import isfile, join\n\n\nclass Subnets(object):\n    def __init__(self, subnets):\n        self.subnets = subnets\n\n    @classmethod\n    def build_from_directory(cls, directory_path):\n        \"\"\"\n\n        :param directory_path:\n        :return: A Subnets object initialised with a list of all the subnets contained in the files in a directory.\n\n        >>> s = Subnets.build_from_directory('./data/test')\n        >>> s.subnets\n        [IPNetwork('192.136.55.0/24'), IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/24')]\n        \"\"\"\n        all_subnets = []\n        for file_path in Subnets.get_files_in_directory(directory_path):\n            subnets = Subnets.build_subnet_list(file_path)\n            all_subnets += subnets\n        return cls(all_subnets)\n\n    @staticmethod\n    def get_files_in_directory(directory_path):\n        \"\"\"\n\n        :param directory_path: The path of a directory\n        :return: A list of paths of files in the directory\n\n
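The NSP32 wrapper above shows the usual ctypes out-parameter idiom: allocate c_double holders, pass them with byref(), and read .value afterwards. The same pattern in isolation, against a hypothetical shared library handle:

import ctypes

def read_range(lib):
    # `lib` stands in for a loaded ctypes.CDLL exposing
    # csGetWavelengthInfo as in the wrapper above; it is not defined here.
    start = ctypes.c_double()
    end = ctypes.c_double()
    step = ctypes.c_double()
    ret = lib.csGetWavelengthInfo(
        ctypes.byref(start), ctypes.byref(end), ctypes.byref(step))
    if ret <= 0:
        return (-1, -1, -1)
    return (start.value, end.value, step.value)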
>>> Subnets.get_files_in_directory('.')\n ['./subnet.py']\n\n \"\"\"\n file_list = []\n for filename in listdir(directory_path):\n file_path = join(directory_path, filename)\n if isfile(file_path):\n file_list.append(file_path)\n return file_list\n\n @staticmethod\n def build_subnet_list(file_path):\n with open(file_path, 'r') as file:\n return Subnets.build_subnet_list_from_file(file)\n\n @staticmethod\n def build_subnet_list_from_file(file):\n \"\"\"\n Build a list of subnets from a file.\n\n :param file: A file of text lines\n :return: A list of subnets\n\n >>> input = StringIO(\"103.214.228.0/24\\\\n192.136.54.0/24\\\\n\")\n >>> Subnets.build_subnet_list_from_file(input)\n [IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/24')]\n\n \"\"\"\n subnets = []\n for line in file:\n subnets.append(IPNetwork(line.strip()))\n return subnets\n\n def merge(self):\n \"\"\"\n Merge a list of subnets combining adjacent subnets.\n\n :param subnets: A list of subnets\n :return: A list of merged subnets\n\n >>> s = Subnets([IPNetwork('192.136.55.0/24'), IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/24')])\n >>> s.merge()\n [IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/23')]\n \"\"\"\n return netaddr.cidr_merge(self.subnets)\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","sub_path":"network/subnet.py","file_name":"subnet.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"612573956","text":"from django.db import IntegrityError\nfrom django.db.models import F, Q, Max\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.core.urlresolvers import reverse\nfrom django.core.files import File\nfrom django.core.files.temp import NamedTemporaryFile\nfrom django.forms import ValidationError\nfrom django.forms.models import inlineformset_factory\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import RequestContext, loader\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\n\n\nfrom general.decorators import login_required_403\nfrom models import *\nfrom forms import AlbumForm, AlbumGroupForm, EditAlbumFormAjax, PickAlbumForm, OrderAlbumForm, UploadFromURLForm\nfrom utils import resize, admin_mode, get_default_img_size\n\n\nimport itertools\nimport json\nimport urllib2\nfrom urlparse import urlparse\n\nGroupFormset = inlineformset_factory(Album, AlbumGroup, form=AlbumGroupForm, extra=1, can_delete=False)\n\n@login_required_403\ndef new_album(request):\n \"\"\" Deprecated \"\"\"\n error = None\n if request.method == \"POST\": #submission of new album\n form = AlbumForm(request.POST, user=request.user)\n if form.is_valid():\n album = form.save(commit=False)\n album.user = request.user\n try:\n album.validate_unique()\n album.set_order()\n album.save()\n return HttpResponse(\"
\" % (album.title, album.id))\n except ValidationError:\n error = _(\"You have used this album title before. Make sure to pick an unique title.\")\n else: #request for rendered form\n album = Album(user=request.user)\n form = AlbumForm(instance=album, user=request.user, initial={'user': request.user})\n return render(request, 'albums/ajax/new_album.html', {'form': form, 'error': error})\n\n@login_required_403\ndef uploadify(request):\n # Processing of each uploaded image\n albumform = PickAlbumForm(request.user, request.POST)\n # import pdb; pdb.set_trace()\n\n import logging\n logging.info(request.FILES)\n\n if albumform.is_valid():\n album = albumform.cleaned_data['album']\n max_order = Photo.objects.filter(album=album).aggregate(Max('order'))['order__max'] or 0\n img = request.FILES['Filedata']\n path = 'albums/%s/%s/' % (request.user.id, album.id) #/media/albums///.jpg\n # get the resizing dimensions from the preferences #TODO this might move to utils in the future\n preferences = Preferences.get_or_create(request.user)\n resize_dimensions = get_default_img_size(preferences)\n img_data = resize(img, upload_to=path, sizes_data=[resize_dimensions])\n\n # if img_data is not None:\n for data in img_data:\n photo = Photo(user=request.user, album=album, width=data[1], height=data[2])\n photo.image = data[0]\n photo.order = max_order + 1\n photo.save()\n p_id = photo.id\n return HttpResponse('%s' % p_id, mimetype=\"text/plain\") #return the photo id\n # else:\n # return HttpResponse(_('File extension not in valid extensions: \"%(extensions)s\".' % {\n # 'extensions': \",\".join(settings.VALID_IMG_EXTENSIONS)\n # })\n # )\n else:\n return HttpResponse()\n\n### uploading images from urls\n@login_required_403\ndef upload_url(request):\n albumform = PickAlbumForm(request.user, request.POST)\n urlform = UploadFromURLForm(request.POST)\n if albumform.is_valid() and urlform.is_valid():\n url = urlform.cleaned_data['url']\n album = albumform.cleaned_data['album']\n name = urlparse(url).path.split('/')[-1]\n\n tmp_img = NamedTemporaryFile(delete=True)\n tmp_img.write(urllib2.urlopen(url).read())\n tmp_img.flush()\n\n max_order = Photo.objects.filter(album=album).aggregate(Max('order'))['order__max'] or 0\n path = 'albums/%s/%s/' % (request.user.id, album.id)\n\n photo = Photo(user=request.user, album=album)\n photo.image.save(name, File(tmp_img))\n photo.image.open()\n\n # get the resizing dimensions from the preferences #TODO this might move to utils in the future\n preferences = Preferences.get_or_create(request.user)\n resize_dimensions = get_default_img_size(preferences)\n img_data = resize(photo.image, upload_to=path, sizes_data=[resize_dimensions], overwrite=True)\n\n for data in img_data:\n photo.width=data[1]\n photo.height=data[2]\n photo.order = max_order + 1\n photo.save()\n p_id = photo.id\n return HttpResponse(p_id, mimetype=\"text/plain\")\n return render(request, 'albums/uploadify_url.html', {'urlform': urlform})\n\n### search function\ndef search(request):\n inputresults = request.GET.__getitem__('term').split(' ')\n query = []\n for value in inputresults:\n q = Q(title__icontains=value) | Q(description__icontains=value) | Q(user__username__icontains=value)\n query.append(q)\n if len(query) > 0 and len(query) < 10:\n if not request.user.is_authenticated() or not admin_mode(request.user):\n query.append(Q(public=True))\n albums = Album.objects.filter(trash=False, *query).order_by('title')\n else:\n return HttpResponse()\n output = []\n for album in albums:\n label = mark_safe(u\"%s 
\\u2022 %s\" % (album.__unicode__(), album.user.get_profile().forum_nickname))\n output.append({\n \"id\": album.id,\n \"label\": label,\n \"value\": album.__unicode__(),\n \"url\": album.get_absolute_url()\n })\n return HttpResponse(json.dumps(output))\n\n### set album_cover\n@login_required_403\ndef set_cover(request):\n if request.method == \"POST\":\n p_id = request.POST['photo']\n try:\n p_id = int(p_id)\n if admin_mode(request.user):\n photo = get_object_or_404(Photo, pk=p_id)\n else:\n photo = get_object_or_404(Photo, pk=p_id, user=request.user)\n photo.album.cover = photo\n photo.album.save()\n return HttpResponse(1)\n except ValueError: #not an integer\n pass\n return HttpResponse()\n\n@login_required_403\ndef delete_photo(request):\n if request.method == \"POST\":\n p_id = request.POST['photo']\n try:\n p_id = int(p_id)\n if admin_mode(request.user):\n photo = get_object_or_404(Photo, pk=p_id)\n else:\n photo = get_object_or_404(Photo, pk=p_id, user=request.user)\n photo.trash = True\n photo.save()\n return HttpResponse(1)\n except ValueError: #not an integer\n pass\n return HttpResponse()\n\n@login_required_403\ndef reorder(request):\n form = OrderAlbumForm(request.user, request.POST)\n if form.is_valid():\n album = form.cleaned_data['album']\n album_before = form.cleaned_data['album_before']\n album_after = form.cleaned_data['album_after']\n\n if album.writable_to in [\"g\", \"o\"]:\n q = Q(writable_to=\"g\") | Q(writable_to=\"o\")\n else:\n q = Q(writable_to=\"u\")\n if album_after and album.order > album_after.order: # moved forward\n lower = album_after.order\n upper = album.order\n album.order = lower\n\n albums_to_reorder = Album.objects.filter(q, order__gte=lower, order__lt=upper)\n albums_to_reorder.update(order=(F('order') + 1))\n album.save()\n\n elif album_before and album_before.order > album.order: # moved backwards\n lower = album.order\n upper = album_before.order\n album.order = upper\n\n albums_to_reorder = Album.objects.filter(q, order__gt=lower, order__lte=upper)\n albums_to_reorder.update(order=(F('order') - 1))\n album.save()\n elif ((album_before and album_before.order == album.order) \\\n or (album_after and album_after.order == album.order)):\n order = album.order\n if album_before:\n album.order = (F('order') + 1)\n q1 = Q(order__exact=order, title__gt=album.title)\n q2 = Q(order__gt=order)\n albums_to_reorder = Album.objects.filter(q, Q(q1 | q2)).exclude(pk=album_before.id)\n albums_to_reorder.update(order=(F('order') + 2))\n album.save()\n elif album_after:\n q1 = Q(order__exact=order, title__gt=album.title)\n q2 = Q(order__gt=order)\n album_after.order = (F('order') + 1)\n albums_to_reorder = Album.objects.filter(q, Q(q1 | q2)).exclude(pk=album_after.id)\n albums_to_reorder.update(order=(F('order') + 2))\n album_after.save()\n return HttpResponse()\n\n@login_required_403\ndef get_all_own_albums(request):\n own_albums = Album.objects.filter(user=request.user, writable_to='u', trash=False)\n return render(request, 'albums/albums_list/albums_li.html', {'albums': own_albums})\n\n@login_required_403\ndef edit_album(request):\n admin = admin_mode(request.user)\n editform, formset, photos = None, None, None\n if request.method == \"POST\":\n form = PickAlbumForm(request.user, request.POST, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n formset = GroupFormset(request.POST, instance=album)\n editform = EditAlbumFormAjax(request.POST, instance=album, user=request.user)\n photos = 
editform.fields[\"cover\"].queryset.select_related('album', 'album__cover')\n if editform.is_valid() and (album.user == request.user or admin):\n editform.save()\n \"\"\"\n try:\n album.validate_unique()\n album.save()\n except ValidationError:\n error = _(\"You have used this album title before. Make sure to pick an unique title.\")\n print editform._errors\n \"\"\"\n if formset.is_valid() and album.writable_to == 'g':\n formset.save()\n album = get_object_or_404(Album, pk=album.id);\n # TODO: convert to JSON - see new_album_jquery-ui\n return render(request, 'albums/ajax/album_li.html', {'album': album, 'custom_id': 'temp'})\n else:\n form = PickAlbumForm(request.user, request.GET, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n if request.user.id == album.user_id or admin:\n editform = EditAlbumFormAjax(instance=album, user=request.user)\n #formset = GroupFormset(instance=album)\n photos = editform.fields[\"cover\"].queryset.select_related('user', 'album', 'album__cover')\n else:\n return HttpResponse(_('This event has been logged'))\n else:\n return HttpResponse(form.as_p())\n return render(request, 'albums/ajax/edit_album.html', {'form': editform, 'formset': formset, 'photos': photos})\n\n@login_required_403\ndef edit_albumgroup(request):\n GroupFormset = inlineformset_factory(\n Album, AlbumGroup,\n form=AlbumGroupForm, extra=1,\n can_delete=False\n )\n admin = admin_mode(request.user)\n form = PickAlbumForm(request.user, request.GET, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n formset = GroupFormset(instance=album)\n return render(request, 'albums/ajax/group_rights.html', {'formset': formset})\n return HttpResponse(0)\n\n@login_required_403\ndef remove_album(request):\n status = 'fail'\n form = PickAlbumForm(request.user, request.POST)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n album.trash = True\n album.title = \"trash_%s_%s\" % (datetime.now().strftime('%d%m%Y_%H.%M.%s'), album.title)\n album.save()\n status = 'ok'\n return HttpResponse(status)\n\n@login_required_403\ndef new_album_jquery_ui(request):\n if request.method == \"POST\":\n try:\n from_page = request.POST['from-page']\n except KeyError:\n from_page = None\n\n new_album = Album(user=request.user)\n form = AlbumForm(request.POST, user=request.user, instance=new_album)\n t = loader.get_template(u'albums/ajax/new_album_jquery-ui.html')\n\n context = {'from_page': from_page}\n if form.is_valid():\n album = form.save()\n context['form'] = AlbumForm(\n instance = Album(user=request.user),\n user=request.user\n )\n rendered_form = t.render(RequestContext(request, context))\n\n output = {\n 'status': 1,\n 'form': rendered_form,\n }\n\n if from_page == u\"upload\": # return option to append to select\n option = '' % (\n album.id,\n album.__unicode__()\n )\n output['option'] = mark_safe(option)\n elif from_page == u\"my-albums-list\": # return li element to place in list\n t2 = loader.get_template(u'albums/album_li.html')\n album_li = t2.render(RequestContext(request, {'album': album}))\n output['album_li'] = album_li\n output['album_write_mode'] = album.writable_to\n else:\n context['form'] = form\n rendered_form = t.render(RequestContext(request, context))\n output = {'form': rendered_form, 'status': 0}\n return HttpResponse(json.dumps(output), mimetype=\"application/json\")\n\n@login_required_403\ndef get_title(request):\n title = ''\n form = PickAlbumForm(request.user, request.GET)\n if form.is_valid():\n album = 
form.cleaned_data[\"album\"]\n title = album.title\n return HttpResponse(title)\n\n@login_required_403\ndef get_covers(request):\n admin = admin_mode(request.user)\n form = PickAlbumForm(request.user, request.GET, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n photos = Photo.objects.select_related('user', 'album').filter(album=album, trash=False).order_by('order')\n return render(request, 'albums/ajax/album_covers.html', {'album': album, 'photos':photos})\n return HttpResponse(0)\n\n@login_required_403\ndef restore_album(request):\n form = PickAlbumForm(request.user, request.POST, trash=True)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n album.trash = False\n album.title = album.clean_title\n\n i = itertools.count(2)\n saved = False\n while(not saved):\n try:\n album.save()\n saved = True\n return HttpResponse('ok')\n except IntegrityError:\n album.title = \"%s_%s\" % (album.clean_title, i.next())\n\n return HttpResponse(''+form.as_table()+'
')\n\n\n### CLASS BASED VIEWS ###\nfrom django.views.generic import View\nfrom django.views.generic.detail import SingleObjectMixin\n\nclass RotateView(SingleObjectMixin, View):\n \"\"\" View taking a photo, rotating it, and returning the success status when done \"\"\"\n #TODO: unittest!\n model = Photo\n\n def get_queryset(self):\n qs = super(RotateView, self).get_queryset()\n if not admin_mode(self.request.user):\n qs = qs.filter(user=self.request.user)\n return qs\n\n def post(self, request, *args, **kwargs):\n photo = self.get_object()\n direction = self.request.POST['direction']\n if direction == 'cw':\n photo.rotate_right()\n elif direction == 'ccw':\n photo.rotate_left()\n else:\n response = {'result': 'Invalid direction'}\n\n response = {'result': 'success', 'ok': True}\n return HttpResponse(json.dumps(response), mimetype='application/json')\n","sub_path":"albums/ajax_views.py","file_name":"ajax_views.py","file_ext":"py","file_size_in_byte":16355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"124169367","text":"__author__ = 'altvod'\n\nfrom ..format import FormatHandler\n\n\nclass HTMLFormatHandler(FormatHandler):\n def __init__(self, html_indent=None, **kwargs):\n super(HTMLFormatHandler, self).__init__(**kwargs)\n self.indent = html_indent\n self.tab = 0\n\n def accept_item(self, t, item):\n return t == 'html_tag'\n\n def _format_block(self, block, formatter):\n text = ''\n for child in block:\n if isinstance(child, str):\n if self.indent is not None:\n child = ' '*self.tab + child + '\\n'\n text += child\n\n elif isinstance(child, list):\n text += self._format_block(child, formatter)\n\n else:\n text += formatter.format_item(child)\n\n return text\n\n def format_item(self, formatter, t, item):\n text = ''\n if item[\"tag\"] == 'html':\n # Add the DOCTYPE delcaration before the tag\n text += ''.format(\n doctype=item.get('doctype', 'html')\n )\n if self.indent is not None:\n text += '\\n'\n\n if self.indent is not None:\n text += ' '*self.tab\n\n text += '<{0}'.format(item[\"tag\"])\n for attr_name, attr_value in item.get('@', {}).items():\n text += ' {0}=\"{1}\"'.format(attr_name, attr_value)\n\n if 'content' in item and item[\"content\"]:\n text += '>'\n if self.indent is not None:\n text += '\\n'\n self.tab += self.indent\n\n text += self._format_block(item[\"content\"], formatter)\n\n if self.indent is not None:\n self.tab -= self.indent\n text += ' '*self.tab\n\n text += ''.format(item[\"tag\"])\n\n else:\n text += \" />\"\n\n if self.indent is not None:\n text += '\\n'\n\n return text\n","sub_path":"docscheme/html/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"637068899","text":"class BinTreeNode(object):\r\n def __init__(self, value):\r\n self.value=value\r\n self.left=None\r\n self.right=None\r\n\r\ndef tree_insert(tree, item):\r\n if tree==None:\r\n tree=BinTreeNode(item)\r\n else:\r\n if(item < tree.value):\r\n if(tree.left==None):\r\n tree.left=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.left,item)\r\n else:\r\n if(tree.right==None):\r\n tree.right=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.right,item)\r\n return tree\r\n\r\ndef post_order(tree):\r\n if(tree.left!=None):\r\n post_order(tree.left)\r\n if(tree.right!=None):\r\n post_order(tree.right)\r\n print (tree.value)\r\n\r\ndef in_order(tree):\r\n ''' \r\n An iterative function to sort nodes in a tree.\r\n'''\r\n 
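The reorder() view in the albums module above renumbers siblings entirely in SQL via F expressions. A reduced sketch of its moved-forward branch; `Album` is assumed to be that app's model with an integer order field:

from django.db.models import F

def move_forward(Album, album, new_order):
    # Shift every sibling between the target slot and the album's old
    # slot back by one in a single UPDATE, then drop the album in place.
    Album.objects.filter(
        order__gte=new_order, order__lt=album.order
    ).update(order=F('order') + 1)
    album.order = new_order
    album.save()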
currentNode = tree #Set currentNode to root of binary tree\r\n emptyStack = [] #Initializing stack\r\n completed = 0\r\n \r\n while (completed !=1):\r\n #Keeps looking through left nodes first.\r\n if currentNode != None:\r\n emptyStack.append(currentNode) #Appends empty stack with current node.\r\n currentNode = currentNode.left #Keeps looking through left-hand nodes\r\n #Goes back from empty subtree to visit node at the top of stack.\r\n #If node is empty, complete.\r\n else:\r\n if (len(emptyStack)>0):\r\n currentNode = emptyStack.pop()\r\n print (currentNode.value)\r\n currentNode = currentNode.right\r\n else:\r\n completed = 1\r\n\r\nif __name__ == '__main__':\r\n t=tree_insert(None,6)\r\n tree_insert(t,10)\r\n tree_insert(t,-12)\r\n tree_insert(t,5)\r\n tree_insert(t,2)\r\n tree_insert(t,3)\r\n tree_insert(t,4)\r\n tree_insert(t,11)\r\n tree_insert(t,110)\r\n in_order(t)\r\n","sub_path":"Task 12 - Week 6.py","file_name":"Task 12 - Week 6.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"80328573","text":"\"\"\" This function creates a thumbnail from an image and registers the thumbnail with the DB\n\nFunction will take the following steps:\n Create a thumbnail directory\n Utilize FFMPEG to create a thumbnail\n With a video, we want to take ~25% through a video just so it's not the beginning\n NOTE: Can we take full storyboard at some point?\n Update the database entry with a Thumbnail section\n Return success or failure\n\nauthor: Michael Schuler [mischuler@deloitte.com]\n\n\"\"\"\n\nimport boto3\nfrom botocore.client import Config\nimport botocore\nimport sys\nimport os\nimport subprocess\nimport string\nimport simplejson as json\nimport math\nimport fnmatch\n\nimport logging\nimport logging.config\n\nsys.path.insert(0, '/Assets/sharedLibraries')\nimport parseHelper\nimport databaseHelper\n\n\n\ndef main(args):\n\n DOMAIN = 'ITD'\n TASKLIST = 'default'\n VERSION = '1'\n\n taskName = os.path.basename(__file__)[:-3]\n\n logging.config.fileConfig('/Assets/sharedLibraries/logging_config.ini')\n logging.debug(\"Creating SWF boto client\")\n botoConfig = Config(connect_timeout=50, read_timeout=70) # suggestion is the read is higher than connect\n swf = boto3.client('swf', config=botoConfig)\n logging.debug(\"Created SWF boto client: %s\", swf)\n \n while True:\n\n task = swf.poll_for_activity_task(\n domain=DOMAIN,\n taskList={'name': taskName},\n identity='%s-01' %(taskName)\n )\n\n if 'taskToken' not in task:\n logging.info(\"%s - Poll timed out, no new task. Repoll\", taskName)\n \n # Run the operation\n else:\n taskToken = task['taskToken']\n workID = task['workflowExecution']['workflowId']\n logging.info(\"[%s] New request for %s\", workID, taskName)\n\n INPUT = json.loads(task['input'])\n asset = INPUT['asset']\n dbPrimaryKey = INPUT['dbPrimaryKey']\n # Take the thumbnail 25% through the video\n \n #scale = \"640x360\"\n # Use the multipliers so that we don't distort vertical videos. This makes it generic. 
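The explicit-stack walk in in_order() above also reads well as a generator: push the left spine, pop to visit, then descend right. An equivalent sketch that yields values instead of printing them:

def in_order_values(tree):
    stack, node = [], tree
    while stack or node is not None:
        while node is not None:   # push the whole left spine
            stack.append(node)
            node = node.left
        node = stack.pop()        # visit this node
        yield node.value
        node = node.right         # then walk the right subtree

# list(in_order_values(t)) returns the inserted values in ascending order.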
\n scale = \"iw/3:ih/3\" # 1/3 gives 1920 (HD) down to 640\n fps = 1 # Set the number of frames to be once per second\n newDir = \"thumbnails\"\n (filePath, fileName, fileExt) = parseHelper.splitFilename(asset)\n subDir = parseHelper.createDir(filePath, newDir)\n \n # We require the %d to keep the file names incremented\n # Note that we need to escape the percentage sign by using another %, hence the double %\n outfile = '%s_thumbnail_%%d.jpg' % (fileName)\n vtt = '%s.vtt' % (fileName)\n \n # Parameters are\n # -y for\n # -i for Input\n # -vf, fps=1,scale= for the video filter stating we want to take every one second\n cmd = ['ffmpeg'\n ,'-y'\n ,'-i', asset\n ,'-vf', 'fps=%s,scale=%s' %(fps, scale)\n ,'-loglevel', 'fatal'\n ,'%s/%s' %(subDir, outfile)\n ]\n\n logging.debug(\"[%s] Execute video thumbnail creation: %s\", workID, cmd)\n try:\n output = subprocess.check_output(cmd)\n \n # Start setting the parameters needed to update the thumbnail\n \n # Comment block is staying for reference sake\n '''# Call the update function\n # The \"thumbnails\" map will need to be created if it doesn't exist (Note: It shouldn't at this point)\n # A validation exception will be thrown, and when this is thrown, we will create an empty map and try it again\n try:\n response = databaseHelper.updateEntry(key, updateExpression, expressionValues) \n \n except botocore.exceptions.ClientError as err:\n if err.response['Error']['Code'] == 'ValidationException':\n \n \n response = databaseHelper.updateEntry(key, 'set thumbnails = :t', {':t' : {}})\n response = databaseHelper.updateEntry(key, updateExpression, expressionValues)\n '''\n \n # After the thumbnails are created, we need to do two things:\n # OLD # 1. Create the storyboard object which is [http://docs.brightcove.com/en/perform/brightcove-player/guides/thumbnails-plugin.html#collectimages]\n # 1. Create the storyboard VTT file (https://support.jwplayer.com/customer/portal/articles/1407439-adding-preview-thumbnails)\n # 2. 
We also need to identify the thumbnail for the video which we will take a percentage of the way through the video\n\n \n #STORYBOARD = {}\n thumbnailTime = .25 # Pick the thumbnail that's 25% of the way through the video\n counter = 0\n \n for thumb in os.listdir(subDir):\n if fnmatch.fnmatch(thumb, '*_thumbnail_*.jpg'): # Match files in the directory that are the thumbnails\n #sequenceNum = thumb[thumb.rfind('_')+1:-4] # filename_thumbnail_$frame.jpg\n #STORYBOARD[sequenceNum] = {'src' : '/%s/%s' %(newDir, thumb) }\n counter = counter + 1\n\n # Open the VTT file and write\n logging.debug(\"[%s] Writing VTT file: %s\", workID, vtt)\n vttFile = open('%s/%s' %(subDir, vtt), 'w')\n vttFile.write(\"WEBVTT\")\n # The counter represents how many files of FPS we have -- range is COUNTER*FPS --> (COUNTER+1)* fps\n # FPS references the frames per second so if we put (1/60), that means a frame EVERY MINUTE\n # Therefore, we need to invest the FPS\n # Use %02d to PAD the numbers \n \n baseURL = \"https://dnt4vq51jg2tj.cloudfront.net\" # There needs to be a better way then the full URL\n for i in range(0,counter):\n startSecond = i * (1/fps)\n endSecond = (i + 1) * (1/fps)\n startSpan = '%02d:%02d:%02d.000' % ( startSecond / 3600, startSecond / 60 % 60, startSecond % 60) \n endSpan = '%02d:%02d:%02d.000' % ( endSecond / 3600, endSecond / 60 % 60, endSecond % 60)\n \n \n thumbSpan = '%s/%s/%s/%s_thumbnail_%d.jpg' % (baseURL, fileName, newDir, fileName,i + 1)\n \n vttFile.write(\"\\n\\n%s --> %s\\n%s\" % (startSpan, endSpan, thumbSpan))\n \n vttFile.close()\n logging.debug(\"[%s] Wrote VTT file: %s\", workID, vtt)\n \n index = str(math.trunc(counter * thumbnailTime))\n logging.debug(\"[%s] Key frame identified in index: %s\", workID, index)\n \n updateExpression = 'set thumbnail = :t, storyboard = :s'\n thumbnail = '/%s/%s_thumbnail_%s.jpg' % (newDir, fileName,index)\n \n # THERE MUST BE A DYNAMIC WAY TO DO THIS BUT I DONT KNOW YET\n storyboard = '/%s/%s' %(newDir, vtt)\n \n '''expressionValues = {\n ':t' : STORYBOARD[index]['src'],\n ':s' : STORYBOARD\n }'''\n \n expressionValues = {\n ':t' : thumbnail,\n ':s' : storyboard,\n }\n \n logging.debug(\"[%s] Update thumbnail value\", workID)\n response = databaseHelper.updateEntry(dbPrimaryKey, updateExpression, expressionValues)\n\n OUTPUT = {\n 'tool' : output,\n 'dbPrimaryKey' : dbPrimaryKey,\n 'assetClass' : INPUT['assetClass'], \n 'asset' : asset,\n }\n \n swf.respond_activity_task_completed(\n taskToken = taskToken,\n result = json.dumps(OUTPUT)\n )\n # We should catch other errors here\n except subprocess.CalledProcessError as err:\n \n result = { \n 'reason' : 'THB-0002_Error in video thumbnail creation',\n 'detail' : str(err)\n }\n \n logging.error(\"%s\", result)\n \n swf.respond_activity_task_failed(\n taskToken=taskToken,\n reason=json.dumps(result['reason']),\n details=json.dumps(result['detail'])\n )\n \n logging.info(\"[%s] %s Complete\", workID, taskName)\n\nif __name__ == '__main__':\n \n main(sys.argv)\n","sub_path":"EC2-Backup/Deprecated Code/workflowcode/createThumbnailFromVideo.py","file_name":"createThumbnailFromVideo.py","file_ext":"py","file_size_in_byte":8976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"2661653","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('request', views.send_request),\n path('approve-reject', views.approve_reject),\n path('list', views.chat_list),\n path('group/create', views.create_group),\n path('group/edit', views.edit_group),\n path('group/member/add-remove', views.add_remove_member),\n path('group/detail', views.group_details),\n path('group/update/message', views.update_last_message),\n path('group/leave', views.leave_group)\n]\n","sub_path":"chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"315091159","text":"# encoding: utf-8\n#\n# Copyright (c) 2015 Safari Books Online. All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the 3-clause BSD license. See the LICENSE file for details.\n\nfrom __future__ import unicode_literals, with_statement\n\nimport csv\nfrom datetime import datetime, timedelta\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\nfrom itertools import chain\nimport logging\nimport os\nimport time\nfrom xml.etree import ElementTree\n\nimport requests\nfrom simple_salesforce import Salesforce\n\nlogger = logging.getLogger('salesforce-bulk-api')\n\nNAMESPACE = 'http://www.force.com/2009/06/asyncapi/dataload'\n\n\ndef salesforce_session():\n \"\"\"Returns an authenticated simple_salesforce.Salesforce instance.\"\"\"\n return Salesforce(username=os.environ['SALESFORCE_USERNAME'],\n password=os.environ['SALESFORCE_PASSWORD'],\n security_token=os.environ['SALESFORCE_SECURITY_TOKEN'],\n instance=os.environ['SALESFORCE_INSTANCE'],\n sandbox=os.environ.get('SALESFORCE_SANDBOX') == 'True',\n version='34.0')\n\n\nclass SalesforceBulkJob:\n \"\"\"A Python interface to the Salesforce Bulk API.\"\"\"\n\n PUBLISHING_BATCH_SIZE = 9999\n SUPPORTED_OPERATIONS = {'insert', 'update', 'delete', 'upsert'}\n\n def __init__(self, operation, object_name, external_id_field=None, salesforce=None):\n \"\"\"Creates a new API interface to Salesforce's bulk API, from which any\n number of jobs may be created. 
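The chat/urls.py module above only defines app-local routes; wiring it into a project takes one include(). A hypothetical project-level urls.py, assuming the app is mounted under /chat/:

from django.urls import include, path

urlpatterns = [
    path('chat/', include('chat.urls')),  # e.g. /chat/group/create
]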
The operation should be one of ('insert',\n 'update', 'upsert', or 'delete'), and the object_name should be the\n proper-case name of a Salesforce object (like Lead or Contact).\"\"\"\n if not salesforce:\n salesforce = salesforce_session()\n self.session_id = salesforce.session_id\n self.async_url = (salesforce.base_url\n .replace('/data/', '/async/')\n .replace('v' + salesforce.sf_version,\n salesforce.sf_version))\n\n assert operation in self.SUPPORTED_OPERATIONS, '{} is not a valid bulk operation.'.format(operation)\n self.operation = operation\n\n supported_objects = {o['name'] for o in salesforce.describe()['sobjects']}\n assert object_name in supported_objects, '{} is not a known Salesforce object.'.format(object_name)\n self.object_name = object_name\n self.external_id_field = external_id_field\n\n self.reset()\n\n def upload(self, fields, data):\n \"\"\"Given a list of fields and a (potentially very long) iterable of\n tuples matching those fields, perform a complete upload to Salesforce\"\"\"\n self.create()\n for chunk in chunked(data, self.PUBLISHING_BATCH_SIZE):\n if chunk:\n self.add_batch(fields, chunk)\n if not self.pending_batches:\n logger.info('No batches added to job.')\n self.abort()\n return\n self.close()\n self.wait()\n\n def create(self):\n \"\"\"Creates a new Salesforce bulk Job and prepares for adding batches.\"\"\"\n assert not self.job, 'The current job is still open.'\n\n logger.info('Creating new job to %s %s', self.operation, self.object_name)\n job_request = '''\n \n {operation}\n {object_name}\n '''\n\n if self.operation == 'upsert':\n job_request += '{external_id_field}'\n\n job_request += '''\n CSV\n \n '''\n\n job_request = job_request.format(\n NAMESPACE=NAMESPACE,\n object_name=self.object_name,\n operation=self.operation,\n external_id_field=self.external_id_field\n )\n response = self.request('post', self.async_url + 'job',\n data=job_request)\n\n self.job = bulk_response_attribute(response, 'id')\n self.job_url = self.async_url + 'job/' + self.job\n self.pending_batches = []\n self.is_open = True\n\n def add_batch(self, fields, data):\n \"\"\"Given a list of fields and an iterable of tuples matching those\n fields, adds a batch of data to the current job. 
The data must be\n shorter than PUBLISHING_BATCH_SIZE rows\"\"\"\n assert self.job, 'There is no current job.'\n assert self.is_open, 'The current job is not open.'\n\n logger.info('Adding batch to job %s', self.job_url)\n response = self.request('post', self.job_url + '/batch',\n data=itercsv(fields, data),\n content_type='text/csv; charset=UTF-8')\n batch = bulk_response_attribute(response, 'id')\n self.pending_batches.append(batch)\n\n def close(self):\n \"\"\"Closes the current job, which signals to Salesforce that no further\n batches will be added to it.\"\"\"\n logger.info('Closing job %s', self.job_url)\n self.set_job_state('Closed')\n self.is_open = False\n\n def abort(self):\n \"\"\"Aborts the current job, and resets the instance\"\"\"\n logger.info('Aborting job %s', self.job_url)\n self.set_job_state('Aborted')\n self.reset()\n\n def set_job_state(self, state):\n \"\"\"Sets the current job to the given state (\"Closed\" or \"Aborted\")\"\"\"\n assert self.job, 'There is no current job.'\n assert self.is_open, 'The current job is not open.'\n\n state_request = '''\n \n {state}\n \n '''.format(NAMESPACE=NAMESPACE, state=state)\n self.request('post', self.job_url, data=state_request, expected_response=200)\n\n def wait(self):\n \"\"\"Waits for all batches of the current job to finish\"\"\"\n assert self.job, 'There is no current job.'\n assert not self.is_open, 'The current job must be closed before waiting.'\n\n self.finished_batches = []\n total = len(self.pending_batches)\n while self.pending_batches:\n finished = []\n for i, batch in enumerate(self.pending_batches):\n batch_url = self.job_url + '/batch/' + batch\n response = self.request('get', batch_url, expected_response=200)\n state = bulk_response_attribute(response, 'state')\n if state not in {'Queued', 'InProgress'}:\n finished.append(i)\n log_method = (logger.warn\n if state in {'Failed', 'Not Processed'}\n else logger.info)\n log_method('Batch %s (%s/%s) finished with state %s',\n batch_url, total - len(self.pending_batches) + len(finished), total, state)\n\n for i in sorted(finished, reverse=True):\n self.finished_batches.append(self.pending_batches.pop(i))\n\n if self.pending_batches:\n logger.info('Waiting for %s more batches to complete...', len(self.pending_batches))\n time.sleep(10)\n\n def results(self):\n assert self.job, 'There is no current job.'\n assert not self.is_open, 'The current job must be closed before getting results.'\n assert self.finished_batches is not None, 'SalesforceBulkJob.wait() should be called before getting results.'\n\n for batch in self.finished_batches:\n result_url = self.job_url + '/batch/' + batch + '/result'\n response = self.request('get', result_url, expected_response=200)\n reader = csv.reader(StringIO(response.decode('utf-8')))\n next(reader) # consume the header row\n for id, success, created, error in reader:\n yield id, success == 'true', created == 'true', error\n\n def reset(self):\n \"\"\"Resets the state of this job to that of a new instance. This *does\n not* change anything that has happened so far at Salesforce. See\n `.abort()` to cancel the currently open job.\"\"\"\n self.is_open = False\n self.job = self.job_url = self.pending_batches = self.finished_batches = None\n\n def request(self, method, url,\n data=None,\n content_type='application/xml; charset=UTF-8',\n expected_response=201):\n \"\"\"Performs an HTTP request against Salesforce's bulk API, and validates\n the expected response. 
Returns the content of the response\"\"\"\n\n headers = {'X-SFDC-Session': self.session_id}\n kwargs = {'headers': headers}\n if data is not None:\n headers['Content-Type'] = content_type\n kwargs['data'] = data\n\n RETRIES, LAST, WAIT = 3, 2, timedelta(seconds=5)\n for retry in range(RETRIES):\n try:\n response = getattr(requests, method)(url, **kwargs)\n except requests.exceptions.ConnectionError:\n if retry == LAST:\n raise\n logger.info('ConnectionError from %r %r. Retrying in %r...',\n method, url, WAIT, exc_info=True)\n else:\n if retry < LAST and response.status_code in (502, 503):\n logger.info('%r response from %r %r. Retrying in %r...',\n response.status_code, method, url, WAIT)\n else:\n break\n time.sleep(WAIT.total_seconds())\n\n if response.status_code != expected_response:\n raise Exception(('Unexpected status {} from '\n 'Salesforce async API. Details: {}'\n ).format(response.status_code, response.content))\n return response.content\n\n\ndef bulk_response_attribute(response, attribute):\n \"\"\"Given a Salesforce bulk API response bytes, and the name of an attribute,\n find it in the given document, or raise if it isn't present\"\"\"\n tree = ElementTree.fromstring(response)\n value = tree.findtext('{{{}}}{}'.format(NAMESPACE, attribute))\n if not value:\n raise Exception(('<{}> not found in Salesforce '\n 'async API response. Response: {}'\n ).format(attribute, response))\n return value\n\n\ndef chunked(iterable, size):\n \"\"\"Yields chunks of the requested size from the iterable. The final chunk\n may be smaller than size\"\"\"\n if not size:\n for item in iterable:\n yield item\n return\n\n chunk = []\n for i, item in enumerate(iterable):\n chunk.append(item)\n if len(chunk) == size:\n yield chunk\n chunk = []\n if chunk:\n yield chunk\n\ndef itercsv(headers, data):\n \"\"\"Given a list of headers name and a (potentially large) iterable of\n tuples, yield the lines of a CSV file representing that data\"\"\"\n buffer = StringIO()\n writer = csv.writer(buffer)\n\n for row in chain([headers], data):\n writer.writerow(row)\n buffer.seek(0)\n yield buffer.read().encode('utf-8')\n buffer.truncate(0)\n buffer.seek(0)\n","sub_path":"Lib/site-packages/salesforce_bulk_api.py","file_name":"salesforce_bulk_api.py","file_ext":"py","file_size_in_byte":11492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"111911272","text":"import numpy as np\nimport pandas as pd\nimport altair as alt\nimport streamlit as st\nfrom sklearn import metrics\n\nfrom app_utils import load_model, load_data, predict\nfrom preprocess.constants import FEATURES, TARGET, CONFIG_FAI\nfrom .static_fai import (\n get_aif_metric,\n compute_fairness_measures,\n get_confusion_matrix_chart,\n plot_fmeasures_bar,\n color_red,\n)\nfrom .toolkit import get_perf_measure_by_group\n\nMETRICS_TO_USE = [\"Equal opportunity\", \"Predictive parity\", \"Statistical parity\"]\n\n\ndef print_model_perf(y_val, y_pred):\n text = \"\"\n text += \"Model accuracy = {:.4f}\\n\".format(metrics.accuracy_score(y_val, y_pred))\n text += \"Weighted Average Precision = {:.4f}\\n\".format(\n metrics.precision_score(y_val, y_pred, average=\"weighted\"))\n text += \"Weighted Average Recall = {:.4f}\\n\\n\".format(\n metrics.recall_score(y_val, y_pred, average=\"weighted\"))\n text += metrics.classification_report(y_val, y_pred, digits=4)\n return text\n\n\n@st.cache\ndef prepare_pred(x_valid, y_valid, debias=False):\n # Load model\n clf = load_model(\"output/lgb_model.pkl\")\n\n # Predict on 
val data\n y_prob = predict(clf, x_valid)\n\n # st.header(\"Prediction Distributions\")\n cutoff = 0.5\n y_pred = (y_prob > cutoff).astype(int)\n\n if debias:\n raise NotImplementedError\n\n # Model performance\n text_model_perf = print_model_perf(y_valid, y_pred)\n\n return y_pred, text_model_perf\n\n\ndef fai(debias=False):\n protected_attribute = st.selectbox(\"Select protected column.\", list(CONFIG_FAI.keys()))\n\n # Load data\n valid = load_data(\"output/test.gz.parquet\").fillna(0)\n x_valid = valid[FEATURES]\n y_valid = valid[TARGET].values\n valid_fai = valid[list(CONFIG_FAI.keys())]\n\n # Get predictions\n y_pred, text_model_perf = prepare_pred(x_valid, y_valid, debias=debias)\n\n st.header(\"Model Performance\")\n st.text(text_model_perf)\n\n st.header(\"Algorithmic Fairness Metrics\")\n fthresh = st.slider(\"Set fairness deviation threshold\", 0., 1., 0.2, 0.05)\n st.write(\"Absolute fairness is 1. The model is considered fair \"\n f\"if **ratio is between {1-fthresh:.2f} and {1+fthresh:.2f}**.\")\n\n # Compute fairness measures\n privi_info = CONFIG_FAI[protected_attribute]\n aif_metric = get_aif_metric(\n valid_fai,\n y_valid,\n y_pred,\n protected_attribute,\n privi_info[\"privileged_attribute_values\"],\n privi_info[\"unprivileged_attribute_values\"],\n )\n fmeasures = compute_fairness_measures(aif_metric)\n fmeasures = fmeasures[fmeasures[\"Metric\"].isin(METRICS_TO_USE)]\n fmeasures[\"Fair?\"] = fmeasures[\"Ratio\"].apply(\n lambda x: \"Yes\" if np.abs(x - 1) < fthresh else \"No\")\n\n st.altair_chart(plot_fmeasures_bar(fmeasures, fthresh), use_container_width=True)\n \n st.dataframe(\n fmeasures[[\"Metric\", \"Unprivileged\", \"Privileged\", \"Ratio\", \"Fair?\"]]\n .style.applymap(color_red, subset=[\"Fair?\"])\n .format({\"Unprivileged\": \"{:.3f}\", \"Privileged\": \"{:.3f}\", \"Ratio\": \"{:.3f}\"})\n )\n\n st.subheader(\"Confusion Matrices\")\n cm1 = aif_metric.binary_confusion_matrix(privileged=None)\n c1 = get_confusion_matrix_chart(cm1, \"All\")\n st.altair_chart(alt.concat(c1, columns=2), use_container_width=False)\n cm2 = aif_metric.binary_confusion_matrix(privileged=True)\n c2 = get_confusion_matrix_chart(cm2, \"Privileged\")\n cm3 = aif_metric.binary_confusion_matrix(privileged=False)\n c3 = get_confusion_matrix_chart(cm3, \"Unprivileged\")\n st.altair_chart(c2 | c3, use_container_width=False)\n\n st.header(\"Annex\")\n st.subheader(\"Performance Metrics\")\n all_perfs = []\n for metric_name in [\n 'TPR', 'TNR', 'FPR', 'FNR', 'PPV', 'NPV', 'FDR', 'FOR', 'ACC',\n 'selection_rate', 'precision', 'recall', 'sensitivity',\n 'specificity', 'power', 'error_rate']:\n df = get_perf_measure_by_group(aif_metric, metric_name)\n c = alt.Chart(df).mark_bar().encode(\n x=f\"{metric_name}:Q\",\n y=\"Group:O\",\n tooltip=[\"Group\", metric_name],\n )\n all_perfs.append(c)\n \n all_charts = alt.concat(*all_perfs, columns=1)\n st.altair_chart(all_charts, use_container_width=False)\n\n st.subheader(\"Notes\")\n st.write(\"**Equal opportunity**:\")\n st.latex(r\"\\frac{\\text{FNR}(D=\\text{unprivileged})}{\\text{FNR}(D=\\text{privileged})}\")\n st.write(\"**Predictive parity**:\")\n st.latex(r\"\\frac{\\text{PPV}(D=\\text{unprivileged})}{\\text{PPV}(D=\\text{privileged})}\")\n st.write(\"**Statistical parity**:\")\n st.latex(r\"\\frac{\\text{Selection Rate}(D=\\text{unprivileged})}{\\text{Selection Rate}(D=\\text{privileged})}\")\n\n\ndef chart_cm_comparison(orig_clf_metric, clf_metric, privileged, title):\n cm1 = orig_clf_metric.binary_confusion_matrix(privileged=privileged)\n 
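The 'Fair?' column above encodes a single rule: absolute fairness is a ratio of 1, and a deviation smaller than the chosen threshold passes. The same predicate standalone:

import numpy as np

def is_fair(ratio, fthresh=0.2):
    # Mirrors the lambda used in fai() above.
    return "Yes" if np.abs(ratio - 1) < fthresh else "No"

assert is_fair(1.1) == "Yes" and is_fair(0.7) == "No"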
cm2 = clf_metric.binary_confusion_matrix(privileged=privileged)\n c1 = get_confusion_matrix_chart(cm1, f\"{title}: Before Mitigation\")\n c2 = get_confusion_matrix_chart(cm2, f\"{title}: After Mitigation\")\n return c1 | c2\n\n\ndef compare():\n protected_attribute = st.selectbox(\"Select protected column.\", list(CONFIG_FAI.keys()))\n\n # Load data\n valid = load_data(\"output/valid.csv\")\n x_valid = valid[FEATURES]\n y_valid = valid[TARGET].values\n\n # Get predictions\n orig_y_pred, orig_text_model_perf = prepare_pred(x_valid, y_valid, debias=False)\n y_pred, text_model_perf = prepare_pred(x_valid, y_valid, debias=True)\n\n st.header(\"Model Performance\")\n st.subheader(\"Before Mitigation\")\n st.text(orig_text_model_perf)\n st.subheader(\"After Mitigation\")\n st.text(text_model_perf)\n\n st.header(\"Algorithmic Fairness Metrics\")\n fthresh = st.slider(\"Set fairness deviation threshold\", 0., 1., 0.2, 0.05)\n st.write(\"Absolute fairness is 1. The model is considered fair \"\n f\"if **ratio is between {1 - fthresh:.2f} and {1 + fthresh:.2f}**.\")\n\n # Compute fairness measures\n privi_info = CONFIG_FAI[protected_attribute]\n orig_aif_metric = get_aif_metric(\n valid,\n y_valid,\n orig_y_pred,\n protected_attribute,\n privi_info[\"privileged_attribute_values\"],\n privi_info[\"unprivileged_attribute_values\"],\n )\n orig_fmeasures = compute_fairness_measures(orig_aif_metric)\n orig_fmeasures[\"Fair?\"] = orig_fmeasures[\"Ratio\"].apply(\n lambda x: \"Yes\" if np.abs(x - 1) < fthresh else \"No\")\n\n aif_metric = get_aif_metric(\n valid,\n y_valid,\n y_pred,\n protected_attribute,\n privi_info[\"privileged_attribute_values\"],\n privi_info[\"unprivileged_attribute_values\"],\n )\n fmeasures = compute_fairness_measures(aif_metric)\n fmeasures[\"Fair?\"] = fmeasures[\"Ratio\"].apply(\n lambda x: \"Yes\" if np.abs(x - 1) < fthresh else \"No\")\n\n for m in METRICS_TO_USE:\n source = pd.concat([orig_fmeasures.query(f\"Metric == '{m}'\"),\n fmeasures.query(f\"Metric == '{m}'\")])\n source[\"Metric\"] = [\"1-Before Mitigation\", \"2-After Mitigation\"]\n\n st.write(m)\n st.altair_chart(plot_fmeasures_bar(source, fthresh), use_container_width=True)\n\n \nif __name__ == \"__main__\":\n fai()\n","sub_path":"xai_fairness/app_fai.py","file_name":"app_fai.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"597872791","text":"#Programmer: Chris Tralie\n#Purpose: To wrap around Rann's pipeline to compute persistence diagrams and\n#Dionysus for computing bottleneck distance\nimport subprocess\nimport os\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom SparseEdgeList import *\n\ndef plotDGM(dgm, color = 'b', sz = 20, label = 'dgm'):\n if dgm.size == 0:\n return\n # Create Lists\n # set axis values\n axMin = np.min(dgm)\n axMax = np.max(dgm)\n axRange = axMax-axMin;\n # plot points\n plt.scatter(dgm[:, 0], dgm[:, 1], sz, color,label=label)\n plt.hold(True)\n # plot line\n plt.plot([axMin-axRange/5,axMax+axRange/5], [axMin-axRange/5, axMax+axRange/5],'k');\n # adjust axis\n #plt.axis([axMin-axRange/5,axMax+axRange/5, axMin-axRange/5, axMax+axRange/5])\n # add labels\n plt.xlabel('Time of Birth')\n plt.ylabel('Time of Death')\n\ndef plotDGMAx(ax, dgm, color = 'b', sz = 20, label = 'dgm'):\n if dgm.size == 0:\n return\n axMin = np.min(dgm)\n axMax = np.max(dgm)\n axRange = axMax-axMin;\n ax.scatter(dgm[:, 0], dgm[:, 1], sz, color,label=label)\n ax.hold(True)\n 
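plotDGM() above draws (birth, death) pairs against the diagonal; note its plt.hold() calls tie it to pre-2.0 Matplotlib. A toy invocation, assuming the module's imports are in scope:

import numpy as np
import matplotlib.pyplot as plt

dgm = np.array([[0.1, 0.5], [0.2, 0.9]])  # two persistence classes
plotDGM(dgm, color='r', sz=30, label='H1')
plt.show()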
ax.plot([axMin-axRange/5,axMax+axRange/5], [axMin-axRange/5, axMax+axRange/5],'k');\n ax.set_xlabel('Time of Birth')\n ax.set_ylabel('Time of Death')\n\ndef plot2DGMs(P1, P2, l1 = 'Diagram 1', l2 = 'Diagram 2'):\n plotDGM(P1, 'r', 10, label = l1)\n plt.hold(True)\n plt.plot(P2[:, 0], P2[:, 1], 'bx', label = l2)\n plt.legend()\n plt.xlabel(\"Birth Time\")\n plt.ylabel(\"Death Time\")\n\ndef savePD(filename, I):\n if os.path.exists(filename):\n os.remove(filename)\n fout = open(filename, \"w\")\n for i in range(I.shape[0]):\n fout.write(\"%g %g\"%(I[i, 0], I[i, 1]))\n if i < I.shape[0]-1:\n fout.write(\"\\n\")\n fout.close()\n\n#Wrap around Dionysus's bottleneck distance after taking the log\ndef getInterleavingDist(PD1, PD2):\n savePD(\"PD1.txt\", np.log(PD1))\n savePD(\"PD2.txt\", np.log(PD2))\n proc = subprocess.Popen([\"./bottleneck\", \"PD1.txt\", \"PD2.txt\"], stdout=subprocess.PIPE)\n lnd = float(proc.stdout.readline())\n return np.exp(lnd) - 1.0 #Interleaving dist is 1 + eps\n\ndef getBottleneckDist(PD1, PD2):\n savePD(\"PD1.txt\", PD1)\n savePD(\"PD2.txt\", PD2)\n proc = subprocess.Popen([\"./bottleneck\", \"PD1.txt\", \"PD2.txt\"], stdout=subprocess.PIPE)\n return float(proc.stdout.readline())\n\ndef parsePDs(filename):\n PDs = {}\n fin = open(filename)\n for l in fin.readlines():\n fs = [float(s.rstrip()) for s in l.split()]\n dim = int(fs[0])\n if not dim in PDs:\n PDs[dim] = []\n if fs[-2] == fs[-1]:\n continue #Don't display classes which die instantly\n PDs[dim].append(fs[-2:])\n fin.close()\n ret = []\n count = 0\n for i in range(len(PDs)):\n ret.append(np.array(PDs[i]))\n return ret\n\ndef getPDs(I, J, D, N, m):\n if os.path.exists(\"temp.dimacs\"):\n os.remove(\"temp.dimacs\")\n writeResults(I, J, D, N, \"temp.dimacs\")\n if os.path.exists(\"temp.results\"):\n os.remove(\"temp.results\")\n proc = subprocess.Popen([\"./phatclique\", \"-i\", \"temp.dimacs\", \"-m\", \"%i\"%m, \"-o\", \"temp.results\"], stdout=subprocess.PIPE)\n #stdout = proc.communicate()[0]\n while True:\n output=proc.stdout.readline()\n if (output == b'' or output == '') and proc.poll() is not None:\n break\n if output:\n print(output.strip())\n rc = proc.poll()\n return parsePDs(\"temp.results\")\n\ndef doRipsFiltration(X, maxHomDim, eps = 0):\n (I, J, D) = makeComplex(X, 0)\n PDs = getPDs(I, J, D, X.shape[0], maxHomDim+2)\n return PDs\n \nif __name__ == '__main__':\n X = np.random.randn(200, 2)\n X = X/np.sqrt(np.sum(X**2, 1)[:, None])\n #plt.plot(X[:, 0], X[:, 1], '.')\n #plt.show()\n PDs = doRipsFiltration(X, 1)\n plotDGM(PDs[1])\n plt.show()\n","sub_path":"TDA.py","file_name":"TDA.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"380490159","text":"\"\"\"\nImplements article search and filter\n\"\"\"\nfrom urllib.parse import unquote\n\nfrom django.db import models\nfrom django.db.models import Q\n\nfrom authors.apps.articles.filter_search_extras import extra_vars, get_response\n\n\nclass ArticleManager(models.Manager):\n \"\"\"\n define custom manager for articles\n \"\"\"\n\n def search(self, params):\n \"\"\"\n customised search functionality\n \"\"\"\n author = unquote(params.get(\"author\", \"\"))\n title = unquote(params.get(\"title\", \"\"))\n tag = unquote(params.get(\"tag\", \"\"))\n\n author_query = (Q(author__username__icontains=author) | Q(author__email__exact=author))\n tag_query = Q(tags__tag_name__exact=tag)\n title_query = Q(title__icontains=title)\n\n all_fields = (author and title and 
tag)\n author_and_title = (author and title and not tag)\n author_and_tag = (author and tag and not title)\n author_only, tag_only, title_and_tag, title_only = extra_vars(all_fields, author, tag, title)\n\n queryset = self.get_queryset()\n\n attrs = (all_fields, author_and_tag, author_and_title, author_only, queryset, tag_only,\n title_and_tag, title_only, author_query, title_query, tag_query)\n\n return get_response(attrs)\n","sub_path":"authors/apps/articles/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"325154852","text":"# -*- coding:utf-8 -*-\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '1.0.2.dev0'\ndescription = \"JSON based migrations for Plone\"\n\nrequirements = [\n 'setuptools',\n 'collective.transmogrifier>=1.5',\n 'plone.app.transmogrifier',\n 'zope.app.container',\n]\n\ntry:\n import json\nexcept ImportError:\n requirements.append('simplejson')\n\n\nsetup(\n name='collective.jsonmigrator',\n version=version,\n description=description,\n long_description=\"%s\\n%s\" % (\n open(\"README.rst\").read(),\n open(\"CHANGES.rst\").read(),\n ),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='plone transmogrifier ',\n author='Rok Garbas',\n author_email='rok@garbas.si',\n url='https://github.com/collective/collective.jsonmigrator',\n license='BSD',\n packages=find_packages(),\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n install_requires=requirements,\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"57504838","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse, copy\n\nimport blib\nfrom blib import getparam, rmparam, tname, msg, errmsg, site\n\ndef process_page(page, index, parsed):\n global args\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errpagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n errmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, args.verbose)\n\n notes = []\n for t in parsed.filter_templates():\n origt = str(t)\n if tname(t) in [\"ru-conj\", \"ru-conj-old\"]:\n if [x for x in t.params if str(x.value) == \"or\"]:\n errpagemsg(\"WARNING: Skipping multi-arg conjugation: %s\" % str(t))\n continue\n param2 = getparam(t, \"2\")\n if \"+p\" in param2:\n continue\n ppp = getparam(t, \"ppp\") or getparam(t, \"past_pasv_part\")\n if not ppp or ppp == \"-\":\n continue\n ppp2 = getparam(t, \"ppp2\") or getparam(t, \"past_pasv_part2\")\n rmparam(t, \"ppp\")\n rmparam(t, \"past_pasv_part\")\n rmparam(t, \"ppp2\")\n rmparam(t, \"past_pasv_part2\")\n t.add(\"2\", param2 + \"+p\")\n if tname(t) == \"ru-conj\":\n tempcall = re.sub(r\"^\\{\\{ru-conj\", 
\"{{ru-generate-verb-forms\", str(t))\n else:\n tempcall = re.sub(r\"^\\{\\{ru-conj-old\", \"{{ru-generate-verb-forms|old=1\", str(t))\n result = expand_text(tempcall)\n if not result:\n errpagemsg(\"WARNING: Error expanding template %s\" % tempcall)\n continue\n forms = blib.split_generate_args(result)\n pppform = forms.get(\"past_pasv_part\", \"\")\n if \",\" in pppform:\n auto_ppp, auto_ppp2 = pppform.split(\",\")\n wrong = False\n if ppp != auto_ppp:\n errpagemsg(\"WARNING: ppp %s != auto_ppp %s\" % (ppp, auto_ppp))\n wrong = True\n if ppp2 != auto_ppp2:\n errpagemsg(\"WARNING: ppp2 %s != auto_ppp2 %s\" % (ppp2, auto_ppp2))\n wrong = True\n if wrong:\n continue\n else:\n if ppp != pppform:\n errpagemsg(\"WARNING: ppp %s != auto_ppp %s\" % (ppp, pppform))\n continue\n newt = str(t)\n if origt != newt:\n notes.append(\"Replaced manual ppp= with irreg verb with +p\")\n pagemsg(\"Replaced %s with %s\" % (origt, newt))\n\n return parsed, notes\n\nparser = blib.create_argparser(\"Make irregular verbs use +p instead of manual ppp=\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfor category in [\"Russian irregular verbs\"]:\n msg(\"Processing category: %s\" % category)\n for i, page in blib.cat_articles(category, start, end):\n blib.do_edit(page, i, process_page, save=args.save, verbose=args.verbose)\n","sub_path":"make_irreg_verbs_use_plus_p.py","file_name":"make_irreg_verbs_use_plus_p.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"383262506","text":"import sys\nimport json\nsys.path.append('./db/')\nfrom Basketballdb import Basketballdb\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn 'Welcome to the app. 
Go to /query for the application home page.'\n\ndef filterByTeamName(teams,theTeam):\n result = False\n if (theTeam != False):\n #return ateam\n for team in teams:\n if theTeam == team['name']:\n result = team\n break\n else:\n return False\n\n if result:\n return [result]\n\ndef filterByPlayerName(players, thePlayer):\n a = [];\n result = False\n while (thePlayer != False):\n for player in players:\n if thePlayer == player['lastname']:\n result = player\n a.append(result)\n break\n \n else:\n return False\n\n if result:\n return a\n \n \n \n \n@app.route('/query', methods=['POST', 'GET'])\ndef query():\n #get full players and teams\n db = Basketballdb()\n teams = db.getTeams()\n players = db.getPlayers()\n\n #start with full teams and players\n filterteams = teams\n filterplayers = players\n\n #filters\n if( request.args.get('team', False) ):\n filterteams = filterByTeamName(filterteams, request.args.get('team', False))\n\n if( request.args.get('player_last_name', False) ):\n filterplayers = filterByPlayerName(players, request.args.get('player_last_name', False))\n \n data = { 'teams' : teams, 'players' : players,\n 'filteredTeams' : filterteams[:50],\n 'filteredPlayers' : filterplayers[:50] }\n \n return render_template('main.html', data=data)\n #return json.dumps(filterteams)\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run()\n","sub_path":"multi-name mod.py","file_name":"multi-name mod.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"557354616","text":"\"\"\"Celery tasks.\"\"\"\nfrom app import app\nimport requests\n\n\nclass HTTPTask(app.Task):\n\n def __init__(self):\n self.client = requests.Session()\n\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n print('Handled exception: {}'.format(exc))\n\n # def after_return(self, status, retval, task_id, args, kwargs, einfo): pass\n # def on_retry(self, exc, task_id, args, kwargs, einfo): pass\n # def on_success(self, retval, task_id, args, kwargs): pass\n\n\n\n@app.task(base=HTTPTask, name='tasks.get_url')\ndef get_url(url=None):\n response = get_url.client.get(url)\n response.raise_for_status()\n return response.json()\n\n\n\nif __name__ == '__main__':\n urls = [\n 'https://httpbin.org/status/404',\n 'https://httpbin.org/get'\n ]\n for url in urls:\n get_url.delay(url=url)\n","sub_path":"libraries_third_party/celery_/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"385341860","text":"\"\"\"\n\n**INPUT FILE FORMAT**\n\nThe file format consists of a one-row template header followed by a one-row data header and subsequent data\nrows.\n\nThe data represents tailpipe emission rates by model year, age, reg-class and fuel type as estimated by\nEPA's MOVES model.\n\nFile Type\n comma-separated values (CSV)\n\nSample Header\n .. csv-table::\n\n input_template_name:,emission_rates_vehicles,input_template_version:,0.2\n\nSample Data Columns\n .. 
csv-table::\n :widths: auto\n\n start_year,sourcetype_name,reg_class_id,market_class_id,in_use_fuel_id,rate_name,independent_variable,slope,intercept,ind_variable_data,rate_data,equation\n 1995,passenger car,car,non_hauling.ICE,pump gasoline,pm25_exhaust_grams_per_mile,age,0.000020575,0.02556,\"[22, 30]\",\"[0.02601255162083171, 0.026177151337127946]\",((2.0575e-05 * age) + 0.02556)\n 1995,passenger car,car,non_hauling.ICE,pump gasoline,nmog_exhaust_grams_per_mile,age,-0.00059478,0.77323,\"[22, 30]\",\"[0.7601447516760625, 0.7553865333609487]\",((-0.00059478 * age) + 0.77323)\n\nData Column Name and Description\n :start_year:\n The model year to which the rate applies; model years not shown will apply the start_year rate\n less than or equal to the model year.\n\n :sourcetype_name:\n The MOVES sourcetype name (e.g., passenger car, passenger truck, light-commercial truck, etc.).\n\n :reg_class_id:\n Vehicle regulatory class at the time of certification, e.g. 'car','truck'. Reg class definitions may differ\n across years within the simulation based on policy changes. ``reg_class_id`` can be considered a 'historical'\n or 'legacy' reg class.\n\n :market_class_id:\n The OMEGA market class (e.g., non-hauling.ICE, hauling.BEV, etc.).\n\n :in_use_fuel_id:\n In-use fuel id, for use with context fuel prices, must be consistent with the context data read by\n ``class context_fuel_prices.ContextFuelPrices``\n\n :rate_name:\n The emission rate providing the pollutant and units.\n\n :independent_variable:\n The independent variable used in calculating the emission rate (e.g., age).\n\n :slope:\n The slope of the linear fit to the emission rate input data.\n\n :intercept:\n The intercept of the linear fit to the emission rate input data.\n\n :ind_variable_data:\n Input data for the independent variable used to generate the emission rate curve where data represent the age\n associated with the corresponding input data.\n\n :rate_data:\n The emission rate data used to generate the emission rate curve.\n\n :equation:\n The linear fit emission rate equation used to calculate an emission rate at the given independent variable.\n\n----\n\n**CODE**\n\n\"\"\"\nfrom omega_effects.general.general_functions import read_input_file\nfrom omega_effects.general.input_validation import validate_template_version_info, validate_template_column_names\n\n_cache = dict()\n\n\nclass EmissionRatesVehicles:\n \"\"\"\n Loads and provides access to vehicle emission factors by model year, age, legacy reg class ID and in-use fuel ID.\n\n \"\"\"\n def __init__(self):\n self._data = dict()\n self._cache = dict()\n self.startyear_min = 0\n self.deets = {} # this dictionary will not include the legacy fleet\n\n def init_from_file(self, filepath, effects_log):\n \"\"\"\n\n Initialize class data from input file.\n\n Args:\n filepath: the Path object to the file.\n effects_log: an instance of the EffectsLog class.\n\n Returns:\n Nothing, but reads the appropriate input file.\n\n \"\"\"\n # don't forget to update the module docstring with changes here\n input_template_name = 'emission_rates_vehicles'\n input_template_version = 0.2\n input_template_columns = {\n 'start_year',\n 'sourcetype_name',\n 'reg_class_id',\n 'market_class_id',\n 'in_use_fuel_id',\n 'rate_name',\n 'equation',\n }\n\n df = read_input_file(filepath, effects_log)\n validate_template_version_info(df, input_template_name, input_template_version, effects_log)\n\n # read in the data portion of the input file\n df = read_input_file(filepath, effects_log, skiprows=1)\n 
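The record quoted here precompiles each `equation` string from its input file with `compile(..., 'eval')` at load time and later evaluates it against a dictionary of variables such as `age`. A minimal self-contained sketch of that compile-once, evaluate-many pattern, using the pm25 sample equation shown in this record's own docstring:

# Sketch of the compile/eval pattern used by EmissionRatesVehicles.
rate_eq = compile("(2.0575e-05 * age) + 0.02556", "<rate>", "eval")

def emission_rate(age):
    # Empty globals, explicit locals dict, mirroring the record's eval call.
    return eval(rate_eq, {}, {"age": age})

print(emission_rate(10))  # ~0.02576575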
validate_template_column_names(filepath, df, input_template_columns, effects_log)\n\n rate_keys = zip(\n df['start_year'],\n df['sourcetype_name'],\n df['reg_class_id'],\n df['in_use_fuel_id'],\n df['rate_name']\n )\n df.set_index(rate_keys, inplace=True)\n\n self.startyear_min = min(df['start_year'])\n\n self._data = df.to_dict('index')\n\n for rate_key in rate_keys:\n rate_eq = self._data[rate_key]['equation']\n self._data[rate_key].update({'equation': compile(rate_eq, '', 'eval')})\n\n def get_emission_rate(self, session_settings, model_year, sourcetype_name, reg_class_id,\n in_use_fuel_id, age, *rate_names):\n \"\"\"\n\n Args:\n session_settings: an instance of the SessionSettings class\n model_year (int): vehicle model year for which to get emission factors\n sourcetype_name (str): the MOVES sourcetype name (e.g., 'passenger car', 'light commercial truck')\n reg_class_id (str): the regulatory class, e.g., 'car' or 'truck'\n in_use_fuel_id (str): the liquid fuel ID, e.g., 'pump gasoline'\n age (int): vehicle age in years\n rate_names: name of emission rate(s) to get\n\n Returns:\n A list of emission rates for the given type of vehicle of the given model_year and age.\n\n \"\"\"\n locals_dict = locals()\n rate = 0\n return_rates = list()\n\n if model_year < self.startyear_min:\n model_year = self.startyear_min\n\n for rate_name in rate_names:\n\n cache_key = (model_year, sourcetype_name, reg_class_id, in_use_fuel_id, age, rate_name)\n if cache_key in self._cache:\n rate = self._cache[cache_key]\n else:\n rate_keys = [\n k for k in self._data\n if k[0] <= model_year\n and k[1] == sourcetype_name\n and k[2] == reg_class_id\n and k[3] == in_use_fuel_id\n and k[4] == rate_name\n ]\n if not rate_keys:\n rate_keys = [\n k for k in self._data\n if k[1] == sourcetype_name\n and k[2] == reg_class_id\n and k[3] == in_use_fuel_id\n and k[4] == rate_name\n ]\n start_year = min([k[0] for k in rate_keys])\n else:\n max_start_year = max([k[0] for k in rate_keys])\n start_year = min(model_year, max_start_year)\n\n rate_key = start_year, sourcetype_name, reg_class_id, in_use_fuel_id, rate_name\n\n rate = eval(self._data[rate_key]['equation'], {}, locals_dict)\n\n if rate < 0:\n temp_key = (model_year, sourcetype_name, reg_class_id, in_use_fuel_id, age - 1, rate_name)\n rate = self._cache[temp_key]\n\n self._cache[cache_key] = rate\n\n self.deets.update(\n {cache_key: {\n 'session_policy': session_settings.session_policy,\n 'session_name': session_settings.session_name,\n 'model_year': model_year,\n 'age': age,\n 'reg_class_id': reg_class_id,\n 'sourcetype_name': sourcetype_name,\n 'in_use_fuel_id': in_use_fuel_id,\n 'rate_name': rate_name,\n 'rate': rate,\n }}\n )\n return_rates.append(rate)\n\n return return_rates\n","sub_path":"omega_effects/effects/emission_rates_vehicles.py","file_name":"emission_rates_vehicles.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"419956359","text":"'''\nDelete Duplicates\nDescription\nGiven an unsorted linked list of N nodes. The task is to remove duplicate elements from this unsorted Linked List. When a value appears in multiple nodes, the node which appeared first should be kept, all others duplicates are to be removed. 
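Before this record's own `LinkedList`-based solution below, a compact standalone sketch of the same first-occurrence dedup idea; the tiny `Node` class here is illustrative and independent of the record's classes:

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def dedup(head):
    # Keep the first occurrence of each value, unlink later duplicates.
    seen, prev, cur = set(), None, head
    while cur:
        if cur.data in seen:
            prev.next = cur.next
        else:
            seen.add(cur.data)
            prev = cur
        cur = cur.next
    return head

head = Node(1, Node(1, Node(2, Node(2, Node(5)))))
dedup(head)
vals = []
while head:
    vals.append(head.data)
    head = head.next
print(vals)  # [1, 2, 5]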
You need to implement remove duplicates function only.\n\nInput\nFirst line contains an integer N denoting the size of the Linked List.\n\nNext line contain N space separated integers dentoing the LL.\n\nOutput\nPrint the final LL\n\nInput:\n\n5\n\n1 1 2 2 5\n\nOutput:\n\n1 2 5\n'''\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n \n \nclass LinkedList:\n def __init__(self):\n self.head = None\n self.last_node = None\n \n def append(self, data):\n if self.last_node is None:\n self.head = Node(data)\n self.last_node = self.head\n else:\n self.last_node.next = Node(data)\n self.last_node = self.last_node.next\n \n def get_prev_node(self, ref_node):\n current = self.head\n while (current and current.next != ref_node):\n current = current.next\n return current\n \n def remove(self, node):\n prev_node = self.get_prev_node(node)\n if prev_node is None:\n self.head = self.head.next\n else:\n prev_node.next = node.next\n \n def display(self):\n current = self.head\n while current:\n print(current.data, end = ' ')\n current = current.next\n \n# Implement this Function \ndef remove_duplicates(llist):\n\t#valid for sorted as well as unsorted\n\ttrack=set()\n\ttemp=llist.head\n\tif temp==None:\n\t\treturn\n\ttrack.add(temp.data)#adding the first value in the set\n\twhile temp.next!=None:\n\t\tif temp.next.data in track:\n\t\t\ttemp.next=temp.next.next #temp.next fdeleted\n\t\telse:\n\t\t\ttrack.add(temp.next.data)\n\t\t\ttemp=temp.next\n\treturn\n\t\n\t\n \n \na_llist = LinkedList()\n \nn = int(input())\nl = list(map(int, input().split(' ')))\nfor data in l:\n a_llist.append(data)\n \nremove_duplicates(a_llist)\n \na_llist.display()","sub_path":"Delete Duplicates.py","file_name":"Delete Duplicates.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"585677235","text":"#coding:utf-8\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\nimport os, sys\n\nmpl.rcParams['axes.linewidth'] = 1.2 #set the value globally\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False\n\ndir = '../../Data/ClusteringQuality/CluStream/'\nfileName = 'CluStream-KDD99-Normalized-patent'\ndata = pd.read_excel(dir + fileName + '.xlsx')\n\nplt.rc('pdf', fonttype=42)\n\nplt.figure(figsize=(4.0, 2.5))\nplt.subplots_adjust(\n left=0.12,\n bottom=0.18,\n right=0.96,\n top=0.94,\n wspace=0.00,\n hspace=0.00)\n\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\n\n\nfont = {'family': 'Helvetica',\n 'weight': 'demibold',\n 'size': 12,\n }\n\n# plt.xticks(fontsize=8, weight='medium')\n# plt.yticks(fontsize=8, weight='medium')\nplt.xlabel(u'数据量 (' + r'$\\times{10^3}$' + ')')#, size=8, weight='medium')\nplt.ylabel(u'聚类质量CMM')#, size=8, weight='medium')\nplt.ylim(0.3, 1.05)\nplt.xlim(0, 500)\n\nmarksize = 2\nlinewidth = 1\n\n\nplt.plot(data[data.columns[0]], data[data.columns[1]], linestyle=\":\", linewidth=linewidth, color='black')#color='#978a84')\nplt.plot(data[data.columns[0]], data[data.columns[3]], marker='D', markersize=marksize, linewidth=linewidth, color='gray')\nplt.plot(data[data.columns[0]], data[data.columns[2]], marker='^', markersize=marksize, linewidth=linewidth, color='black')\n\nplt.legend(labels=[data.columns[1], data.columns[3], data.columns[2]], loc=8, frameon=False, bbox_to_anchor=(0.5, 0))\n# plt.show()\nplt.savefig(dir + fileName + 
\".png\")\n\n","sub_path":"src/ClusteringQuality/CluStream/CluStreamKDD99CMM-patent.py","file_name":"CluStreamKDD99CMM-patent.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"292535869","text":"## Old development version\nimport numpy as np\nimport utils\n\nclass Connect:\n def __init__(self, num_cols=5, num_rows=3, num_connect=3, verbose=True):\n \"\"\"\n Define a new Connect object\n \"\"\"\n\n self.num_cols = num_cols\n self.num_rows = num_rows\n self.num_connect = num_connect\n self.verbose = verbose\n\n self.players = ['o', 'x']\n self.other_player = {'o': 'x', 'x': 'o'}\n\n def reset(self, first_player='random'):\n self.grid = np.full(fill_value=\" \", shape=(self.num_rows, self.num_cols), dtype=str)\n\n # Each column index is one action.\n self.available_actions = np.arange(self.num_cols)\n\n # Keep track of the lowest free row position per column (where a disk would land if dropped in that column)\n self.lowest_free_rows = np.zeros(self.num_cols, dtype=int)\n\n if first_player == 'random':\n self.player_at_turn = np.random.choice(self.players)\n elif first_player in self.players:\n self.player_at_turn = first_player\n else:\n raise ValueError(\"The argument first_player has to be either 'random', 'x', or 'o'.\")\n\n # Keep track of the last action played (simplifies checking for terminal states).\n self.last_action = None\n\n self.game_over = False\n if self.verbose:\n print(\"Game has been reset.\")\n print(self.grid[::-1, ])\n\n def change_turn(self):\n self.player_at_turn = self.other_player[self.player_at_turn]\n\n def act(self, action):\n \"\"\"\n Given an action (a column index; known to be a valid action!), generate the new board\n\n :param action: an integer referring to the column index where a new token/disk should be dropped\n \"\"\"\n self.grid[self.lowest_free_rows[action], action] = self.player_at_turn\n self.lowest_free_rows[action] += 1\n if self.lowest_free_rows[action] == self.num_rows:\n self.available_actions = np.setdiff1d(self.available_actions, action)\n self.last_action = action\n\n if self.verbose:\n print(self.grid[::-1, ])\n\n def grid_is_full(self):\n return np.all(self.lowest_free_rows == self.num_rows)\n\n def was_winning_move(self):\n \"\"\"\n Check if the move that has just been made wins the game.\n\n Determine in which row the disk (token) landed using self.last_action and look at that row,\n column and both diagonals including this token. 
Check whether there is any sequence of\n length 'num_connect' of the same token type.\n\n For example, if num_connect == 3\n\n ' 'd' ' ' 'c' ' ' 'u' ' '\n ' ' ' 'd' 'c' 'u' ' ' ' '\n ' 'r' 'r' 'x' 'r' 'r' ' '\n ' ' ' 'u' 'c' 'd' ' ' ' '\n ' 'u' ' ' 'c' ' ' 'd' ' '\n ' ' ' ' ' ' ' ' ' ' ' ' '\n ' ' ' ' ' ' ' ' ' ' ' ' '\n\n and \"x\" is the position the token has dropped, check whether there is a sequence of 'x' of length 3\n in the corresponding row (r), column (c), upward-diagonal (u), or downward diagonal (d).\n\n [This function could be made MUCH more efficient by excluding some of the checks beforehand, for\n example, based on the row height of the last_action.]\n\n :return: a boolean, True if the last move was a winning move\n \"\"\"\n game_is_won = False\n\n action_row = self.lowest_free_rows[self.last_action] - 1\n action_col = self.last_action\n winning_sequence = np.full(shape=self.num_connect, fill_value=self.player_at_turn)\n\n # Calculate candidate vectors\n row_candidates = self.grid[action_row, max(0, action_col - self.num_connect + 1) : min(self.num_cols, action_col + self.num_connect)]\n if utils.search_sequence_numpy(row_candidates, winning_sequence):\n game_is_won = True\n else:\n col_candidates = self.grid[max(0, action_row - self.num_connect + 1): min(self.num_rows, action_row + self.num_connect), action_col]\n if utils.search_sequence_numpy(col_candidates, winning_sequence):\n game_is_won = True\n else:\n diag_index_up = action_col - action_row\n diag_up_candidates = np.diagonal(self.grid, diag_index_up)\n if utils.search_sequence_numpy(diag_up_candidates, winning_sequence):\n game_is_won = True\n else:\n diag_index_down = action_row + action_col - (self.num_rows - 1)\n diag_down_candidates = np.diagonal(self.grid[::-1], diag_index_down)\n if utils.search_sequence_numpy(diag_down_candidates, winning_sequence):\n game_is_won = True\n\n if self.verbose and game_is_won:\n print(\"Player '\", self.player_at_turn, \"' has won the game!\")\n return game_is_won\n\n","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"367229587","text":"#AP : Accident Prediction\n#\n\nimport sys\nimport os \nimport numpy as np\nimport time\n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils import data\nfrom torchsummaryX import summary\n\nfrom lib.utils.ap_train_val_utils_AdaLEA import train_ap_wo_ego, val_ap_wo_ego\nfrom lib.models.ap_model_LEA import AP_wo_ego\nfrom lib.utils.ap_dataloader_for_LEA import load_fol_hidden_state\nfrom config.config import * \n\nfrom tensorboardX import SummaryWriter\nimport pandas as pd\n\nGPU_NUM =0\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\nprint(device)\ntorch.cuda.set_device(device)\n\n#print(\"Cuda available: \", torch.cuda.is_available())\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# load args\nargs = parse_args()\n\nif args.enc_concat_type == 'cat':\n args.dec_hidden_size = args.box_enc_size + args.flow_enc_size\nelse:\n if args.box_enc_size != args.flow_enc_size:\n raise ValueError('Box encoder size %d != flow encoder size %d'\n %(args.box_enc_size,args.flow_enc_size))\n else:\n args.dec_hidden_size = args.box_enc_size\n\n\n\n\nprint(\">> Setting the Accident Precition model ... 
\")\nAP_model = AP_wo_ego(args).to(device)\nall_params = AP_model.parameters()\n#optimizer = optim.RMSprop(all_params, lr=args.lr)\noptimizer = optim.Adam(all_params, lr=args.lr)\n\ndataloader_params ={\n \"batch_size\": args.batch_size,\n \"shuffle\": args.shuffle,\n \"num_workers\": args.num_workers\n }\n\nval_set = load_fol_hidden_state(args, 'val')\nprint(\">> Number of validation samples:\", val_set.__len__())\nval_gen = data.DataLoader(val_set, **dataloader_params)\n\nprint(\">> Check the Model's architecture\")\nsummary(AP_model, \n torch.zeros(1, args.segment_len, args.pred_timesteps, args.dec_hidden_size).to(device)\n )\n\n\nprint(\">> Train data root:\", args.data_root)\n\nwriter = SummaryWriter('summary/train_on_DoTA/concatenation/AdaLEA/')\n\n\n# MODEL TRAINING\nmin_loss = 1e6\n\nbest_ap_model = None\n\nbefore_ATTC = 0.\n\n#save train(mAP, ATTC), val(mAP, ATTC)\ninform = np.zeros((args.train_epoch, 4))\n\nfor epoch in range(1, args.train_epoch+1):\n print(\"\\n\")\n print(\"=====================================\")\n print(\"// Epoch :\", epoch)\n # regenerate the training dataset \n train_set = load_fol_hidden_state(args, 'train')\n train_gen = data.DataLoader(train_set, **dataloader_params)\n print(\" Number of training samples:\", train_set.__len__())\n\n start = time.time()\n\n #===== train\n train_loss, train_mAP, train_ATTC = train_ap_wo_ego(epoch, AP_model, optimizer, train_gen, before_ATTC, verbose=True)\n writer.add_scalar('data/train_loss', train_loss, epoch)\n writer.add_scalar('data/train_mAP', train_mAP, epoch)\n writer.add_scalar('data/train_ATTC', train_ATTC, epoch)\n inform[epoch-1,0] = train_mAP\n inform[epoch-1,1] = train_ATTC\n\n #===== validation\n val_loss, val_mAP, val_ATTC = val_ap_wo_ego(epoch, AP_model, val_gen, before_ATTC, verbose=True)\n writer.add_scalar('data/val_loss', val_loss, epoch)\n writer.add_scalar('data/val_mAP', val_mAP, epoch)\n writer.add_scalar('data/val_ATTC', val_ATTC, epoch)\n inform[epoch-1,2] = val_mAP\n inform[epoch-1,3] = val_ATTC\n\n before_ATTC = train_ATTC\n\n\n # print time\n elipse = time.time() - start\n print(\"Elipse: \", elipse)\n\n # save checkpoint per epoch\n saved_ap_model_name = 'epoch_' + str(format(epoch,'03')) + '.pt'\n print(\"Saving checkpoints: \" + saved_ap_model_name)\n torch.save(AP_model.state_dict(), os.path.join(args.checkpoint_dir, saved_ap_model_name))\n\ndf = pd.DataFrame(inform)\ndf.to_csv(args.checkpoint_dir+\"/train_val_inform.csv\", index=False)\nnp.save(args.checkpoint_dir+\"/train_val_inform\" ,inform)\n","sub_path":"RunFile/final_code/train_AP_with_AdaLEA_concat.py","file_name":"train_AP_with_AdaLEA_concat.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"309370338","text":"#!/usr/bin/env python3\n'''\n This demo application demonstrates the functionality of the safrs documented REST API\n After installing safrs with pip, you can run this app standalone:\n $ python3 demo_relationship.py [Listener-IP]\n\n This will run the example on http://Listener-Ip:5000\n\n - A database is created and a user is added\n - A rest api is available\n - swagger documentation is generated\n\n This is a minimal example, you'll probably want to use demo_relationship_ext.py instead!!!\n'''\nimport sys\nimport logging\nimport builtins\nfrom flask import Flask, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom flask_cors import CORS\nfrom safrs import SAFRSBase, 
SAFRSAPI, jsonapi_rpc\n\ndb = SQLAlchemy()\n\n\nclass Response(SAFRSBase, db.Model):\n '''\n description: Response description\n '''\n __tablename__ = 'Responses'\n id = db.Column(db.String, primary_key=True)\n response_data = db.Column(db.String, default='')\n\n\nif __name__ == '__main__':\n HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'\n PORT = 5000\n app = Flask('SAFRS Demo Application')\n app.config.update(SQLALCHEMY_DATABASE_URI='sqlite://', DEBUG=True)\n db.init_app(app)\n db.app = app\n # Create the database\n db.create_all()\n API_PREFIX = ''\n \n with app.app_context():\n # Create a user and a book and add the book to the user.books relationship\n response = Response(response_data='{}')\n api = SAFRSAPI(app, host='{}:{}'.format(HOST,PORT), port=PORT, prefix=API_PREFIX)\n # Expose the database objects as REST API endpoints\n api.expose_object(Response)\n # Register the API at /api/docs\n print('Starting API: http://{}:{}{}'.format(HOST, PORT, API_PREFIX))\n app.run(host=HOST, port=PORT)\n","sub_path":"examples/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"61339220","text":"#!/usr/bin/python3\n#coding:utf-8\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nimport numpy as np\nimport pyaudio\nimport wave\nfrom struct import pack\nfrom array import array\nimport collections\nfrom collections import Counter\nimport sys\nimport signal\nimport time\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 16000\nclip_stride_ms = 32 # 需要确保 RATE / clip_stride_ms 为整数\nCHUNK_SIZE = int(RATE / clip_stride_ms) # 500\nNUM_PADDING_CHUNKS = clip_stride_ms # 32\nNUM_WINDOW_CHUNKS = 13\naverage_window_ms = 500\nsuppression_ms = 1500\ndetection_threshold = 0.9\n\naverage_window_samples = int(average_window_ms / clip_stride_ms)+2 # 15\nsuppression_samples = int(suppression_ms * RATE / 1000) # 240000\n\n\nclass KWS:\n def __init__(self, model_dir='model\\ds_cnn.pb'): #model/CNN_L.pb model/dnn.pb model\\Pretrained_models\\DS_CNN/DS_CNN_L.pb\n # load model\n self.sess = tf.InteractiveSession()\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n # self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n self.load_pb(self.sess, pb_path=model_dir)\n print('Model restored.')\n self.softmax_tensor = self.sess.graph.get_tensor_by_name('labels_softmax:0')\n\n def recognize_file(self, wav='wav/test.wav', label_path='label/labels.txt',\n num_top_predictions=3):\n # recognize\n with open(wav, 'rb') as wav_file:\n wav_data = wav_file.read()\n print(wav_data)\n self.predictions = np.squeeze(self.sess.run(self.softmax_tensor, {'decoded_sample_data:0': wav_data})) # decoded_sample_data:0 wav_data:0\n # Sort to show labels in order of confidence\n top_k = self.predictions.argsort()[-num_top_predictions:] # argsort()元素从小到大排列,提取其对应的index(索引)\n labels = self.load_labels(label_path)\n result = []\n for node_id in top_k:\n human_string = labels[node_id]\n score = self.predictions[node_id]\n result.append('%s (score = %.5f)' % (human_string, score))\n print('%s (score = %.5f)' % (human_string, score))\n print(result)\n return '\\n'.join(result)\n\n def recognize_realtime(self, wav_stream, label_path='label/labels.txt', num_top_predictions=3):\n wav_stream = wav_stream.read()\n print(wav_stream)\n print(int.from_bytes(wav_stream, byteorder='big'))\n self.predictions = 
np.squeeze(self.sess.run(self.softmax_tensor, {'wav_data:0': wav_stream}))\n # Sort to show labels in order of confidence\n top_k = self.predictions.argsort()[-num_top_predictions:] # argsort()元素从小到大排列,提取其对应的index(索引)\n labels = self.load_labels(label_path)\n result = []\n for node_id in top_k:\n human_string = labels[node_id]\n score = self.predictions[node_id]\n result.append('%s (score = %.5f)' % (human_string, score))\n print('%s (score = %.5f)' % (human_string, score))\n # print(result)\n return '\\n'.join(result)\n\n def record_to_file(self, path, data, sample_width):\n \"Records from the microphone and outputs the resulting data to 'path'\"\n # sample_width, data = record()\n data = pack('<' + ('h' * len(data)), *data)\n wf = wave.open(path, 'wb')\n wf.setnchannels(1)\n wf.setsampwidth(sample_width)\n wf.setframerate(RATE)\n wf.writeframes(data)\n wf.close()\n\n def handle_int(self, sig, chunk):\n global leave, got_10_result\n leave = True\n got_10_result = True\n\n def normalization(self, data):\n # 归一化数据到[-1,1]\n _range = np.max(abs(data))\n return data / _range\n\n def standardization(self, data):\n # 标准化\n mu = np.mean(data, axis=0)\n sigma = np.std(data, axis=0)\n return (data - mu) / sigma\n\n def counter(self, human_string_arr, score_arr):\n # print(human_string_arr)\n top_num = 2\n string_top2 = Counter(human_string_arr).most_common(top_num)\n # print(string_top2)\n human_string_and_score_dict = {}\n if len(string_top2) == 1:\n human_string = string_top2[0][0]\n human_string_index = [j for j, x in enumerate(human_string_arr) if x == human_string]\n human_string_and_score_dict[human_string] = sum([score_arr[k] for k in human_string_index]) / len(\n human_string_arr)\n else:\n for i in range(top_num):\n human_string = string_top2[i][0]\n # print(human_string)\n human_string_index = [j for j, x in enumerate(human_string_arr) if x == human_string]\n human_string_and_score_dict[human_string] = sum([score_arr[k] for k in human_string_index]) / len(\n human_string_arr)\n return human_string_and_score_dict\n\n def record(self, label_path='label/labels.txt'):\n flag = 0\n pa = pyaudio.PyAudio()\n stream = pa.open(format=pyaudio.paInt16,\n channels=1,\n rate=RATE,\n input=True,\n start=False,\n # input_device_index=2,\n frames_per_buffer=CHUNK_SIZE)\n leave = False\n got_10_result = False\n signal.signal(signal.SIGINT, self.handle_int)\n # print('raw_data', raw_data)\n while not leave:\n suppression_flag = 0\n ring_buffer = collections.deque(maxlen=NUM_PADDING_CHUNKS)\n human_string_flags = ['none'] * average_window_samples\n human_string_index = 0\n score_flags = [0] * average_window_samples\n score_index = 0\n # ring_buffer_flags = [0] * NUM_WINDOW_CHUNKS\n # ring_buffer_index = 0\n print(\"* recording: \")\n stream.start_stream()\n while not got_10_result and not leave:\n\n chunk = stream.read(CHUNK_SIZE)\n # print(chunk)\n ring_buffer.append(chunk)\n if len(ring_buffer) < NUM_PADDING_CHUNKS:\n continue\n # print(ring_buffer)\n data_save = b''\n for i in range(len(ring_buffer)):\n data_save += ring_buffer[i]\n raw_data = array('h')\n raw_data.extend(array('h', data_save))\n raw_data = np.array(raw_data,dtype=np.float32).reshape([16000,1])\n raw_data = self.normalization(raw_data)\n if suppression_flag == 0:\n self.predictions = np.squeeze(self.sess.run(self.softmax_tensor, {'decoded_sample_data:0': raw_data}))\n # Sort to show labels in order of confidence\n top_1 = self.predictions.argsort()[-1] # argsort()元素从小到大排列,提取其对应的index(索引)\n labels = self.load_labels(label_path)\n 
human_string = labels[top_1]\n score = self.predictions[top_1]\n # print(human_string, str(score))\n\n human_string_flags[human_string_index] = human_string\n human_string_index += 1\n human_string_index %= average_window_samples\n\n score_flags[score_index] = score\n score_index += 1\n score_index %= average_window_samples\n\n human_string_and_score_dict = self.counter(human_string_flags, score_flags)\n human_string_big_score_tuple = sorted(human_string_and_score_dict.items(), key=lambda item:item[1])[0]\n human_string = human_string_big_score_tuple[0]\n score = human_string_big_score_tuple[1]\n\n if score < detection_threshold or human_string == '_silence_' or human_string == '_unknown_' or human_string == 'none':\n sys.stdout.write('_')\n else:\n sys.stdout.write(human_string + '(' + str(score) + ')')\n # sys.stdout.write(human_string)\n # flag += 1\n suppression_flag = 1\n start = time.time()\n else:\n if time.time()-start < suppression_ms / 1000:\n chunk = stream.read(CHUNK_SIZE)\n sys.stdout.write('_')\n else:\n suppression_flag = 0\n sys.stdout.flush()\n if flag >= 1000:\n got_10_result = True\n sys.stdout.write('\\n')\n stream.stop_stream()\n print(\"* done recording\")\n got_10_result = False\n leave = True\n stream.close()\n\n def load_pb(self, sess, pb_path):\n # pb模型导入\n with gfile.GFile(pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='') # 导入计算图\n\n def load_labels(self, filename):\n \"\"\"Read in labels, one label per line.\"\"\"\n return [line.rstrip() for line in tf.gfile.GFile(filename)]\n\n\nif __name__ == '__main__':\n a = KWS()\n a.record()\n","sub_path":"recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"63863709","text":"from ShopifyManagement import ManageOrder\nimport time\nfrom multiprocessing import Process\nfrom watchDog import setWatchDog\n\ndef shopifyOrderManagement():\n #Upon start\n while True:\n #Define which store to use\n store = 'DK'\n mo = ManageOrder(switch = store) #Instantiate the store\n \n #Check for existing orders for data base update - closed \n status, orders = mo.getOrders(orderType = 'closed')\n if status is True:\n mo.insert_orders_to_database(orders)\n\n #Check for new orders for data base update - open\n status, orders = mo.getOrders(orderType = 'open')\n\n if status is True:\n mo.insert_orders_to_database(orders)\n\n #Check for sending warning SMS\n mo.sms_delayWarn(orders)\n \n #Print out new orders\n mo.print_orders()\n\n #Geoplotting\n mo.geo_plotter()\n\n #Check for fulfillment\n mo.fulfill_and_capture()\n\n #Define which store to use\n store = 'HK'\n mohk = ManageOrder(switch = store) #Instantiate the storeß\n \n #Check for existing orders for data base update - closed\n\n status, orders = mohk.getOrders(orderType = 'closed')\n\n if status is True:\n mohk.insert_orders_to_database(orders)\n\n #Check for new orders for data base update - open\n status, orders = mohk.getOrders(orderType = 'open')\n\n if status is True:\n mohk.insert_orders_to_database(orders)\n\n #Check for sending warning SMS\n mohk.sms_delayWarn(orders)\n \n #Print out new orders\n mohk.print_orders()\n\n #Geoplotting\n mohk.geo_plotter()\n\n #Check for fulfillment\n mohk.fulfill_and_capture()\n\n #Stamp last system up time\n mohk.stampTime()\n\n print('system working...')\n time.sleep(5)\n\n#Use multiprocessing to run main 
order tracking method and watch dog simultanously\nif __name__ == '__main__':\n p1 = Process(target = shopifyOrderManagement, args = ())\n p2 = Process(target = setWatchDog, args = (1, 'lastTimeStamp.txt'))\n print('Starting the main process')\n p1.start()\n print('Main process started waiting now for 60 sec...')\n time.sleep(60)\n print('60 sec wait process passed starting watch dog')\n p2.start()\n print('watch dog started. System running again!')\n p1.join()\n p2.join()\n","sub_path":"runShopify.py","file_name":"runShopify.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"616395567","text":"from typing import get_type_hints\n\ndef strict_types(function):\n def type_checker(*args, **kwargs):\n hints = get_type_hints(function)\n\n all_args = kwargs.copy()\n all_args.update(dict(zip(function.__code__.co_varnames, args)))\n\n for argument, argument_type in ((i, type(j)) for i, j in all_args.items()):\n if argument in hints:\n if not issubclass(argument_type, hints[argument]):\n raise TypeError('Type of {} is {} and not {}'.format(argument, argument_type, hints[argument]))\n\n result = function(*args, **kwargs)\n\n if 'return' in hints:\n if not isinstance(result, hints['return']):\n raise TypeError('Type of result is {} and not {}'.format(type(result), hints['return']))\n\n return result\n\n return type_checker\n","sub_path":"strict_types.py","file_name":"strict_types.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"64166397","text":"class ShortestPath:\n graph = {\n '1': ['2', '3', '4'],\n '2': ['5', '6'],\n '5': ['9', '10'],\n '4': ['7', '8'],\n '7': ['11', '12']\n }\n\n def bfs(self, graph, start, goal):\n explored = []\n\n queue = [[start]]\n\n if start == goal:\n return \"That was easy! Start = goal\"\n \n while queue:\n path = queue.pop(0)\n\n node = path[-1]\n\n if node not in explored:\n neighbors = graph[node]\n\n for neighbor in neighbors:\n new_path = list(path)\n new_path.append(neighbor)\n queue.append(new_path)\n\n if neighbor == goal:\n return new_path\n \n explored.append(node)\n \n return \"So sorry, but a connecting path doesn't exist\"","sub_path":"BFS/ShortestPath.py","file_name":"ShortestPath.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"343554415","text":"from rest_framework import generics, permissions\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\n\nfrom core import models\nfrom . 
import serializers\n\n\nclass ProductListApiView(generics.ListAPIView):\n \"\"\"\n API endpoint for listing available products\n \"\"\"\n serializer_class = serializers.ProductSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (JWTAuthentication,)\n queryset = models.Product.objects.all()\n\n def get_queryset(self):\n queryset = self.queryset.filter(stock__gt=0)\n return queryset\n\n\nclass OrderCreateViewSet(generics.CreateAPIView):\n \"\"\"\n API endpoint for order products\n \"\"\"\n serializer_class = serializers.OrderingSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (JWTAuthentication,)\n\n @staticmethod\n def sum_quantity_by_id(key, value, products):\n \"\"\"\n Function that adding quantity to existing product in order\n \"\"\"\n for product in products:\n if product['id'] == key:\n product['quantity'] += value\n\n def create(self, request, *args, **kwargs):\n data_items = request.data\n products = []\n\n # check product id is available and check quantity must be more than 0\n for product in data_items:\n # this error handler is for not existing product\n try:\n # try to get object by id\n temp = models.Product.objects.get(id=product['id'])\n\n # check product is available\n if temp.stock == 0:\n res = {\n 'message': 'The order contains unavailable products!'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n # check quantity that user want is positive\n if product['quantity'] <= 0:\n res = {\n 'message': f'The quantity of {temp.name} is lower than one!'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n except models.Product.DoesNotExist:\n res = {\n 'message': 'The product not found!'\n }\n return Response(res, status=status.HTTP_404_NOT_FOUND)\n\n # sum quantity of same product\n for product in data_items:\n if product['id'] not in [item['id'] for item in products]:\n temp = {\n 'id': product['id'],\n 'quantity': product['quantity']\n }\n products.append(temp)\n else:\n self.sum_quantity_by_id(product['id'], product['quantity'], products)\n\n # check quantity of products isn't more than our inventory\n for product in products:\n product_obj = models.Product.objects.get(id=product['id'])\n if product['quantity'] > product_obj.stock:\n res = {\n 'message': f'The quantity of {product_obj.name} is more than our inventory!'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n # save product item in database\n price = 0 # for total price\n # create new order\n order = models.Order.objects.create(user=request.user, price=price)\n\n # this is for compute total price and add order item to database\n for product in products:\n product_obj = models.Product.objects.get(id=product['id'])\n price += product_obj.price * product['quantity']\n models.OrderItem.objects.create(\n order=order,\n product_id=product['id'],\n quantity=product['quantity']\n )\n product_obj.stock -= product['quantity'] # minus product stock from quantity of this order\n product_obj.save()\n # save total price\n order.price = price\n order.save()\n\n res = {\n 'message': 'Your order has been successfully registered',\n 'data': serializers.OrderSerializer(order).data\n }\n\n return Response(\n res,\n status=status.HTTP_201_CREATED\n )\n","sub_path":"shopper/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"92927538","text":"# -*- coding: utf-8 -*-\n__author__ = 'Alex 
Bo'\n__email__ = 'bosha@the-bosha.ru'\n\nfrom tornado.web import url\n\nfrom handlers import (\n JSONBasePageHandler, JSONTestListHandler, JSONTestViewHandler, AJAXTestRemoveHandler,\n AJAXTestCreateHandler, AJAXTestEditHandler\n)\n\nurl_patterns = [\n url(r\"/\", JSONBasePageHandler, name=\"json_index\"),\n url(r\"/ajax/posts/\", JSONTestListHandler, name=\"json_list\"),\n url(r\"/ajax/posts/add/\", AJAXTestCreateHandler, name=\"json_add\"),\n url(r\"/ajax/posts/view/(?P\\d+)/\", JSONTestViewHandler, name=\"json_view\"),\n url(r\"/ajax/posts/remove/(?P\\d+)/\", AJAXTestRemoveHandler, name=\"json_remove\"),\n url(r\"/ajax/posts/edit/(?P\\d+)/\", AJAXTestEditHandler, name=\"json_edit\"),\n]\n","sub_path":"examples/ajax_weblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"425961218","text":"r\"\"\"Interpreter za programski \"jezik\" koji reprezentira liste.\nListe se pišu kao [x1,x2,...,xk], svaki xi može biti broj, string ili lista.\nBrojevi su samo prirodni (veći od 0).\nStringovi se pišu kao \"...\", gdje unutar ... ne smije biti znak \".\nStringovi se mogu pisati i kao '...', gdje unutar ... nema znaka '.\nZapravo, \"...\"-stringovi smiju sadržavati i \", ali escape-ane znakom \\.\nDakle, \\\" označava \". \\n označava novi red. \\\\ označava \\.\nUnutar '...'-stringova \\ nema nikakvo posebno značenje.\n\"\"\"\n\n\nfrom pj import *\n\nBKSL, N1, N2, NOVIRED = '\\\\', \"'\", '\"', '\\n'\n\ndef makni(it):\n \"\"\"Miče obrnute kose crte (backslashes) iz iteratora.\"\"\"\n for znak in it:\n if znak == BKSL:\n sljedeći = next(it)\n if sljedeći == 'n': yield NOVIRED\n else: yield sljedeći\n else: yield znak\n\nclass L(enum.Enum):\n UOTV, UZATV, ZAREZ = '[],'\n class BROJ(Token):\n def vrijednost(self): return int(self.sadržaj)\n class STRING(Token):\n def vrijednost(self):\n s = self.sadržaj[1:-1]\n if self.sadržaj.startswith(N2): return ''.join(makni(iter(s)))\n else: return s\n\ndef l_lex(lista):\n lex = Tokenizer(lista)\n for znak in iter(lex.čitaj, ''):\n if znak.isspace(): lex.zanemari()\n elif znak.isdigit() and znak != '0':\n lex.zvijezda(str.isdigit)\n yield lex.token(L.BROJ)\n elif znak == N1:\n lex.pročitaj_do(N1)\n yield lex.token(L.STRING)\n elif znak == N2:\n while True:\n z = lex.čitaj()\n if not z: raise lex.greška('Nezavršeni string!')\n elif z == BKSL: lex.čitaj()\n elif z == N2:\n yield lex.token(L.STRING)\n break\n else: yield lex.literal(L)\n\n# lista -> UOTV elementi UZATV\n# elementi -> element | element ZAREZ elementi | ''\n# element -> BROJ | STRING | lista\n\nclass LParser(Parser):\n def lista(self):\n self.pročitaj(L.UOTV)\n el = self.elementi()\n self.pročitaj(L.UZATV)\n return Lista(el)\n \n def elementi(self):\n rezultat = []\n if not self >= L.UZATV:\n rezultat.append(self.element())\n while self >> L.ZAREZ: rezultat.append(self.element())\n return rezultat\n\n def element(self):\n if self >= L.UOTV: return self.lista()\n else: return self.pročitaj(L.BROJ, L.STRING)\n \n start = element\n\n\nclass Lista(AST('elementi')):\n def vrijednost(self): return [el.vrijednost() for el in self.elementi]\n\n\nif __name__ == '__main__':\n print(LParser.parsiraj(l_lex(r'''\n [23, \"ab\\\"c]\", 'a[]', [2, 3], 523,\n '\"', '\\', \"\\e\", \"\\\\\"]\n ''')).vrijednost())\n\n# DZ: sve više jezika dopušta \"zarez na kraju\" stil pisanja listi\n# (npr. [2,3,] je isto što i [2,3]) -- omogućite to!)\n# DZ: omogućite razne druge \\-escape sekvence (npr. 
\\u za Unicode znakove)\n# DZ: omogućite izraze umjesto literala: polimorfni + za zbrajanje/konkatenaciju\n","sub_path":"PJ/10_liste_i_stringovi.py","file_name":"10_liste_i_stringovi.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"287672987","text":"import pygame\nimport glob2\nimport poker\n\ndef isBetween(cursor,card_pos):\n \"\"\"pos and card_pos are both tuples of type (x,y)\n Returns true if pos is between card_pos and card_pos+100\"\"\"\n return all([cursorpos>=cardpos and cursorpos<=cardpos+100 for cursorpos,cardpos in zip(cursor,card_pos)])\n\nclass Button():\n def __init__(self, color, x,y,width,height, text=''):\n self.color = color\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self,win,outline=None):\n #Call this method to draw the button on the screen\n if outline:\n pygame.draw.rect(win, outline, (self.x-2,self.y-2,self.width+4,self.height+4),0)\n\n pygame.draw.rect(win, self.color, (self.x,self.y,self.width,self.height),0)\n\n if self.text != '':\n font = pygame.font.SysFont('Garamond', 20)\n text = font.render(self.text, 1, white)\n win.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))\n\n def isOver(self, pos):\n #Pos is the mouse position or a tuple of (x,y) coordinates\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n\n return False\n\nclass Poker():\n def __init__(self):\n pygame.init()\n self.state = 'HandState'\n\n self.deck = poker.createDeck()\n self.black = (0,0,0)\n self.white = (255,255,255)\n self.green = (0,68,1)\n self.lightgray = (100,100,100)\n self.height = 600\n self.width = 1500\n self.mainWindow = pygame.display.set_mode((width,height))\n pygame.display.set_caption(\"Poker Calculator\")\n\n self.deckcards100 = []\n self.card_positions = []\n\n self.ConfirmButton = Button(black,25,490,150,60,'Confirm Hand')\n\n self.CardLimit = 0\n self.running = True\n\n self.myhand = []\n self.table_cards = []\n\n\n def GetCardDirectory(self):\n self.spadejpg = glob2.glob(\"deck/100Percent/spades/*.jpg\")\n self.heartsjpg = glob2.glob(\"deck/100Percent/hearts/*.jpg\")\n self.clubsjpg = glob2.glob(\"deck/100Percent/clubs/*.jpg\")\n self.diamondjpg = glob2.glob(\"deck/100Percent/diamonds/*.jpg\")\n\n for clubs in clubsjpg:\n self.deckcards100.append(pygame.image.load(clubs))\n for diamond in diamondjpg:\n self.deckcards100.append(pygame.image.load(diamond))\n for spades in spadejpg:\n self.deckcards100.append(pygame.image.load(spades))\n for hearts in heartsjpg:\n self.deckcards100.append(pygame.image.load(hearts))\n\n def GenerateCardPositions(self):\n x = 15 # initial x\n y = 25 # initial y\n for count in range(len(self.deckcards100)):\n if count % 13 == 0 and count != 0: # change row every 13 cards!\n y += 110\n x = 15\n self.card_positions.append((x,y))\n x += 110\n\n def getEvents(self):\n self.events = pyame.event.get()\n for event in self.events:\n #Quit Event\n if event.type == pygame.QUIT:\n self.running = False\n self.pos = pygame.mouse.get_pos()\n #Button Change Color Mechanic\n if event.type == pygame.MOUSEMOTION:\n if self.ConfirmButton.isOver(pos):\n self.ConfirmButton.color = self.lightgray\n else:\n self.ConfirmButton.color = self.black\n if event.type == pygame.MOUSEBUTTONDOWN:\n if pygame.mouse.get_pressed()[0]:\n for i in range(len(self.card_positions)):\n if 
isBetween(self.pos,self.card_positions[i]):\n if self.deckcards100[i].get_size() == (100,100) and self.CardLimit<2:\n self.deckcards100[i] = pygame.transform.scale(self.deckcards100[i],(80,80))\n self.myhand.append(deck[i])\n self.CardLimit +=1\n elif self.deckcards100[i].get_size() == (80,80):\n self.deckcards100[i] = pygame.transform.scale(self.deckcards100[i],(100,100))\n self.myhand.remove(deck[i])\n self.CardLimit -= 1\n def mainLoop(self):\n while self.running:\n count = 0\n for img in self.deckcards100:\n self.mainWindow.blit(img,card_positions[count])\n count += 1\n self.ConfirmButton.draw(self.mainWindow)\n\n\n\npygame.init()\n\ndeck = poker.createDeck()\n\n#Set Colors\nblack = (0,0,0)\nwhite = (255,255,255)\ngreen = (0,68,1)\nlightgray = (100,100,100)\n#Window dimensions\nheight = 600\nwidth = 1500\n\n# deselected_scale = 1.25\n# selected_scale = 0.8\n\ndeckcards100 =[]\n\nmainWindow = pygame.display.set_mode((width,height))\npygame.display.set_caption(\"Poker Calculator\")\n\n#Get directory for deck\nspadejpg = glob2.glob(\"deck/100Percent/spades/*.jpg\")\nheartsjpg = glob2.glob(\"deck/100Percent/hearts/*.jpg\")\nclubsjpg = glob2.glob(\"deck/100Percent/clubs/*.jpg\")\ndiamondjpg = glob2.glob(\"deck/100Percent/diamonds/*.jpg\")\n\n\n#get Dire\n\n#Load pygame.image into a deckcard List\n\nfor clubs in clubsjpg:\n deckcards100.append(pygame.image.load(clubs))\nfor diamond in diamondjpg:\n deckcards100.append(pygame.image.load(diamond))\nfor spades in spadejpg:\n deckcards100.append(pygame.image.load(spades))\nfor hearts in heartsjpg:\n deckcards100.append(pygame.image.load(hearts))\n\n\ncard_positions = [] # We will save the card positions here\n\n# Generate Card Positions and Save them\nx = 15 #initial x\ny = 25 # initial y\nfor count in range(len(deckcards100)):\n if count % 13 == 0 and count != 0: # change row every 13 cards!\n y += 110\n x = 15\n card_positions.append((x,y))\n x += 110\n\nConfirmButton = Button(black,25,490,150,60,'Confirm Hand')\n\nrunning = True\ncounter = 0\n#initialize lists\nmyhand = []\nflop = []\n\nwhile running:\n mainWindow.fill(green)\n\n\n #Add cards in mainWindow\n count = 0\n for img in deckcards100:\n mainWindow.blit(img,card_positions[count])\n count += 1\n\n for event in pygame.event.get():\n #Quit Event\n if event.type == pygame.QUIT:\n running = False\n pos = pygame.mouse.get_pos()\n\n\n #Button Change Color Mechanic\n if event.type == pygame.MOUSEMOTION:\n if ConfirmButton.isOver(pos):\n ConfirmButton.color = lightgray\n else:\n ConfirmButton.color = black\n #Click on Card Mechanic\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n\n if pygame.mouse.get_pressed()[0]:\n for i in range(len(card_positions)):\n if isBetween(pos,card_positions[i]):\n if deckcards100[i].get_size() == (100,100) and counter<2:\n deckcards100[i] = pygame.transform.scale(deckcards100[i],(80,80))\n myhand.append(deck[i])\n counter +=1\n elif deckcards100[i].get_size() == (80,80):\n deckcards100[i] = pygame.transform.scale(deckcards100[i],(100,100))\n myhand.remove(deck[i])\n counter -= 1\n if ConfirmButton.isOver(pos) and pygame.mouse.get_pressed()[0]:\n poker.removeCardsfromDeck(myhand,deck)\n\n ConfirmButton.draw(mainWindow)\n pygame.display.flip()\n","sub_path":"legacy/pygamewindow_withclasses.py","file_name":"pygamewindow_withclasses.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"529599435","text":"from flask import jsonify, request\nfrom flask import Flask\nimport 
pickle\n\n\nwith open('dv.bin', 'rb') as dv_file:\n dv = pickle.load(dv_file)\nwith open('model1.bin', 'rb') as model_file:\n model = pickle.load(model_file)\n\n\ncustomer = {\n \"contract\": \"two_year\",\n \"tenure\": 12,\n \"monthlycharges\": 19.7\n}\n\napp = Flask('churn')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n customer = request.get_json()\n\n X = dv.transform([customer])\n y_pred = model.predict_proba(X)[0, 1]\n\n result = {\n \"churn_probability\": round(float(y_pred), 3)\n }\n\n return jsonify(result)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0', port=9697)\n","sub_path":"week_5/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"485166949","text":"import collections\n\nclass Solution:\n\n # use monotonic descreasing deque\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n if not nums or k is None or k < 1:\n return []\n n, ans = len(nums), []\n left = 0\n dq = collections.deque()\n for right in range(n):\n self.push(dq, nums, right)\n length = right - left + 1\n if length > k:\n if dq[0] == left:\n dq.popleft()\n left += 1\n length -= 1\n if length == k:\n ans.append(nums[dq[0]])\n return ans\n\n def push(self, dq, nums, i):\n while dq and nums[i] > nums[dq[-1]]:\n dq.pop()\n dq.append(i)\n","sub_path":"Two pointers/239. Sliding Window Maximum.py","file_name":"239. Sliding Window Maximum.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"370385127","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云 - 监控平台 (BlueKing - Monitor) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
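On the sliding-window-maximum record earlier in this hunk (LeetCode 239): the deque holds indices whose values are kept in non-increasing order, so the current window's maximum is always at the front. A trimmed standalone version with the same behavior as the class method in that record:

import collections

def max_sliding_window(nums, k):
    dq, ans = collections.deque(), []   # dq: indices, values non-increasing
    for i, x in enumerate(nums):
        while dq and nums[dq[-1]] < x:  # drop smaller values from the back
            dq.pop()
        dq.append(i)
        if dq[0] == i - k:              # front index just left the window
            dq.popleft()
        if i >= k - 1:
            ans.append(nums[dq[0]])
    return ans

print(max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 3))  # [3, 3, 5, 5, 6, 7]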
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\n\nimport json\nfrom copy import copy\n\nfrom django.utils import six\nfrom rest_framework.renderers import BaseRenderer\n\nfrom bkmonitor.utils.common_utils import DatetimeEncoder\n\n\ndef is_status_code_ok(code):\n return 200 <= code < 300\n\n\nclass UJSONRenderer(BaseRenderer):\n \"\"\"\n Renderer which serializes to JSON.\n Applies JSON's backslash-u character escaping for non-ascii characters.\n Uses the blazing-fast ujson library for serialization.\n \"\"\"\n\n media_type = \"application/json\"\n format = \"json\"\n ensure_ascii = True\n charset = None\n\n def render(self, data, *args, **kwargs):\n\n if data is None:\n return bytes()\n\n ret = json.dumps(data, ensure_ascii=self.ensure_ascii, cls=DatetimeEncoder)\n\n # force return value to unicode\n if isinstance(ret, six.text_type):\n return bytes(ret.encode(\"utf-8\"))\n return ret\n\n\nclass MonitorJSONRenderer(UJSONRenderer):\n def render(self, data, accepted_media_type=None, renderer_context=None):\n\n if hasattr(self, \"rendered_content\"):\n return self.rendered_content\n response = renderer_context[\"response\"]\n\n formatted_data = {\n \"result\": is_status_code_ok(response.status_code),\n \"code\": response.status_code,\n \"message\": \"OK\",\n }\n\n if formatted_data[\"result\"]:\n if isinstance(data, dict) and \"data\" in data and \"result\" in data:\n # 如果是字典类型且字典中已经存在键名为'data'的键\n # 说明已经处理过\n formatted_data = data\n\n else:\n if isinstance(data, dict):\n if \"results\" in data:\n origin_data = copy(data)\n data = origin_data.pop(\"results\")\n meta = origin_data\n formatted_data.update(\n {\n \"data\": data,\n \"_meta\": meta,\n }\n )\n\n formatted_data.update(\n {\n \"data\": data,\n }\n )\n else:\n formatted_data = data\n\n return super(MonitorJSONRenderer, self).render(formatted_data, accepted_media_type, renderer_context)\n","sub_path":"packages/monitor_api/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"327870929","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*- \nimport requests\nimport pymysql\nimport time\nimport sys\nimport re\nimport os\nfrom parsel import Selector\n\n\nclass douyin:\n def __init__(self):\n self.header = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': '',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.douyin.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',\n }\n self.uids = {'75382141971':'LL', '9330557830':'WD', '101019751610':'HNN'}\n self.db = pymysql.connect(\"localhost\",\"yaccai\",\"go\",\"daily\" )\n self.home = os.environ['HOME']\n\n def __del__(self):\n self.db.close()\n\n def jiexi(self, lists):\n pat = {\n u\"\\ue60d\": 0,\n u\"\\ue603\": 0,\n u\"\\ue616\": 0,\n u\"\\ue60e\": 1,\n u\"\\ue618\": 1,\n u\"\\ue602\": 1,\n u\"\\ue605\": 2,\n u\"\\ue610\": 2,\n u\"\\ue617\": 2,\n u\"\\ue611\": 3,\n u\"\\ue604\": 3,\n u\"\\ue61a\": 3,\n u\"\\ue606\": 4,\n u\"\\ue619\": 4,\n u\"\\ue60c\": 4,\n u\"\\ue60f\": 5,\n u\"\\ue607\": 5,\n u\"\\ue61b\": 5,\n u\"\\ue61f\": 6,\n u\"\\ue612\": 6,\n u\"\\ue608\": 6,\n u\"\\ue61c\": 7,\n u\"\\ue60a\": 7,\n u\"\\ue613\": 7,\n 
u\"\\ue60b\": 8,\n u\"\\ue61d\": 8,\n u\"\\ue614\": 8,\n u\"\\ue615\": 9,\n u\"\\ue61e\": 9,\n u\"\\ue609\": 9,\n \"w\": \"w\",\n \".\": \".\" }\n _li = list()\n for i in lists:\n if str(i).strip():\n i = i.replace(u'', \"\").strip()\n i = i.replace(u'', \"\").strip()\n i = i.replace(u'', \"\").strip()\n i = pat.get(i, i)\n _li.append(str(i))\n return \"\".join(_li)\n\n\n def fetch(self, url):\n try:\n html = requests.get(url, headers = self.header).text\n except Exception as e:\n print('error:')\n print(e)\n html = None\n return html\n\n\n def spider(self, uid):\n html = self.fetch(\"https://www.douyin.com/share/user/%s\" % uid)\n xbody = Selector(text = html)\n stmp = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n name = self.uids[uid]\n\n douyinID = xbody.xpath(\"//p[@class='shortid']\").extract_first()\n douyinID = re.findall(r'>([\\s\\S]+?)<', douyinID)\n douyinID = self.jiexi(douyinID).replace(u\"抖音ID:\", '').strip()\n # print('ID ', douyinID)\n\n douyinSID = uid\n\n nickname = xbody.xpath(\"//p[@class='nickname']/text()\").extract_first()\n # print('昵称', nickname)\n\n works = xbody.xpath(\"//div[@class='user-tab active tab get-list']/span\").extract_first()\n works = re.findall(r'>([\\s\\S]+?)<', works)\n works = int(self.jiexi(works).strip())\n # print('作品', works)\n\n like = xbody.xpath(\"//div[@class='like-tab tab get-list']/span\").extract_first()\n like = re.findall(r'>([\\s\\S]+?)<', like)\n like = int(self.jiexi(like).strip())\n # print('喜欢', like)\n\n follow = xbody.xpath(\"//span[contains(@class,'focus block')]/span[@class='num']\").extract_first()\n follow = re.findall(r'>([\\s\\S]+?)<', follow)\n follow = int(self.jiexi(follow))\n # print('关注', follow)\n\n fans = xbody.xpath(\"//span[contains(@class,'follower block')]/span[@class='num']\").extract_first()\n fans = re.findall(r'>([\\s\\S]+?)<', fans)\n fans = int(self.jiexi(fans))\n # print('粉丝', fans)\n\n liked = xbody.xpath(\"//span[contains(@class,'liked-num block')]/span[@class='num']\").extract_first()\n liked = re.findall(r'>([\\s\\S]+?)<', liked)\n liked = int(self.jiexi(liked))\n # print('获赞', liked)\n\n sql_search = \"select stmp, name, works, `like`, follow from douyin where name = '%s' order by stmp DESC limit 1\" % name\n cursor = self.db.cursor()\n cursor.execute(sql_search)\n predata = cursor.fetchone() # 前面的数据\n diff = ''\n flag = 0\n if predata is not None:\n if predata[2] != works:\n diff += ('%-6s: %5d ==> %-5d\\n' % ('works', predata[2], works))\n flag |= 0b001\n if predata[3] != like:\n diff += ('%-6s: %5d ==> %-5d\\n' % ('like', predata[3], like))\n flag |= 0b010\n if predata[4] != follow:\n diff += ('%-6s: %5d ==> %-5d\\n' % ('follow', predata[4], follow))\n flag |= 0b100\n if diff != '':\n fname = time.strftime(name + \".%m-%d_%H-%M.txt\", time.localtime())\n fpath = os.path.join(self.home, 'Desktop', fname)\n with open(fpath,'w') as f:\n f.write(diff)\n # os.system('say , do check ' + name)\n sql_insert = \"insert into douyin values(DEFAULT, '%s', '%s', '%s', '%s', '%d', '%d', '%d', '%d', '%d', '%d', '%s')\" % (stmp, name, douyinID, douyinSID, works, like, follow, fans, liked, flag, nickname)\n cursor.execute(sql_insert)\n self.db.commit()\n\n\n def start(self):\n for uid in self.uids:\n self.spider(uid)\n \n\nif __name__ == '__main__':\n douyin().start()","sub_path":"Users/yaccai/iconfig/launch/check/douyin.py","file_name":"douyin.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"24860066","text":"'''\nExecution:\nReal --> dispel4py simple dispel4py_RA.pgm_story.py -d '{\"streamProducer\": [ {\"input\": \"IV.MA9..HHR.START.OTLOC.SAC.20.50.real\"} ] }'\nSynth --> dispel4py simple dispel4py_RA.pgm_story.py -d '{\"streamProducer\": [ {\"input\": \"IV.MA9.HXR.semv.sac.20.50.synt\"} ] }'\n\n\nComparison:\ndispel4py simple dispel4py_RA.pgm_story.py -d '{\"streamProducerReal\": [ {\"input\": \"IV.MA9..HHR.START.OTLOC.SAC.20.50.real\"} ], \"streamProducerSynth\": [ {\"input\": \"IV.MA9.HXR.semv.sac.20.50.synt\"} ] }'\n\nOutput:\nWriteStream3: output_data is {'GroundMotion': {'stream': 'IV.MA9..HHR.START.OTLOC.SAC.20.50.real', 'ty': 'velocity', 'p_norm': 'max', 'pgd': '0.0006945877', 'pgv': '0.0002320527', 'pga': '0.00013708159', 'dmp_spec_acc': '0.00032428280150622804'}}\n\n'''\n\nfrom dispel4py.core import GenericPE\nfrom dispel4py.base import BasePE, IterativePE, ConsumerPE, create_iterative_chain\nfrom dispel4py.workflow_graph import WorkflowGraph\n\nfrom obspy.core.stream import read\nfrom obspy.signal.invsim import corn_freq_2_paz, simulate_seismometer\nfrom obspy.signal import differentiate_and_integrate as di\n\nimport math\nimport numpy as np\nimport os\nimport json\nfrom collections import defaultdict\n\ndef calculate_norm(stream):\n station = stream[0].stats.station\n channels = set()\n for tr in stream:\n if station == tr.stats.station:\n channels.add(tr.stats.channel[-1])\n else:\n return None\n\n data_mean = None\n data_max = None\n if channels < set(['R','T']) or channels < set(['N','E']):\n\n if len(stream) == 1:\n return stream[0].data.copy(), stream[0].data.copy(), None\n\n for tr in stream:\n d = tr.data.copy()\n if data_mean is None:\n data_mean = np.square(d)\n data_max = np.abs(d)\n else:\n data_mean = data + np.square(d)\n data_max = data + np.abs(d)\n\n data_mean = np.sqrt(data)\n data_max = np.max(data)\n\n return data_mean, data_max, d\n\ndef calculate_pgm(data, ty, delta):\n pgm = max(abs(data))\n if ty == 'velocity':\n pgv = pgm\n int_data = di.integrate_cumtrapz(data, delta)\n pgd = max(abs(int_data))\n grad_data = np.gradient(data, delta)\n pga = max(abs(grad_data))\n elif ty == 'displacement':\n pgd = pgm\n grad_data = np.gradient(data, delta)\n pgv = max(abs(grad_data))\n grad2_data = np.gradient(grad_data, delta)\n pga = max(abs(grad2_data))\n elif ty == 'acceleration':\n pga = pgm\n int_data = di.integrate_cumtrapz(data, delta)\n pgv = max(abs(int_data))\n int2_data = di.integrate_cumtrapz(int_data, delta)\n pgd = max(abs(int2_data))\n return pgd, pgv, pga\n\ndef calculate_damped_spectral_acc(data,delta,freq,damp,ty):\n\n samp_rate = 1.0 / delta\n t = freq * 1.0\n d = damp\n omega = (2 * math.pi * t) ** 2\n\n paz_sa = corn_freq_2_paz(t, damp=d)\n paz_sa['sensitivity'] = omega\n paz_sa['zeros'] = []\n\n if ty == 'displacement':\n data = np.gradient(data, delta)\n data = np.gradient(data, delta)\n elif ty == 'velocity':\n data = np.gradient(data, delta)\n\n data = simulate_seismometer(data, samp_rate, paz_remove=None,\n paz_simulate=paz_sa, taper=True,\n simulate_sensitivity=True, taper_fraction=0.05)\n dmp_spec_acc = max(abs(data))\n\n return dmp_spec_acc\n\n\nclass StreamProducer(IterativePE):\n\n def __init__(self, label):\n IterativePE.__init__(self)\n self.label = label\n\n def _process(self, input):\n filename = '{}/{}'.format(os.environ['STAGED_DATA'], input)\n self.write('output', [read(filename), self.label])\n\n\nclass NormPE(GenericPE):\n def __init__(self):\n GenericPE.__init__(self)\n self._add_input(\"input\")\n 
self._add_output(\"output_mean\")\n self._add_output(\"output_max\")\n\n def _process(self, data):\n stream, filename = data['input']\n data_mean, data_max, d = calculate_norm(stream)\n self.write('output_mean', [stream, filename, data_mean, 'mean'])\n self.write('output_max', [stream, filename, data_max, 'max'])\n\n\nclass PeakGroundMotion(IterativePE):\n def __init__(self,ty,freq=(0.3, 1.0, 3.0),damp=0.1):\n IterativePE.__init__(self)\n self.ty=ty\n self.frequencies = freq\n self.damp = damp\n\n def _process(self, s_data):\n stream, filename, data, p_norm = s_data\n delta = stream[0].stats.delta\n pgd, pgv, pga = calculate_pgm(data, self.ty, delta)\n dmp_spec_acc = {}\n for freq in self.frequencies:\n dmp = calculate_damped_spectral_acc(data, delta, freq, self.damp, self.ty)\n dmp_spec_acc['PSA_{}Hz'.format(freq)] = dmp.item()\n\n results = {\n 'PGD': pgd.item(),\n 'PGV': pgv.item(),\n 'PGA': pga.item(),\n 'p_norm': p_norm\n }\n results.update(dmp_spec_acc)\n self.write('output', [\n stream[0].stats.station,\n filename, stream, self.ty, results]\n )\n\n\nclass Match(GenericPE):\n def __init__(self):\n GenericPE.__init__(self)\n self._add_input('input', grouping=[0])\n self._add_output('output')\n self.store = defaultdict(lambda: {})\n\n def _process(self, data):\n station, label,stream, ty, pgm = data['input']\n p_norm = pgm['p_norm']\n self.store[(station, p_norm)][label] = stream, ty, pgm\n if len(self.store[(station, p_norm)]) >= 2:\n print('output: {} {}'.format(station, p_norm))\n self.write('output', [station, p_norm, self.store[(station, p_norm)]])\n del self.store[station, p_norm]\n\n\ndef comp(real_param, synt_param):\n result_diff = real_param - synt_param\n result_rel_diff = (real_param - synt_param)/real_param\n return result_diff, result_rel_diff\n\n\nclass WriteGeoJSON(ConsumerPE):\n def __init__(self):\n ConsumerPE.__init__(self)\n\n def _process(self, data):\n station, p_norm, matching_data = data\n\n difference = { }\n relative_difference = {}\n stream_r, ty_r, pgm_r = matching_data['real']\n stream_s, ty_s, pgm_s = matching_data['synth']\n try:\n sac = stream_r[0].stats.sac\n coordinates = [sac.stla.item(), sac.stlo.item()]\n except:\n coordinates = []\n for param in pgm_r:\n if param == 'p_norm':\n continue\n diff, rel_diff = comp(pgm_r[param], pgm_s[param])\n difference[param] = diff\n relative_difference[param] = rel_diff\n\n output_dir = os.environ['OUTPUT']\n if not os.path.exists(output_dir):\n try:\n os.makedirs(output_dir)\n except:\n pass\n output_data={\n \"type\": \"Feature\",\n \"properties\": {\n \"station\": station,\n \"data\": pgm_r,\n \"synt\": pgm_s,\n \"difference\": difference,\n \"relative_difference\": relative_difference,\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": coordinates\n }\n }\n }\n # self.log(\"output_data is %s\" % json.dumps(output_data))\n filename = \"{}/{}_{}.json\".format(output_dir, station, p_norm)\n with open(filename, 'w') as outfile:\n json.dump(output_data, outfile)\n\n\nstreamProducerReal=StreamProducer('real')\nstreamProducerReal.name=\"streamProducerReal\"\nstreamProducerSynth=StreamProducer('synth')\nstreamProducerSynth.name='streamProducerSynth'\nnorm=NormPE()\npgm_mean=PeakGroundMotion('velocity')\npgm_max=PeakGroundMotion('velocity')\nmatch = Match()\nwrite_stream = WriteGeoJSON()\n\n\ngraph = WorkflowGraph()\ngraph.connect(streamProducerReal, 'output', norm,'input')\ngraph.connect(streamProducerSynth, 'output', norm,'input')\ngraph.connect(norm, 'output_mean', pgm_mean,'input')\ngraph.connect(norm, 
'output_max', pgm_max,'input')\ngraph.connect(pgm_max, 'output', match, 'input')\ngraph.connect(pgm_mean, 'output', match, 'input')\ngraph.connect(match,'output',write_stream,'input')\n","sub_path":"processing_elements/CWL_total_staged/dispel4py_RA.pgm_story.py","file_name":"dispel4py_RA.pgm_story.py","file_ext":"py","file_size_in_byte":8198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"109790470","text":"import pygame\npygame.init()\n#创建窗口\nscreen=pygame.display.set_mode((480,700))\n#加载背景图像\nbg=pygame.image.load(\"./images/background.png\")\nscreen.blit(bg,(0,0))\npygame.display.update()\n#加载飞机图像\nplane=pygame.image.load(\"./images/me1.png\")\nscreen.blit(plane,(185,500))\npygame.display.update()\n#创建时钟对象\nclock=pygame.time.Clock()\n#定义rect记录飞机初始位置\nplane_rect=pygame.Rect(185,500,102,126)\n#游戏循环\nwhile True:\n #游戏循环内部执行频率\n clock.tick(60)\n #修改飞机位置\n plane_rect.y -= 1\n if plane_rect.y+plane_rect.height<=0:\n plane_rect.y=700\n #绘制修改后图像\n screen.blit(bg,(0,0))\n screen.blit(plane,plane_rect)\n #更新显示\n pygame.display.update()\npygame.quit()","sub_path":"text/planegame_04_飞机循环.py","file_name":"planegame_04_飞机循环.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"575236298","text":"#!/usr/bin/env python3\n\nclass Solution:\n def uniqueOccurrences(self, arr) -> bool:\n from collections import Counter\n v = Counter(arr).values()\n return len(v) == len(set(v))\n \n\ns = Solution()\ndef test(arr, expected):\n assert s.uniqueOccurrences(arr) == expected\n\ntest([1,2,2,1,1,3], True)\ntest([1], True)\ntest([1,2], False)\ntest([-3,0,1,-3,1,1,1,-3,10,0], True)","sub_path":"1207_unique_number_of_occurrences.py","file_name":"1207_unique_number_of_occurrences.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"327630202","text":"from tree import TypeTree, volpe_assert, get_obj_key_value\nfrom volpe_types import is_int, is_flt, is_char\n\n\ndef math(self, tree: TypeTree):\n values = self.visit_children(tree)\n if is_int(tree.return_type):\n return getattr(self, tree.data + \"_int\")(values)\n if is_char(tree.return_type):\n # Use unsigned division and modulus for chars\n if tree.data in [\"div\", \"mod\"]: \n return getattr(self, tree.data + \"_uint\")(values)\n return getattr(self, tree.data + \"_int\")(values)\n if is_flt(tree.return_type):\n return getattr(self, tree.data + \"_flt\")(values)\n assert False, \"can't happen\"\n\n\ndef comp(self, tree: TypeTree):\n values = self.visit_children(tree)\n if is_int(tree.children[0].return_type) or is_char(tree.children[0].return_type):\n return getattr(self, tree.data + \"_int\")(values)\n if is_flt(tree.children[0].return_type):\n return getattr(self, tree.data + \"_flt\")(values)\n assert False, \"can't happen\"\n\n\ndef assign(self, tree: TypeTree, value):\n if tree.data == \"object\":\n for i, child in enumerate(tree.children):\n key, attribute = get_obj_key_value(child, i)\n index = list(value.type.type_dict.keys()).index(key)\n assign(self, attribute, self.builder.extract_value(value, index))\n\n elif tree.data == \"attribute\":\n obj = tree.children[0].return_type\n index = list(obj.type_dict.keys()).index(tree.children[1])\n value = self.builder.insert_value(self.visit(tree.children[0]), value, index)\n # update scope\n assign(self, tree.children[0], value)\n\n elif tree.data == \"array\":\n for i, 
child in enumerate(tree.children):\n assign(self, child, self.builder.extract_element(value, i))\n\n elif tree.data == \"array_index\":\n array, index = self.visit_children(tree)\n new_array = self.builder.insert_element(self.visit(tree.children[0]), value, index)\n assign(self, tree.children[0], new_array)\n\n else:\n volpe_assert(tree.data == \"symbol\", f\"cannot assign to {tree.data}\", tree)\n name = tree.children[0].value\n self.local_scope[name] = value\n","sub_path":"volpe/builder_utils.py","file_name":"builder_utils.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"100647855","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nfrom sklearn_porter import Porter\n\n\niris_data = load_iris()\nX = iris_data.data\ny = iris_data.target\n\nclf = KNeighborsClassifier(algorithm='brute',\n n_neighbors=3,\n weights='uniform')\nclf.fit(X, y)\n\nporter = Porter(clf, language='js')\noutput = porter.export(export_data=True)\nprint(output)\n\n\"\"\"\nif (typeof XMLHttpRequest === 'undefined') {\n var XMLHttpRequest = require(\"xmlhttprequest\").XMLHttpRequest;\n}\n\nvar KNeighborsClassifier = function(jsonFile) {\n this.data = undefined;\n\n var Neighbor = function(y, dist) {\n this.y = y;\n this.dist = dist;\n };\n\n var promise = new Promise(function(resolve, reject) {\n var httpRequest = new XMLHttpRequest();\n httpRequest.onreadystatechange = function() {\n if (httpRequest.readyState === 4) {\n if (httpRequest.status === 200) {\n resolve(JSON.parse(httpRequest.responseText));\n } else {\n reject(new Error(httpRequest.statusText));\n }\n }\n };\n httpRequest.open('GET', jsonFile, true);\n httpRequest.send();\n });\n\n var compute = function(temp, cand, q) {\n var dist = 0.,\n diff;\n for (var i = 0, l = temp.length; i < l; i++) {\n \t diff = Math.abs(temp[i] - cand[i]);\n \t if (q==1) {\n \t dist += diff;\n \t } else if (q==2) {\n \t dist += diff*diff;\n \t } else if (q==Number.POSITIVE_INFINITY) {\n \t if (diff > dist) {\n \t dist = diff;\n \t }\n \t } else {\n \t dist += Math.pow(diff, q);\n }\n }\n if (q==1 || q==Number.POSITIVE_INFINITY) {\n return dist;\n } else if (q==2) {\n return Math.sqrt(dist);\n } else {\n return Math.pow(dist, 1. / q);\n }\n };\n\n this.predict = function(features) {\n return new Promise(function(resolve, reject) {\n promise.then(function(data) {\n if (typeof this.data === 'undefined') {\n this.data = data;\n this.nTemplates = this.data.X.length;\n }\n var classIdx = 0, i, dist;\n if (this.data.nNeighbors == 1) {\n var minDist = Number.POSITIVE_INFINITY;\n for (i = 0; i < this.data.nTemplates; i++) {\n dist = compute(this.data.X[i], features, this.data.power);\n if (dist <= minDist) {\n minDist = dist;\n classIdx = this.data.y[i];\n }\n }\n } else {\n var classes = new Array(this.data.nClasses).fill(0);\n var dists = [];\n for (i = 0; i < this.nTemplates; i++) {\n dist = compute(this.data.X[i], features, this.data.power);\n dists.push(new Neighbor(this.data.y[i], dist));\n }\n dists.sort(function compare(n1, n2) {\n return (n1.dist < n2.dist) ? -1 : 1;\n });\n for (i = 0; i < this.data.kNeighbors; i++) {\n classes[dists[i].y]++;\n }\n for (i = 0; i < this.data.nClasses; i++) {\n classIdx = classes[i] > classes[classIdx] ? 
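The sklearn-porter example above embeds a JavaScript template that re-implements brute-force k-NN prediction: distance to every stored sample, then a majority vote over the k nearest. A Python sketch of that same vote logic, kept only as an illustration — sklearn's `KNeighborsClassifier` remains the reference implementation:

```python
import numpy as np

def knn_predict(X_train, y_train, x, k=3):
    """Brute-force k-NN vote (Euclidean distance, majority vote),
    mirroring the logic of the exported JavaScript template."""
    dists = np.sqrt(((X_train - x) ** 2).sum(axis=1))  # distance to every template
    nearest = np.argsort(dists)[:k]                    # indices of the k closest
    votes = np.bincount(y_train[nearest])              # count labels among them
    return int(np.argmax(votes))

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
y = np.array([0, 0, 1, 1])
print(knn_predict(X, y, np.array([0.2, 0.1])))  # -> 0
```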
i : classIdx;\n }\n }\n resolve(classIdx);\n }, function(error) {\n reject(error);\n });\n });\n };\n\n};\n\nif (typeof process !== 'undefined' && typeof process.argv !== 'undefined') {\n if (process.argv[2].trim().endsWith('.json')) {\n\n // Features:\n var features = process.argv.slice(3);\n\n // Parameters:\n var json = process.argv[2];\n\n // Estimator:\n var clf = new KNeighborsClassifier(json);\n\n // Prediction:\n clf.predict(features).then(function(prediction) {\n console.log(prediction);\n });\n\n }\n}\n\"\"\"\n","sub_path":"examples/estimator/classifier/KNeighborsClassifier/js/basics_imported.py","file_name":"basics_imported.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"614995519","text":"# -*- coding: utf-8 -*-\n# Soohwan Kim @sooftware\n# This source code is licensed under the Apache 2.0 License license found in the\n# LICENSE file in the root directory of this source tree\n\nimport torch\nimport numpy as np\nfrom tacotron2.model.tacotron2 import Tacotron2\nfrom .args import DefaultArgument\n\nbatch_size = 3\nseq_length = 3\n\ninputs = torch.LongTensor(np.arange(batch_size * seq_length).reshape(batch_size, seq_length))\ninput_lengths = torch.LongTensor([3, 3, 2])\ntargets = torch.FloatTensor(batch_size, 100, 80).uniform_(-0.1, 0.1)\n\nargs = DefaultArgument()\nmodel = Tacotron2(args)\noutput = model(inputs, targets, input_lengths)\n\nprint(model)\nprint(output)\n","sub_path":"test/test_tacotron2.py","file_name":"test_tacotron2.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"133077646","text":"import cv2\n\ndef find(img,x,y):\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n dst = cv2.inRange(gray,x,y)\n\n cv2.imshow(\"2\",dst)\n\n cnt = cv2.findContours(dst,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n c = max(cnt,key = cv2.contourArea)\n\n ((x,y),radius) = cv2.minEnclosingCircle(c)\n\n M = cv2.moments(c)\n print(M)\n center = (int(M[\"m10\"]/(0.01+M[\"m00\"])), int(M[\"m01\"]/(0.01+M[\"m00\"]))) \n ptx=int(M[\"m10\"]/(0.01+M[\"m00\"]))\n pty=int(M[\"m01\"]/(0.01+M[\"m00\"]))\n\n cv2.circle(img,center,20,(255,0,0),3)\n\n cv2.imshow(\"after\",img)\n\n cv2.waitKey(0)\n\n\n\nimg = cv2.imread(\"t.jpeg\")\n\nfind(img,200,255)\n","sub_path":"Python/task1-3/c3/Find_C.py","file_name":"Find_C.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"15899671","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\nimport sys\nfrom sklearn.datasets import load_svmlight_file\nimport random\nfrom datetime import datetime\nimport math\n\ndef main():\n\n\t# Get training file name from the command line\n\ttraindatafile = sys.argv[1];\n\t# For how many iterations do we wish to execute SCD?\n\tn_iter = int(sys.argv[2]);\n\t# After how many iterations do we want to timestamp?\n\tspacing = int(sys.argv[3]);\n\t\n\t# The training file is in libSVM format\n\ttr_data = load_svmlight_file(traindatafile);\n\n\tXtr = tr_data[0]; # Training features in sparse format\n\tYtr = tr_data[1]; # Training labels\n\t\n\t# We have n data points each in d-dimensions\n\tn, d = Xtr.get_shape();\n\t\n\t# The labels are named 1 and 2 in the data set. 
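The `Find_C.py` record above locates a bright blob by thresholding, picking the largest contour, and reading its centroid off the image moments (`m10/m00`, `m01/m00`). A runnable sketch of the same pipeline on a synthetic image; the `[-2]` indexing mirrors the original's trick for tolerating both OpenCV 3 and OpenCV 4 return shapes:

```python
import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(img, (120, 80), 30, 255, -1)        # one bright blob to find

mask = cv2.inRange(img, 200, 255)              # threshold to a binary mask
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
c = max(contours, key=cv2.contourArea)         # keep the largest region

M = cv2.moments(c)
cx = int(M["m10"] / (M["m00"] + 1e-5))         # epsilon guards empty masks
cy = int(M["m01"] / (M["m00"] + 1e-5))
print(cx, cy)                                  # ~ (120, 80)
```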
Convert them to our standard -1 and 1 labels\n\tYtr = 2*(Ytr - 1.5);\n\tYtr = Ytr.astype(int);\n\t#convert Ytr into csr matrix for calculatioins\n\tYtr=csr_matrix(Ytr).T;\n\t\n\t# Optional: densify the features matrix.\n\t# Warning: will slow down computations\n\tX = Xtr.toarray();\n\t\n\t# Initialize model\n\t# For dual SCD, you will need to maintain d_alpha and w\n\t# Note: if you have densified the Xt matrix then you can initialize w as a NumPy array\n\tw = csr_matrix((1, d));\n\td_alpha = np.zeros((n,));\n\t\n\t# We will take a timestamp after every \"spacing\" iterations\n\ttime_elapsed = np.zeros(math.ceil(n_iter/spacing));\n\ttick_vals = np.zeros(math.ceil(n_iter/spacing));\n\tobj_val = np.zeros(math.ceil(n_iter/spacing));\n\t\n\ttick = 0;\n\t\n\tttot = 0.0;\n\tt_start = datetime.now();\n\t\n\tfor t in range(n_iter):\t\t\n\t\t### Doing dual SCD ###\n\t\t\n\t\t# Choose a random coordinate from 0 to n-1\n\t\ti_rand = random.randint(0,n-1);\n\t\t\n\t\t# Store the old and compute the new value of alpha along that coordinate\n\t\td_alpha_old = d_alpha[i_rand];\n\t\tQ=X[i_rand].T.dot(X[i_rand]);\n\t\t#print(Xtr[i_rand].shape)\n\t\t#print(w.shape)\n\t\tA=w*Xtr[i_rand].T\n\t\tDelta=A*(Ytr[i_rand]).toarray()-1;\n\t\td_alpha[i_rand] = min(max(d_alpha_old-Delta/Q,0),1);\n\t\t\n\t\t# Update the model - takes only O(d) time!\n\t\tw = w + (d_alpha[i_rand] - d_alpha_old)*Ytr[i_rand]*Xtr.getrow(i_rand);\n\t\t\n\t\t# Take a snapshot after every few iterations\n\t\t# Take snapshots after every spacing = 5000 or so SCD iterations since they are fast\n\t\t# if t%spacing == 0:\n\t\t# \t# Stop the timer - we want to take a snapshot\n\t\t# \tt_now = datetime.now();\n\t\t# \tdelta = t_now - t_start;\n\t\t# \ttime_elapsed[tick] = ttot + delta.total_seconds();\n\t\t# \tttot = time_elapsed[tick];\n\t\t# \ttick_vals[tick] = tick;\n\t\t# \tip=Xtr.dot(w.T);\n\t\t# \thinge=1-(Ytr.multiply(ip)).toarray();\n\t\t# \tgrmlt=np.sign(hinge);\n\t\t# \thinge=(hinge+hinge*grmlt)/2;\n\t\t# \tobj_val[tick] = 0.5* w.dot(w.T) + hinge.sum(axis=0); # Calculate the objective value f(w) for the current model w^t or the current averaged model \\bar{w}^t\n\t\t# \tprint(delta.total_seconds(),obj_val[tick]);\n\t\t# \ttick = tick+1;\n\t\t# \t# Start the timer again - training time!\n\t\t# \tt_start = datetime.now();\n\t\t\t\n\tw_final = w.toarray();\n\tnp.save(\"model_SCD.npy\", w_final);\n\t# np.save(\"obj_val_SCD.npy\",obj_val);\n\t# np.save(\"time_elapsed_SCD.npy\",time_elapsed);\n\t\t\nif __name__ == '__main__':\n main()","sub_path":"CS771/Assignments/assn2/solver_SCD.py","file_name":"solver_SCD.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"242430318","text":"class Solution(object):\n def subarraySum(self, nums, k):\n count = 0\n sums = 0\n d = dict()\n d[0] = 1\n\n for i in range(len(nums)):\n sums += nums[i]\n count += d.get(sums - k, 0)\n d[sums] = d.get(sums, 0) + 1\n\n return (count)\nif __name__ == '__main__':\n f = Solution()\n nums = [-1,-1,1]\n k = 0\n print(f.subarraySum(nums,k))","sub_path":"FB/subarrayEqK.py","file_name":"subarrayEqK.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"591020224","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\n\n# create a synthetic dataset and save to data.csv\nif __name__ == '__main__':\n # create a linear series y= 10 + 0.5 * x plus random gaussian noise\n n = 
100\n x = np.random.uniform(0, 5, n)\n y = 10 + 0.5 * x + np.random.normal(0, 0.2, n)\n\n # make outliers\n mean = np.mean(y)\n sd = np.std(y)\n cutoff = [2.4, 2.9, 3.4]\n for i in range(len(cutoff)):\n y[i * 2] = mean + cutoff[i] * sd\n y[i * 2 + 1] = mean - cutoff[i] * sd\n\n # save file\n df = pd.DataFrame(np.column_stack((x, y)), columns=['x', 'y'])\n df.to_csv('data.csv', index=False)\n","sub_path":"example/simple/gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"304470490","text":"#! C:/Bat/python.bat -3.10\nimport sys\nimport os\n\n\ndef getdirs(repertoire):\n dirs = []\n files = []\n try:\n for fichier in os.scandir(repertoire):\n if fichier.is_dir():\n if os.path.exists(os.path.join(repertoire, fichier.name, \"__init__.py\")):\n dirs.append(fichier.name)\n\n elif fichier.name.endswith(\".py\"):\n files.append(fichier.name)\n\n print(\"-\", repertoire, \"(\", len(dirs) + len(files), \")\")\n for rep in sorted(dirs):\n print(f\" /{rep}\")\n for file in sorted(files):\n print(f\" > {file}\")\n print()\n\n except NotADirectoryError:\n pass\n\n\nif True:\n print(\"Liste des modules :\")\n for module in sorted(sys.modules):\n if module[0] != \"_\":\n print(\"-\", f\"{module:20} :\", sys.modules[module])\n\nif sys.version_info[:2] < (3, 10):\n print(\"Veuillez vérifier que vous utilisez la version 3.10 de python\")\n exit(1)\n\nprint(\"Current diretory:\", os.getcwd())\nprint(\"Version de Python:\", sys.version)\n\nprint(\"\\nRecherche des modules :\")\nfor chemin in sys.path:\n getdirs(chemin)\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"172832071","text":"numList = [9,8,7,6,5,4,3,2,1]\n\ndef quic_sort(L, left, right):\n if left <= right:\n key = L[left]\n i = left\n j = right\n while i < j:\n while i < j and key <= L[j]:\n j -= 1\n L[i] = L[j]\n while i < j and L[i] <= key:\n i += 1\n L[j] = L[i]\n L[i] = key\n quic_sort(L, left, i - 1)\n quic_sort(L, i + 1, right)\n\nif __name__ == '__main__':\n quic_sort(numList, 0, len(numList) - 1)\n print(numList)\n\n","sub_path":"quic_sort.py","file_name":"quic_sort.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"174423476","text":"#!/usr/bin/env python3\n\nfrom datetime import date, datetime\n\nfrom telethon import TelegramClient\nfrom telethon.tl.functions.channels import GetParticipantsRequest\nfrom telethon.tl.functions.contacts import ResolveUsernameRequest\nfrom telethon.tl.types import ChannelParticipantsSearch\n\nfrom IryoAirdrop.groupUsers import config\n\n\nclass TelegramGroupChecker():\n client = None\n\n def json_serial(obj):\n \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n\n LIMIT = 100\n def connect(self):\n self.client = TelegramClient(config.phone, config.api_id, config.api_hash)\n self.client.connect()\n\n print (\"Is user connected:\" + \"YES\" if self.client.is_user_authorized() else \" NO\")\n\n if not self.client.is_user_authorized():\n self.client.send_code_request(config.phone)\n self.client.sign_in(code=int(input('Enter code: ')))\n\n 
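The `subarraySum` record a little earlier counts subarrays summing to `k` in one pass by keeping a running prefix sum and a map of previously seen prefix sums. A commented sketch of that technique:

```python
def count_subarrays_with_sum(nums, k):
    """Prefix-sum counting as in subarraySum above: an O(n) single pass
    instead of the O(n^2) scan over all start/end pairs."""
    seen = {0: 1}          # empty prefix, so subarrays starting at index 0 count
    total, count = 0, 0
    for x in nums:
        total += x
        # any earlier prefix equal to (total - k) closes a subarray summing to k
        count += seen.get(total - k, 0)
        seen[total] = seen.get(total, 0) + 1
    return count

print(count_subarrays_with_sum([-1, -1, 1], 0))  # -> 1
```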
#client.sign_in(\"\"\"phone=config.phone\"\"\")\n #client.start(phone=config.phone)\n #me = client.sign_in(code=int(input('Enter code: ')))\n\n def isUserJoined(self, userID, channelName):\n try:\n LIMIT = 200\n channel = self.client(ResolveUsernameRequest(channelName)).chats[0]\n\n offset = 0\n output = []\n while True:\n participants = self.client(GetParticipantsRequest(\n channel, ChannelParticipantsSearch(''), offset, LIMIT, hash=0))\n if not participants.users:\n break\n\n offset += len(participants.users)\n\n for user in participants.users:\n if userID == user.id:\n return True\n return False\n except Exception as e:\n return 0","sub_path":"IryoAirdrop-master/groupUsers/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"436900554","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/unimr/red5/protectedvod/utils.py\n# Compiled at: 2009-08-19 12:31:49\nfrom zope.interface import implements\nfrom Acquisition import aq_inner\nfrom Products.Five.browser import BrowserView\nfrom Products.PythonScripts.standard import url_quote_plus\nfrom Products.CMFCore.utils import getToolByName\nfrom DateTime import DateTime\nfrom plone.memoize.view import memoize\nfrom interfaces import IRed5ProtectedVodTool\nimport logging, hmac\nlogger = logging.getLogger('unimr.red5.protectedvod')\n\nclass Red5ProtectedVodTool(BrowserView):\n \"\"\"A view that implements a hmac algorithm for url signatures\n in interaction with a red5 streaming server\n \"\"\"\n __module__ = __name__\n implements(IRed5ProtectedVodTool)\n\n def netConnectionUrl(self, fieldname='file'):\n \"\"\" returns the netConnectionUrl including path, signature and expire date\"\"\"\n data = self._signature_data(fieldname=fieldname)\n return '%(server_url)s/%(path)s/%(signature)s/%(expires)s' % data\n\n def clip(self, fieldname='file'):\n \"\"\" return clip's name \"\"\"\n data = self._signature_data(fieldname=fieldname)\n return '%(filename)s' % data\n\n @memoize\n def _signature_data(self, fieldname='file'):\n context = aq_inner(self.context)\n request = self.request\n properties_tool = getToolByName(context, 'portal_properties')\n hmac_properties = getattr(properties_tool, 'red5_protectedvod_properties', None)\n red5_server_url = hmac_properties.getProperty('red5_server_url')\n red5_server_url = red5_server_url.rstrip('/')\n secret_phrase = hmac_properties.getProperty('secret')\n try:\n ttl = int(hmac_properties.getProperty('ttl'))\n except ValueError:\n ttl = 60\n\n clientip = request.get('HTTP_X_FORWARDED_FOR', None)\n if not clientip:\n clientip = request.get('REMOTE_ADDR', None)\n expires = '%08x' % (DateTime().timeTime() + ttl)\n (path, filename) = self._fss_info(fieldname)\n sign_path = '/%s/' % (path,)\n signature = hmac_hexdigest(secret_phrase, [sign_path, filename, clientip, expires])\n data = {'server_url': red5_server_url, 'sign_path': sign_path, 'path': path, 'filename': filename, 'expires': expires, 'clientip': clientip, 'signature': url_quote_plus(signature)}\n logger.debug(data)\n return data\n\n def _fss_info(self, fieldname='file'):\n context = aq_inner(self.context)\n field = context.getField(fieldname)\n storage = field.storage\n try:\n info = storage.getFSSInfo(fieldname, context)\n strategy = storage.getStorageStrategy(fieldname, context)\n props = 
storage.getStorageStrategyProperties(fieldname, context, info)\n except AttributeError:\n logger.error('cannot retrieve fss properties. fss installed?')\n return\n\n valueDirectoryPath = strategy.getValueDirectoryPath(**props)\n valueFilename = strategy.getValueFilename(**props)\n length = len(strategy.storage_path.split('/'))\n path = ('/').join(valueDirectoryPath.split('/')[length - 1:]).strip('/')\n return (\n path, valueFilename)\n\n\ndef hmac_hexdigest(secret, update_list):\n \"\"\" returns a hex encoded digest of signature \"\"\"\n mac = hmac.new(secret)\n for s in update_list:\n mac.update(s)\n\n return mac.hexdigest()","sub_path":"pycfiles/unimr.red5.protectedvod-0.1rc1_r96721-py2.4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"198426875","text":"from mstrio.utils.helper import response_handler\n\n\ndef projects(connection, error_msg=None, verbose=False):\n \"\"\"\n Args:\n connection: MicroStrategy REST API connection object\n verbose (bool, optional): Verbosity of server responses; defaults to False.\n Returns:\n Complete HTTP response object\n \"\"\"\n\n response = connection.session.get(url=connection.base_url + '/api/projects',\n headers={'X-MSTR-ProjectID': None})\n if verbose:\n print(response.url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error connecting to project. Check project name and try again.\"\n response_handler(response, error_msg)\n return response\n","sub_path":"mstrio/api/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"343122455","text":"# coding=gbk\nimport json\nimport math\nimport os\nimport sys\nimport time\nimport time\nimport requests\nimport multiprocessing\nimport execjs\nfrom urllib.parse import urlencode\nimport re\nimport shutil\nimport js2xml\nfrom lxml import etree\nclass douyu:\n def __init__(self,query):\n self.query=query\n\n pass\n\n def loder(self,url):\n \"\"\"直接请求ts文件的url然后在写入到本地\"\"\"\n html = requests.get(url).content\n l=url.find(\".ts?\")\n\n i = url[l-7:l]\n print(i)\n folder = \"F:\\movies\\%s\" % self.query[\"owner\"]\n if os.path.isdir(folder) is False:\n os.system(\"md %s\" % folder)\n with open(r\"%s\\%07s.ts\" % (folder, i), \"wb\") as f:\n f.write(html)\n\n def ts_to_mp4(self):\n print('ts文件正在进行转录mp4......')\n folder=\"F:\\movies\\%s\" % self.query[\"owner\"]\n\n str = \"cd /d %s\\\\ && copy /b *.ts %s.mp4\"%(folder,self.query[\"title\"]) # copy /b 命令\n print(str)\n os.system(str)\n filename = folder+\"\\/\"+ self.query[\"title\"] + '.mp4'\n if os.path.isfile(filename):\n te = \"del %s\\\\*.ts\"%folder\n print(te)\n os.system(te)\n print('转换完成,祝你观影愉快')\n\n # shutil.rmtree(\"test\")\n\n def get_js(self):\n f = open(\".\\key.js\", 'r', encoding='UTF-8')\n line = f.readline()\n htmlstr = ''\n while line:\n htmlstr = htmlstr + line\n line = f.readline()\n return htmlstr\n\n\n def run(self):\n dom = requests.get(self.query[\"url\"])\n dom = etree.HTML(dom.content)\n\n jstext=\"var CryptoJS = require('crypto-js');\"+dom.xpath(\"//script\")[2].text\n ub98484234 = execjs.compile(jstext)\n data = ub98484234.call('ub98484234', self.query[\"vid\"], \"10000000000000000000000000001501\", int(time.time()))\n data = data + \"&vid=\" + self.query[\"hashId\"]\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n print(data)\n res = 
requests.post(\"https://v.douyu.com/api/stream/getStreamUrl\", data=data, headers=headers)\n print(res.json())\n list = res.json()[\"data\"][\"thumb_video\"][\"high\"][\"url\"]\n tss = requests.get(list)\n\n url = list[:list.find(\"playlist.m3u8\")]\n lis = [url + x for x in tss.text.split(\"\\n\") if x != \"\" and x[0] != \"#\"]\n pool = multiprocessing.Pool(processes=3)\n pool.map(self.loder, lis)\n pool.close()\n pool.join()\n self.ts_to_mp4()\n\nif __name__ == \"__main__\":\n query = {\n \"kw\": 7302297,\n \"page\": 1,\n \"pageSize\": 20,\n \"filterType\": 0,\n \"tabType\": 1\n\n }\n res = requests.get(\"https://www.douyu.com/japi/search/api/searchVideo?\" + urlencode(query))\n total = res.json()[\"data\"][\"total\"]\n lists = []\n for i in range(1, math.ceil(total / 20) + 1):\n query[\"page\"] = i\n print(\"current \"+str(i))\n res = requests.get(\"https://www.douyu.com/japi/search/api/searchVideo?\" + urlencode(query))\n videos = res.json()[\"data\"][\"relateVideo\"]\n lists.append(videos)\n print(len(videos))\n for v in range(len(videos)):\n temp = videos[v]\n temp[\"owner\"] = \"V\" + temp[\"owner\"]\n temp[\"title\"] = temp[\"title\"].replace(\" \", \".\").replace(\":\", \".\")+\"-\"+temp[\"hashId\"]\n\n url = temp[\"url\"]\n folder = \"F:\\movies\\%s\" % temp[\"owner\"]\n filename = folder + \"\\/\" + temp[\"title\"] + '.mp4'\n print(filename)\n if os.path.isfile(filename):\n print(temp[\"title\"] + '.mp4' + \"已存在\")\n continue\n dou = douyu(temp)\n dou.run()\n f=open(\"F:\\movies\\catalogue.json\", 'w', encoding='utf-8')\n json.dump(lists, f)\n\n\n\n\n\n","sub_path":"hack/include/douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"507404184","text":"from google.appengine.ext import db\nfrom google.appengine.tools import bulkloader\n\nfrom treasure_raider.models import Experience_limits\n\nclass Experience_limits_loader(bulkloader.Loader):\n def __init__(self):\n bulkloader.Loader.__init__(self, 'Experience_limits',\n [('key_name', str),\n ('group', str),\n ('experience_level', int),\n ('min_experience_points', int),\n ('next_experience_points', int),\n ('air_capacity_reward', int),\n ('cash_reward', int),\n ('coins_reward', int),\n ])\n\nloaders = [Experience_limits_loader]\n\n#newline characters may need to be converted with tr command\n#tr '\\r' '\\n' < macfile.txt > unixfile.txt","sub_path":"app/apps/treasure_raider/loaders/experience_limit_loader.py","file_name":"experience_limit_loader.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"311770491","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 12:04:28 2018\n\n@author: Luc\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import vonmises\nimport matplotlib.pyplot as plt\n\n\n# Create parameter space and initialize prior and likelihood\nclass PSI_RiF:\n\n def __init__(self, rods, frames, kappa_oto, kappa_ver, kappa_hor, tau):\n\n self.rods = rods\n self.frames = frames\n self.kappa_oto = kappa_oto\n self.kappa_ver = kappa_ver\n self.kappa_hor = kappa_hor\n self.tau = tau\n\n # dimensions of the 2D stimulus space\n self.rod_num = len(self.rods);\n self.frame_num = len(self.frames);\n\n # dimensions of the parameter space\n kappa_oto_num = len(self.kappa_oto);\n kappa_ver_num = len(self.kappa_ver);\n kappa_hor_num = len(self.kappa_hor);\n tau_num = len(self.tau);\n \n # 
the rods I need for the cumulative density function\n theta_rod=np.linspace(-np.pi,np.pi,10000)\n \n # allocate memory for the lookup table (P)\n P = np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num,self.rod_num,self.frame_num])\n \n self.kappa_oto_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n self.kappa_ver_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n self.kappa_hor_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n self.tau_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n \n \n for k in range(0,kappa_oto_num):\n for l in range(0,kappa_ver_num):\n for m in range(0,kappa_hor_num):\n for n in range(0,tau_num):\n kappa_oto2 = kappa_oto[k]\n kappa_ver2 = kappa_ver[l]\n kappa_hor2 = kappa_hor[m]\n tau2 = tau[n]\n kappa1 = kappa_ver2-(1-np.cos(np.abs(2*self.frames)))*tau2*(kappa_ver2-kappa_hor2)\n kappa2 = kappa_hor2+(1-np.cos(np.abs(2*self.frames)))*(1-tau2)*(kappa_ver2-kappa_hor2)\n\n for i in range(0,self.frame_num):\n \n # the context provided by the frame\n P_frame1 = vonmises.pdf(theta_rod-self.frames[i],kappa1[i])\n P_frame2 = vonmises.pdf(theta_rod-np.pi/2-self.frames[i],kappa2[i])\n P_frame3 = vonmises.pdf(theta_rod-np.pi-self.frames[i],kappa1[i])\n P_frame4 = vonmises.pdf(theta_rod-3*np.pi/2-self.frames[i],kappa2[i])\n \n P_frame = (P_frame1+P_frame2+P_frame3+P_frame4)\n P_frame = P_frame/np.sum(P_frame)\n\n \n # the otoliths\n P_oto = vonmises.pdf(theta_rod,kappa_oto2)\n \n # the upright prior\n \n # compute the cumulative density of all distributions convolved\n cdf=np.cumsum(np.multiply(P_oto, P_frame))/np.sum(np.multiply(P_oto, P_frame))\n cdf=np.nan_to_num(cdf)\n cdf[cdf==0]=1e-10 \n cdf[cdf>1.0]=1.0 \n for j in range(0,self.rod_num):\n index = np.argmax(theta_rod>=rods[j])\n P[k][l][m][n][j][i]=cdf[index]\n \n self.kappa_oto_mtx[k][l][m][n]=kappa_oto2\n self.kappa_ver_mtx[k][l][m][n]=kappa_ver2\n self.kappa_hor_mtx[k][l][m][n]=kappa_hor2\n self.tau_mtx[k][l][m][n]=tau2\n \n\n self.lookup = np.reshape(P,(kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num,self.rod_num,self.frame_num),order=\"F\")\n self.prior = np.ones(kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num)/(kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num)\n self.calcNextStim()\n\n \n def calcNextStim(self):\n \n \n # Compute posterior\n self.paxs = np.empty([self.lookup.shape[0], self.lookup.shape[1], self.lookup.shape[2]])\n self.paxf = np.empty([self.lookup.shape[0], self.lookup.shape[1], self.lookup.shape[2]])\n h = np.empty([self.frame_num, self.rod_num])\n \n self.paxs = np.einsum('i,ijk->ijk', self.prior, self.lookup)\n self.paxf = np.einsum('i,ijk->ijk', self.prior, 1 - self.lookup)\n self.paxs[self.paxs==0]=1e-10;\n self.paxf[self.paxf==0]=1e-10;\n \n ps = np.sum(self.paxs,0) \n pf = np.sum(self.paxf,0)\n \n\n self.paxs = np.einsum('jk,ijk->ijk', 1/ps, self.paxs)\n self.paxf = np.einsum('jk,ijk->ijk', 1/pf, self.paxf)\n \n self.paxs[self.paxs==0]=1e-10;\n self.paxf[self.paxf==0]=1e-10;\n\n hs = np.einsum('ijk,ijk->jk', -self.paxs, np.log(self.paxs))\n hf = np.einsum('ijk,ijk->jk', -self.paxf, np.log(self.paxf))\n\n\n\n # Compute entropy\n #hs = np.sum(-self.paxs * np.log(self.paxs),0)\n #hf = np.sum(-self.paxf * np.log(self.paxf),0)\n \n \n # Compute expected entropy\n h = ps * hs + pf * hf\n \n plt.pcolormesh(h)\n plt.show(block=False)\n \n ind = np.unravel_index(h.argmin(), h.shape) # index of smallest expected entropy\n\n \n #x_f = np.expand_dims(self.rods,axis=1) \n #x_f = np.tile(x_f,(1,self.frame_num))\n #x_f = 
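`calcNextStim` in this `PSI_RiF` class picks the next stimulus adaptively: it forms the posterior over parameters for each possible response, computes the entropy of each, and chooses the stimulus minimising the expected posterior entropy. A compact sketch of that core step, with a small epsilon assumed inside the logs for numerical safety:

```python
import numpy as np

def next_stimulus(prior, lookup):
    """Expected-entropy minimisation as in calcNextStim().
    prior:  (n_params,) probability over parameter combinations
    lookup: (n_params, n_rods, n_frames) P(success | params, stimulus)
    Returns the (rod, frame) index pair with minimal expected entropy."""
    pax_s = np.einsum('i,ijk->ijk', prior, lookup)        # joint, success outcome
    pax_f = np.einsum('i,ijk->ijk', prior, 1.0 - lookup)  # joint, failure outcome
    ps, pf = pax_s.sum(axis=0), pax_f.sum(axis=0)         # outcome probabilities
    pax_s /= ps                                           # normalise to posteriors
    pax_f /= pf
    hs = -(pax_s * np.log(pax_s + 1e-12)).sum(axis=0)     # posterior entropies
    hf = -(pax_f * np.log(pax_f + 1e-12)).sum(axis=0)
    h = ps * hs + pf * hf                                 # expectation over outcomes
    return np.unravel_index(h.argmin(), h.shape)

# toy run: 4 parameter hypotheses, 5 rods, 3 frames
rng = np.random.default_rng(0)
lut = rng.uniform(0.05, 0.95, (4, 5, 3))
print(next_stimulus(np.full(4, 0.25), lut))
```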
x_f.flatten('F')\n #y_f = np.expand_dims(self.frames,axis=0) \n #y_f = np.tile(y_f,(self.rod_num,1))\n #y_f = y_f.flatten('F')\n\n\n # Find stimulus that minimizes expected entropy\n self.stim = ([self.rods[ind[0]],self.frames[ind[1]]])\n #self.stim1_index = np.argmin(np.abs(self.rods - self.stim[0]))\n #self.stim2_index = np.argmin(np.abs(self.frames - self.stim[1]))\n self.stim1_index = ind[0]\n self.stim2_index = ind[1]\n \n \n def addData(self,response):\n\n self.stim = None\n\n # Update prior based on response\n if response == 1:\n self.prior = self.paxs[:,self.stim1_index,self.stim2_index]\n elif response == 0:\n self.prior = self.paxf[:,self.stim1_index,self.stim2_index]\n else:\n self.prior = self.prior\n\n ## WARNING: solution for value,index is not unique!\n ## take MAP instead of Expected Value\n \n #self.theta = np.array([self.kappa_oto_mtx[:,:,:,:].flatten('F'), self.kappa_ver_mtx[:,:,:,:].flatten('F'),self.kappa_hor_mtx[:,:,:,:].flatten('F'),self.tau_mtx[:,:,:,:].flatten('F')])\n # dimensions of the parameter space\n kappa_oto_num = len(self.kappa_oto);\n kappa_ver_num = len(self.kappa_ver);\n kappa_hor_num = len(self.kappa_hor);\n tau_num = len(self.tau);\n self.theta =np.array([np.reshape(self.kappa_oto_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num), np.reshape(self.kappa_ver_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num),np.reshape(self.kappa_hor_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num),np.reshape(self.tau_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num)])\n \n self.parms = np.matmul(self.theta,self.prior)\n \n diff = (self.theta.transpose()-self.parms).transpose()\n self.var_parms = np.matmul(np.power(diff,2), self.prior)\n \n self.stim = None\n self.calcNextStim()\n #self.stim1_index = np.random.randint(25)\n #self.stim2_index = np.random.randint(8)\n #self.stim = ([self.rods[self.stim1_index],self.frames[self.stim2_index]])\n # print('Variance', self.var_parms)\n return self.parms, self.var_parms\n\n \n\n\n","sub_path":"Alberts/PSI_RiF.py","file_name":"PSI_RiF.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"441974652","text":"import csv\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\n\n\ndef get_adjacent_matrix(distance_file: str, num_nodes: int, id_file: str = None, graph_type='connect') -> np.array:\n '''\n\n :param distance_file: str ,path of csv file to save the distance between nodes\n :param num_nodes:int , number of nodes in the graph\n :param id_file:str , path of txt file to save the order of the nodes\n :param graph_type:str, ['connet','distance']\n :return:\n np.array[N,N]\n '''\n A = np.zeros([num_nodes, num_nodes]) # 构造NXN节点数量\n if id_file:\n '''处理存在在矩阵中的真实点'''\n with open(id_file, 'r') as f_id:\n node_id_dict = {int(node_id): idx for idx, node_id in enumerate(f_id.read().strip().split('\\n'))}\n\n with open(distance_file, 'r') as f_d:\n f_d.readline()\n reader = csv.reader(f_d)\n for item in reader:\n if len(item) != 3:\n continue\n i, j, distance = int(item[0]), int(item[1]), float(item[2])\n # 构造邻接矩阵\n if graph_type == 'connect':\n A[node_id_dict[i], node_id_dict[j]] = 1.\n A[node_id_dict[j], node_id_dict[i]] = 1.\n elif graph_type == 'distance':\n A[node_id_dict[i], node_id_dict[j]] = 1. / distance\n A[node_id_dict[j], node_id_dict[i]] = 1. 
/ distance\n else:\n raise ValueError('graph type is not correct(connect or distance)')\n return A\n\n reader = pd.read_csv(distance_file).values\n for item in reader:\n if len(item) != 3:\n continue\n i, j, distance = int(item[0]), int(item[1]), float(item[2])\n\n # 构造邻接矩阵\n if graph_type == 'connect':\n A[i, j] = 1.\n A[j, i] = 1.\n elif graph_type == 'distance':\n A[i, j] = 1. / distance\n A[j, i] = 1. / distance\n else:\n raise ValueError('graph type is not correct(connect or distance)')\n return A\n\n\ndef get_flow_data(flow_file: str) -> np.array:\n '''\n :param flow_file:flow_file:str,path of .npz file to save the traffic flow data\n :return:\n np.array(N,T,D)\n '''\n data = np.load(flow_file)\n flow_data = data['data'].transpose([1, 0, 2])[:, :, 0][:, :, np.newaxis]\n return flow_data\n\n\nclass LoadData(Dataset):\n def __init__(self, data_path, num_nodes, divide_days, time_interval, history_length, train_mode):\n '''\n :param data_path:list ,['graph file name', 'flow data file name'],path to save the data file names;\n :param num_nodes:int, numbers of nodes;\n :param divide_days:list,[days of train data, days of test data],list to divide the original data;\n :param time_interval:int, time interval between two traffic data records(mins);\n :param history_length:int,length of history data to be used;\n :param train_mode:list,['train','test']\n '''\n\n self.data_path = data_path\n self.num_nodes = num_nodes\n self.train_mode = train_mode\n self.train_days = divide_days[0] # 59 - 14 = 45 天\n self.test_days = divide_days[1] # 7*2 天\n self.history_length = history_length # 6\n self.time_interval = time_interval # 5 min 间隔一次数据\n\n self.one_day_length = int(24 * 60 / self.time_interval)\n\n self.graph = get_adjacent_matrix(distance_file=data_path[0], num_nodes=num_nodes)\n\n self.flow_norm, self.flow_data = self.pre_process_data(data=get_flow_data(data_path[1]),\n norm_dim=1) # base , normalization\n\n def __len__(self):\n if self.train_mode == 'train':\n return self.train_days * self.one_day_length - self.history_length\n elif self.train_mode == 'test':\n return self.test_days * self.one_day_length\n else:\n raise ValueError('train mode:[{}] is not in defined'.format(self.train_mode))\n\n def __getitem__(self, index):\n '''\n :param index: int , range of dataset length [0, length-1]\n :return:\n '''\n if self.train_mode == 'train':\n index = index\n elif self.train_mode == 'test':\n index += self.train_days * self.one_day_length\n else:\n raise ValueError('train mode:[{}] is not in defined'.format(self.train_mode))\n\n data_x, data_y = LoadData.slice_data(self.flow_data, self.history_length, index, self.train_mode)\n data_x = LoadData.to_tensor(data_x) # [N,H,D]\n data_y = LoadData.to_tensor(data_y).unsqueeze(1) # [N,1,D]\n\n return {'graph': LoadData.to_tensor(self.graph), 'flow_x': data_x, 'flow_y': data_y}\n\n @staticmethod\n def slice_data(data, history_length, index, train_mode):\n if train_mode == 'train':\n start_index = index\n end_index = index + history_length\n elif train_mode == 'test':\n start_index = index - history_length\n end_index = index\n else:\n raise ValueError('train model:[{}] is not defined'.format(train_mode))\n\n data_x = data[:, start_index:end_index]\n data_y = data[:, end_index]\n return data_x, data_y\n\n @staticmethod\n def pre_process_data(data, norm_dim):\n '''\n :param data:np.array,original traffic data without normalization\n :param norm_dim:int, normalization dimension\n :return:\n norm_base,norm_data\n '''\n norm_base = 
LoadData.normalization_base(data, norm_dim)\n norm_data = LoadData.normalize_data(norm_base[0], norm_base[1], data)\n\n return norm_base, norm_data\n\n @staticmethod\n def normalization_base(data, norm_dim):\n '''\n :param data:np.array,original traffic data without normalization\n :param norm_dim:int, normalization dimension\n :return:\n max_data:np.array\n min_data:np.array\n '''\n max_data = np.max(data, norm_dim, keepdims=True) # [N,T,D],norm = 1 -> [N, 1, D]\n min_data = np.min(data, norm_dim, keepdims=True)\n return max_data, min_data\n\n @staticmethod\n def normalize_data(max_data, min_data, data):\n mid = min_data\n base = max_data - min_data\n normalized_data = (data - mid) / base\n return normalized_data\n\n @staticmethod\n def recover_data(max_data, min_data, data):\n mid = min_data\n base = max_data - min_data\n recoverd_data = data * base + mid\n\n return recoverd_data\n\n @staticmethod\n def to_tensor(data):\n return torch.tensor(data, dtype=torch.float)\n\n\nif __name__ == '__main__':\n train_Data = LoadData(data_path=['..\\\\PeMS_04\\\\PeMS04.csv', '..\\\\PeMS_04\\\\PeMS04.npz'], num_nodes=307,\n divide_days=[45, 14], time_interval=5, history_length=6, train_mode='train')\n print(train_Data)\n print(train_Data[0]['flow_x'].size())\n print(train_Data[0]['flow_y'].size())\n","sub_path":"script/traffic_dataset.py","file_name":"traffic_dataset.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"257602989","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) # converts RBG image to grayscale using very specific factors\n\nI = mpimg.imread('a_image.tif')\n# I = plt.imread(\"a_image.tif\")\n# I = plt.imread(\"test_8bit.jpg\")\n\nI_gray = np.array(rgb2gray(I))\nI_gray2 = I_gray.astype(int)\n# I_gray = int(rgb2gray(I))\nplt.imshow(I_gray, cmap = plt.get_cmap('gray'))\nplt.show()\n\nprint(I_gray.ravel)\n\n# print(f)\n#\n# plt.hist(f)\n# plt.show()\n\n# in addition, here is some more\n\n\n'''\ntemp=np.asarray(Image.open('map.jpeg'))\n\nx=temp.shape[0]\ny=temp.shape[1]*temp.shape[2]\ntemp.resize((x,y)) # a 2D array\nprint(temp)\n\n# I = plt.imread(\"a_image.tif\")\n# I = plt.imread(\"MARBLES.tif\")\nI = plt.imread(\"barbara_gray.bmp\", )\nI = np.array(I)\n\nplt.imshow(I)\nplt.show()\n\nx=I.shape[0]\ny=I.shape[1]*I.shape[2]\nI.resize((x,y)) # a 2D array\nprint(I)\n\nplt.plot(I)\nplt.show()\n\n\n\n\n\n#\n# aa= np.array([[1,2,3,4,5],[2,2,2,2,2]])\n# aaa= np.array([[5,6,7,8,9],[10,11,12,14,15]])\n# a = np.array([aa,aaa])\n# print(a)\n# print(np.shape(a))\n# b = a[0]\n# print(b)\n\nprint (I)\nprint(\"poep\")\nprint(I[0])\n\n'''","sub_path":"Tracking/Tracking.py","file_name":"Tracking.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"300676756","text":"# Author: \tSchuyler Rank\n# E-mail: \tsrank@brandeis.edu\n# Date: \t28 February 2017\n# Class:\tCS 132 - Information Retrieval @ Brandeis University\n# Desc:\t\tThis program will build an inverted index and doc-data for a movie corpus\n\nimport json, shelve, sys, math, os\nfrom boolean_terms import boolean_terms\nfrom collections import defaultdict\nfrom vs_search import vs_search\n\n\n# This class takes a Wikipedia movie corpus in JSON and turns it into four shelf files: one to hold film term vectors,\n# one to hold information on 
the movies in the corpus from which article pages can be generated, one for doc length, and\n# one for inverse document frequencies.\n# film_vectors: {film1: {term1: term_freq, term2: term_freq, ...}, film2: {...}, ...}\n# doc lengths: {movie_id1: length, movie_id2: length, ...}\nclass corpus_to_index:\n def __init__(self, corpus_file):\n # Open the file and get the corpus\n fh = open(corpus_file, 'r')\n self.__corpus = json.load(fh)\n fh.close()\n\n # Set up our class variables\n # It seems smarter to do everything in memory and then write to disk once at the end,\n # rather than write every single little thing to disk constantly as we do it\n self.__film_vectors = dict() # A dictionary from film ids to a dict of terms to weighted term frequencies\n self.__docdata = dict() # A dictionary from film ids to a dictionary of movie data\n self.__doc_lengths = dict() # A dictionary from film ids to the magnitude of the document's term vector\n self.__idf = dict() # A dictionary from terms to float log inverse document frequency\n self.__term_dict = defaultdict(list) # A dictionary from terms to a list of movie ids\n\n # An object that will handle turning strings into a list of boolean search terms\n self.__terminator = boolean_terms()\n\n # Build the index\n self.__build_index()\n\n # Write the index\n self.__write_index()\n\n # Extra credit: for each film, find the 30 most similar films. Since these won't change and it takes a long\n # time to do, it makes sense to pay the cost upfront here in the indexer and cache the results for later.\n # self.__similar = defaultdict(list) # A dictionary from films to a sorted list of tuples (sim_score, sim_film)\n # self.__find_similar()\n\n # This pairs each film with a list of similar films using vs_search\n # This function is not called, since it takes so long to run; the functionality was moved to vs_search\n def __find_similar(self):\n # First, we'll need a vs_search object to find similar movies for us\n films_file = 'data' + os.sep + 'film_vectors'\n idf_file = 'data' + os.sep + 'idf'\n term_file = 'data' + os.sep + 'term_dict'\n vs = vs_search(films_file, idf_file, term_file)\n\n # Now, for each film, find similar films using the first film's terms as a query\n all_films = self.__film_vectors.keys()\n for film_id in all_films:\n # Get a query of all the terms in the vector (which have already been run through boolean_terms,\n # so no need to run them through vs_search's terminator again).\n query = self.__film_vectors[film_id].keys()\n self.__similar[film_id].append(vs.v_search(query, all_films, 30, terminate=False))\n\n # Write the similarity database\n sim_shelf = shelve.open('data' + os.sep + 'similarity', flag='n', writeback=False)\n sim_shelf.update(self.__similar)\n sim_shelf.close()\n\n # This function goes through the corpus file and builds the term index and doc-data files\n def __build_index(self):\n # Go through the corpus one movie at a time\n for film in self.__corpus:\n self.__docdata[str(film)] = self.__corpus[film] # Fill in doc-data\n\n # Get the terms for this movie. 
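The indexer described above weights terms with sublinear term frequency (`1 + log10 tf`) times inverse document frequency (`log10(N/df)`), then length-normalises each film vector. A sketch of that scheme; note one deliberate simplification — this version normalises by the weighted vector's own norm, whereas the indexer above divides by the raw term-frequency length it accumulates in `squared_sum`:

```python
import math
from collections import Counter

def tfidf_vectors(docs):
    """Sublinear tf * idf with cosine length normalisation, so the dot
    product of two vectors is directly a cosine similarity."""
    n = len(docs)
    df = Counter(term for doc in docs for term in set(doc))
    idf = {t: math.log10(n / df[t]) for t in df}
    vectors = []
    for doc in docs:
        tf = Counter(doc)
        vec = {t: (1 + math.log10(c)) * idf[t] for t, c in tf.items()}
        norm = math.sqrt(sum(w * w for w in vec.values())) or 1.0
        vectors.append({t: w / norm for t, w in vec.items()})
    return vectors

docs = [["robot", "uprising", "robot"], ["romance", "paris"], ["robot", "romance"]]
print(tfidf_vectors(docs)[0])
```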
Terms are taken from the movie's title and its text\n terms = self.__terminator.get_terms(self.__corpus[film]['title'] + ' ' + self.__corpus[film]['text'])\n\n # Get the counts for each term and the length of the document\n vector = dict()\n squared_sum = 0\n for term in set(terms):\n term_freq = terms.count(term)\n vector[term] = 1 + math.log10(term_freq) # Store the weighted term frequency in this film's vector\n squared_sum += term_freq**2 # Sums of the squares of term freqs to be used in length\n self.__idf[term] = self.__idf.get(term, 0) + 1.0\n self.__term_dict[term].append(film)\n\n self.__film_vectors[str(film)] = vector\n\n # Add the length to the dictionary for this movie\n self.__doc_lengths[str(film)] = math.sqrt(squared_sum)\n\n # Get the inverse document frequency for all terms on the corpus\n num_films = 1.0 * len(self.__corpus)\n for term in self.__idf:\n self.__idf[term] = math.log10(num_films/self.__idf[term])\n\n # Now add idf weighting and length normalization to film vectors.\n for film in self.__film_vectors:\n for term in self.__film_vectors[film]:\n self.__film_vectors[film][term] *= self.__idf[term]\n self.__film_vectors[film][term] /= self.__doc_lengths[film]\n\n # This function writes the indexes to disk\n def __write_index(self):\n # Write doc-data to disk\n doc_shelf = shelve.open('data' + os.sep + 'doc-data', flag='n', writeback=False)\n doc_shelf.update(self.__docdata) # Since doc-data is a dictionary, we can just copy the whole thing at once\n doc_shelf.close()\n\n # Write the document index to disk\n vectors_shelf = shelve.open('data' + os.sep + 'film_vectors', flag='n', writeback=False)\n vectors_shelf.update(self.__film_vectors)\n vectors_shelf.close()\n\n # Write inverse document frequencies to disk\n lengths_shelf = shelve.open('data' + os.sep + 'idf', flag='n', writeback=False)\n lengths_shelf.update(self.__idf)\n lengths_shelf.close()\n\n # Write the term dictionary\n index_shelf = shelve.open('data' + os.sep + 'term_dict', flag='n', writeback=False)\n for term in self.__term_dict:\n postings = sorted([int(u) for u in self.__term_dict[term]]) # Sort as ints because '20' > '100'\n index_shelf[term] = [str(num) for num in postings] # Back into strings for shelf\n index_shelf.close()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n corpus_to_index(sys.argv[1])\n else:\n corpus_to_index('data' + os.sep + '2016_movies_standard.json')\n","sub_path":"vs_index.py","file_name":"vs_index.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"129219664","text":"def collatz(num) :\n while num != 1 :\n if num % 2 == 0 :\n num = num / 2\n else :\n num = num * 3 + 1\n print(int(num))\n\n\nwhile True:\n print(\"Please enter an integer: \")\n num = input()\n \n try:\n num = int(num)\n collatz(num)\n except:\n print(\"you didnt enter an integer, please try again.\")\n\n","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"649819656","text":"from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass OpenH264Conan(ConanFile):\n name = \"openh264\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.openh264.org/\"\n description = \"Open Source H.264 Codec\"\n topics = (\"h264\", \"codec\", \"video\", \"compression\", )\n license = 
\"BSD-2-Clause\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": \"False\",\n \"fPIC\": True,\n }\n\n exports_sources = \"patches/*\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def build_requirements(self):\n if self.settings.arch in (\"x86\", \"x86_64\"):\n self.build_requires(\"nasm/2.15.05\")\n if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n if self.settings.compiler == \"Visual Studio\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"build\", \"platform-msvc.mk\"),\n \"CFLAGS_OPT += -MT\",\n \"CFLAGS_OPT += -{}\".format(self.settings.compiler.runtime))\n tools.replace_in_file(os.path.join(self._source_subfolder, \"build\", \"platform-msvc.mk\"),\n \"CFLAGS_DEBUG += -MTd -Gm\",\n \"CFLAGS_DEBUG += -{} -Gm\".format(self.settings.compiler.runtime))\n if self.settings.os == \"Android\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"codec\", \"build\", \"android\", \"dec\", \"jni\", \"Application.mk\"),\n \"APP_STL := stlport_shared\",\n \"APP_STL := {}\".format(self.settings.compiler.libcxx))\n tools.replace_in_file(os.path.join(self._source_subfolder, \"codec\", \"build\", \"android\", \"dec\", \"jni\", \"Application.mk\"),\n \"APP_PLATFORM := android-12\",\n \"APP_PLATFORM := {}\".format(self._android_target))\n\n @property\n def _library_filename(self):\n prefix = \"\" if self.settings.compiler == \"Visual Studio\" else \"lib\"\n if self.options.shared:\n if tools.is_apple_os(self.settings.os):\n suffix = \".dylib\"\n elif self.settings.os == \"Windows\":\n suffix = \".dll\"\n else:\n suffix = \".so\"\n else:\n if self.settings.compiler == \"Visual Studio\":\n suffix = \".lib\"\n else:\n suffix = \".a\"\n return prefix + \"openh264\" + suffix\n\n @property\n def _make_arch(self):\n return {\n \"armv7\": \"arm\",\n \"armv8\": \"arm64\",\n \"x86\": \"i386\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings.arch), str(self.settings.arch))\n\n @property\n def _android_target(self):\n return \"android-{}\".format(self.settings.os.api_level)\n\n @property\n def _make_args(self):\n prefix = os.path.abspath(self.package_folder)\n if tools.os_info.is_windows:\n prefix = tools.unix_path(prefix)\n args = [\n \"ARCH={}\".format(self._make_arch),\n \"PREFIX={}\".format(prefix),\n ]\n autotools = AutoToolsBuildEnvironment(self)\n if self.settings.compiler == \"Visual Studio\":\n autotools.flags.extend([\"-nologo\", \"-{}\".format(self.settings.compiler.runtime)])\n autotools.link_flags.insert(0, \"-link\")\n if tools.Version(self.settings.compiler.version) >= \"12\":\n autotools.flags.append(\"-FS\")\n elif self.settings.compiler in (\"apple-clang\",):\n if self.settings.arch in (\"armv8\",):\n autotools.link_flags.append(\"-arch arm64\")\n if self.options.shared:\n autotools.fpic = True\n 
args.extend([\"{}={}\".format(k, v) for k,v in autotools.vars.items()])\n\n if self.settings.compiler == \"Visual Studio\":\n args.append(\"OS=msvc\")\n autotools.flags.append(\"-FS\")\n else:\n if self.settings.os == \"Windows\":\n args.append(\"OS=mingw_nt\")\n if self.settings.os == \"Android\":\n libcxx = str(self.settings.compiler.libcxx)\n stl_lib = \"$(NDKROOT)/sources/cxx-stl/llvm-libc++/libs/$(APP_ABI)/lib{}\".format(\"c++_static.a\" if libcxx == \"c++_static\" else \"c++_shared.so\") \\\n + \"$(NDKROOT)/sources/cxx-stl/llvm-libc++/libs/$(APP_ABI)/libc++abi.a\"\n ndk_home = os.environ[\"ANDROID_NDK_HOME\"]\n args.extend([\n \"NDKLEVEL={}\".format(self.settings.os.api_level),\n \"STL_LIB={}\".format(stl_lib),\n \"OS=android\",\n \"NDKROOT={}\".format(ndk_home), # not NDK_ROOT here\n \"TARGET={}\".format(self._android_target),\n \"CCASFLAGS=$(CFLAGS) -fno-integrated-as\",\n ])\n\n return args\n\n def build(self):\n self._patch_sources()\n with tools.vcvars(self) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n with tools.chdir(self._source_subfolder):\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=self._make_args, target=self._library_filename)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n with tools.vcvars(self) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n with tools.chdir(self._source_subfolder):\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=self._make_args, target=\"install-\" + (\"shared\" if self.options.shared else \"static-lib\"))\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\" and self.options.shared:\n self.cpp_info.libs = [\"openh264_dll\"]\n else:\n self.cpp_info.libs = [\"openh264\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n if self.settings.os == \"Android\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.names[\"pkg_config\"] = \"openh264\"\n libcxx = tools.stdcpp_library(self)\n if libcxx:\n self.cpp_info.system_libs.append(libcxx)\n","sub_path":"recipes/openh264/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"447161707","text":"from functools import wraps\nfrom django.http import Http404\n\n\n#Decorator for protect admin from intruders\ndef staff_or_404(view_func):\n \"\"\"\n Decorator for views that checks that the user is logged in and is a staff\n member, raising a 404 if necessary.\n \"\"\"\n @wraps(view_func)\n def new_view_func(request, *args, **kwargs):\n if request.user.is_authenticated():\n if request.user.is_admin:\n # The user is valid. 
Continue to the admin page.\n return view_func(request, *args, **kwargs)\n\n raise Http404\n return new_view_func\n","sub_path":"inquitv/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"476047819","text":"def setArguments():\n usage = \"usage: %s [options]\" % sys.argv[0]\n config = Parser(usage=usage).check_args()\n logger.setLevel(logging.INFO)\n if config.console:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter(\"%(name)s - %(levelname)s - %(message)s\"))\n logger.addHandler(console_handler)\n if config.log_file:\n if not os.path.isdir(os.path.dirname(config.log_file)):\n # fallback to console only if directory for logs does not exists and\n # continue to run\n raise ValueError('Please create directory %r to store %r log file' % (\n os.path.dirname(config.log_file), config.log_file))\n else:\n file_handler = logging.FileHandler(config.log_file)\n file_handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\n logger.addHandler(file_handler)\n logger.info('Configured logging to file %r' % config.log_file)\n if config.pid_file:\n if not os.path.isdir(os.path.dirname(config.pid_file)):\n raise ValueError('Please create directory %r to store %r pid file' % (\n os.path.dirname(config.pid_file), config.pid_file))\n else:\n open(config.pid_file, 'w').write(str(os.getpid()))\n if config.directory:\n if not os.path.isdir(config.directory):\n raise ValueError('Please create directory %r to store local files' % (\n config.directory))\n else:\n os.chdir(config.directory)\n config.cwd = os.getcwd()\n\n return config\n","sub_path":"software/redisdg/source/setArguments.py","file_name":"setArguments.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"559079657","text":"#encoding=utf-8\r\n\r\n\r\nfrom util.db import MySqlHelper\r\nimport util.timeHelper as timeHelper\r\n\r\n\r\nclass ProgramPlayPageDbHelper():\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def saveProgramPlayPage(self, channelId, playDate, pageUrl, html, dbHelper=None):\r\n sql = \"insert ignore into programplaypage (channelId, playDate, url, html, savingTime) values (%s, %s, %s, %s, %s)\"\r\n #sql = \"insert ignore into programplaypage (channelId, playDate, url, html, savingTime) values (%s, %s, %s, %s, %s) on duplicate key update html = values(html)\"\r\n params = (channelId, playDate, pageUrl, html, timeHelper.now())\r\n if not dbHelper:\r\n dbHelper = MySqlHelper()\r\n dbHelper.openByConf()\r\n dbHelper.execute(sql, params)\r\n dbHelper.close()\r\n else:\r\n dbHelper.execute(sql, params)\r\n\r\n def clearProgramPlayPages(self, dbHelper=None):\r\n sql = \"delete from programplaypage\"\r\n if not dbHelper:\r\n dbHelper = MySqlHelper()\r\n dbHelper.openByConf()\r\n dbHelper.execute(sql)\r\n dbHelper.close()\r\n else:\r\n dbHelper.execute(sql)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dbHelper = MySqlHelper(True)\r\n programPlayPageDbHelper = ProgramPlayPageDbHelper()\r\n programPlayPageDbHelper.clearProgramPlayPages(dbHelper)","sub_path":"data-auto-updater-projects/pytvmao/dal/programPlayPageHelper.py","file_name":"programPlayPageHelper.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"156795572","text":"from 
aws_xray_sdk.core import patch, xray_recorder\nfrom pandas import DataFrame\n\nfrom raster_analysis.data_cube import DataCube\nfrom raster_analysis.data_environment import DataEnvironment\nfrom raster_analysis.geometry import GeometryTile\nfrom raster_analysis.globals import LOGGER\nfrom raster_analysis.query import Query\nfrom raster_analysis.query_executor import QueryExecutor\nfrom raster_analysis.results_store import AnalysisResultsStore, ResultStatus\n\npatch([\"boto3\"])\n\n\n@xray_recorder.capture(\"Raster Analysis\")\ndef handler(event, context):\n try:\n LOGGER.info(f\"Running analysis with parameters: {event}\")\n results_store = AnalysisResultsStore()\n\n if \"geometry\" in event:\n source_geom = event[\"geometry\"]\n is_encoded = False\n elif \"encoded_geometry\" in event:\n source_geom = event[\"encoded_geometry\"]\n is_encoded = True\n else:\n raise KeyError(\"No valid geometry field\")\n\n tile_geojson = event.get(\"tile\", None)\n geom_tile = GeometryTile(source_geom, tile_geojson, is_encoded)\n\n if not geom_tile.geom:\n LOGGER.info(f\"Geometry for tile {context.aws_request_id} is empty.\")\n results_store.save_result({}, context.aws_request_id)\n return {}\n\n data_environment = DataEnvironment(layers=event[\"environment\"])\n query = Query(event[\"query\"], data_environment)\n\n data_cube = DataCube(geom_tile.geom, geom_tile.tile, query)\n\n query_executor = QueryExecutor(query, data_cube)\n results: DataFrame = query_executor.execute()\n\n LOGGER.debug(f\"Ran analysis with results: {results.head(100)}\")\n results_store.save_result(results, event[\"cache_id\"])\n except Exception as e:\n LOGGER.exception(e)\n\n results_store = AnalysisResultsStore()\n results_store.save_status(event[\"cache_id\"], ResultStatus.error, 0, str(e))\n raise e\n","sub_path":"lambdas/raster_analysis/src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"558514697","text":"from random import *\n# random() 随机生成一个[0, 1.0]之间的小数\nprint(\n random()\n)\n# randint(a, b) 随机生成一个a到b之间的一个整数\nprint(\n randint(10, 100)\n)\n# uniform(a, b) 随机生成一个a到b之间的一个小数\nprint(\n uniform(10, 1000)\n)\n# randrange(a, b, c) 在a和b之间随机生成一个以c递增的数\nprint(\n randrange(10, 20, 9)\n)\nprint('================================================================')\n# choice() 从列表中随机返回一个元素\nprint(\n choice([1, 2, 3, 4, 5, 66])\n)\n# shuffle() 将列表排序打乱\nabc = ['a', 'a1', 'a2', 'a3', 'a4']\nprint(\n shuffle(abc), abc\n)\n# sample(, k) 从指定列表里获取k个元素\nprint(\n sample(abc, 2)\n)\nprint('========================================================')\na = range(2, 10)\nprint(\n a\n)\n","sub_path":"before/python3/练习/Mic008.随机函数.py","file_name":"Mic008.随机函数.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"42393086","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nfrom numpy.linalg import eig\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndef plot_vectors(V, EigVec):\n base = [0 for e in EigVec]\n vx = [np.array(e).ravel()[0] for e in EigVec]\n vy = [np.array(e).ravel()[1] for e in EigVec]\n plt.quiver(base, base, vx, vy, color = ['b', 'b'], angles = 'xy', scale=1.0, scale_units='xy')\n base = [0 for v in V]\n x = [np.array(v).ravel()[0] for v in V]\n y = [np.array(v).ravel()[1] for v in V]\n plt.quiver(base, base, x, y, color = 'r', angles = 'xy', scale=1.0, 
scale_units='xy')\n maxy = max([abs(i)+1 for i in y])\n maxx = max([abs(i)+1 for i in y])\n plt.ylim(-1*maxy, maxy)\n plt.xlim(-1*maxx, maxx)\n\n\n# $A : \\left(\\begin{matrix}\n# 3 & 4\\\\\n# 4 & 3\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v0 : \\left(\\begin{matrix}\n# 1\\\\\n# 1\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v1 : \\left(\\begin{matrix}\n# 1\\\\\n# -1\\\\\n# \\end{matrix}\\right)$\n\n# In[ ]:\n\n\nA = np.matrix([[3,4],[4,3]])\nv0 = [[1],[1]]\nv1 = [[1],[-1]]\nx = [[0],[1]]\nfor i in range(400):\n x = A.dot(x)\nplot_vectors([x], [v0,v1])\n\n\n# In[ ]:\n\n\nA = np.matrix([[3,4],[4,3]])\nv0 = [[1],[1]]\nv1 = [[1],[-1]]\nx = [[0],[1]]\nfor i in range(3):\n x = A.dot(x)\n x = x / np.linalg.norm(x,np.inf)\nplot_vectors([x], [v0,v1])\n\n\n# $A : \\left(\\begin{matrix}\n# 2 & 0\\\\\n# 0 & 2\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v0 : \\left(\\begin{matrix}\n# 1\\\\\n# 0\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v1 : \\left(\\begin{matrix}\n# 0\\\\\n# 1\\\\\n# \\end{matrix}\\right)$\n\n# In[ ]:\n\n\nA = np.matrix([[2, 0],[0,2]])\nv0 = [[1],[0]]\nv1 = [[0],[1]]\nx = [[1],[1]]\nfor i in range(100):\n x = A.dot(x)\n x = x / np.linalg.norm(x,np.inf)\nplot_vectors([x], [v0,v1])\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"demos/upload/toshow4/Power Method.py","file_name":"Power Method.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"232943750","text":"import os\nimport urllib\nimport webapp2\nimport json\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.webapp.mail_handlers import InboundMailHandler\nimport logging\nimport base64\nimport datetime\n\nFILTER_NEWLINES = False\nUSE_HTML = False\n# returns a datastore key for a given message\ndef messagedb_key(messagedb_name):\n return ndb.Key('Message', messagedb_name)\n\n# stores when requests were last received (for determining which ones are new)\nclass TimeStamp(ndb.Model):\n time_stored = ndb.DateTimeProperty()\n name = ndb.StringProperty()\n def update(self):\n self.time_stored = datetime.datetime.now()\n self.put()\n\n# data for each message. 
time is the time when the message was receieved/stored\nclass Message(ndb.Model):\n subject = ndb.StringProperty(indexed=False)\n content = ndb.TextProperty()\n town = ndb.StringProperty(indexed=True)\n time = ndb.DateTimeProperty()\n\n# returns the new messages that have arrived in JSON since the last request\nclass API(webapp2.RequestHandler):\n def get(self):\n # enable requests from any domain\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n self.response.headers['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept'\n self.response.headers['Access-Control-Allow-Methods'] = 'POST, GET, PUT'\n town_name = self.request.get('town_name')\n last_time = TimeStamp.query(TimeStamp.name == town_name).fetch(1)\n # making sure only new data was returned\n if len(last_time) == 0: # if there was no \"last request\", make a new timestamp\n last_time = TimeStamp()\n last_time.name = town_name\n last_time.update()\n logging.info('making new timestamp')\n else:\n last_time = last_time[0]\n logging.info('last request made at' + str(last_time.time_stored))\n logging.info('looking for messages for town: ' + town_name)\n # building response\n messages = Message.query(Message.town == town_name, Message.time > last_time.time_stored).fetch(100)\n response = {}\n response['messages'] = []\n num_new = len(messages)\n response['num_new'] = num_new \n for i in range(num_new):\n content = messages[i].content\n if FILTER_NEWLINES:\n content = content.replace('\\n', \" \")\n content = content.replace('\\r', \"\")\n if messages[i].subject is None: #if there was no subject, give this default\n response['messages'].append({'subject': town_name + \" School Update\", 'body':content})\n else:\n response['messages'].append({'subject': messages[i].subject, 'body':content})\n \n self.response.write(json.dumps(response, separators=(',',':'), sort_keys=True))\n last_time.update()\n logging.info('this request made at' + str(last_time.time_stored))\n\n# takes incoming mail, parses it and stores it in the database\nclass LogSenderHandler(InboundMailHandler):\n def receive(self, mail_message):\n logging.info(\"Received a message from: \" + mail_message.sender + \" addressed to \" + mail_message.to)\n # gets the \"pembroke\" in pembroke@biw-school-news.appspot.com\n town_name = mail_message.to.split('@')[0][1:]\n message = Message()\n plain_bodies = mail_message.bodies('text/plain')\n html_bodies = mail_message.bodies('text/html')\n message.content = \"\"\n # parse email body and subject\n if USE_HTML:\n for content_type, body in html_bodies:\n message.content += body.decode()\n else:\n for content_type, body in plain_bodies:\n message.content += body.decode()\n if mail_message.subject is not None:\n message.subject = mail_message.subject\n # put everything in database\n message.time = datetime.datetime.now()\n message.town = town_name\n message.put()\n logging.info('this message stored @ ' + str(message.time))\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.write('
BIW School News Server
contact charles.meyer@tufts.edu if you got here by accident')\n\napp = webapp2.WSGIApplication([\n LogSenderHandler.mapping(),\n ('/api', API),\n ('/', MainPage)\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"137481686","text":"#!/usr/bin/env python3\nfrom collections import OrderedDict\nimport sys\nimport re\nDBG = False\n\n#add_argument, set_defaults only available.\nListPatt = re.compile('(\\[.*?\\])')\nGbgPatt = re.compile('(.*)\\)[A-z0-9*]')\nLpRegex = re.compile('\\({1,}\\s{0,}')\nRpRegex = re.compile('\\s{0,}\\){1,}')\nPrRegex = re.compile('\\((.*)(\\))(?!.*\\))') # from \\( to last \\)\nCmRegex = re.compile('\\s{0,},\\s{0,}')\nStrRegex = re.compile('\\'(.*?)\\'')\n\n# Argument dict : store {arg_name : value}\nargDct=OrderedDict()\n\n# Remove empty line & Concatenate line-separated syntax.\ndef preprocess(fname):\n try :\n with open(fname, 'r', encoding='UTF8') as f:\n txt = f.read()\n t = txt.splitlines(True)\n t = str_list = list( filter(None, t) )\n # remove empty line\n t = [x for x in t if not re.match('\\s{0,}\\n',x)]\n # concatenate multiple lined arguments.\n empl = []\n for i in range(len(t)-1, 0, -1):\n if not re.search('add_argument|set_defaults', t[i]):\n t[i-1] += t[i]\n t[i-1]=re.sub('\\s{0,}\\n{0,}\\s{0,}','',t[i-1])\n empl.append(t[i])\n\n for d in empl:\n t.remove(d)\n for i, line in enumerate(t):\n t[i] = line.replace('\\\"', '\\'')\n return t\n\n except IOError:\n print('IOError : no such file.', fname)\n\n# Handling add_argument()\ndef add_argument(arg_line):\n global argDct\n\n arg_line = arg_line\n if DBG:\n print('in add_argument : **Pr regex : ' + str(arg_line))\n\n #argname = DdRegex.split(arg_line)[1] # Dash or regex for arg name.\n argname = re.search('\\'--(.*?)\\'',arg_line)\n if not argname:\n argname = re.search('\\'-+(.*?)\\'',arg_line)\n if argname:\n argname = argname.group(1).replace('-', '_')\n else :\n argname = StrRegex.search(arg_line).group(1)\n if not argname:\n return # no argument name\n\n argDct[argname]=''\n dtype = re.search(',\\s*type\\s*=(.*)', arg_line)\n if dtype:\n dtype = dtype.group(1)\n dtype = CmRegex.split(dtype)[0]\n else :\n dtype = ''\n\n dfult = re.search(',\\s*default\\s*=(.*)',arg_line)\n rquird = re.search(',\\s*required\\s*=(.*)',arg_line)\n action = re.search(',\\s*action\\s*=(.*)',arg_line)\n\n tval = ''\n if dfult:\n if DBG:\n print('in default ext')\n # type exist\n if re.search('int|float|long|bool|complex', dtype):\n tval = dfult.group(1)\n if DBG:\n print('type exist tval :' +str(tval))\n\n if ListPatt.search(tval):\n tval = ListPatt.search(tval).group(1)\n if DBG:\n print('list exit-list patt : ' + str(tval))\n\n # if not list, use comma as separator.\n else :\n tval = CmRegex.split(tval)[0]\n if DBG:\n print('not list tval :' +str(tval))\n\n if not re.search('int|float|long|bool|complex', tval) and not LpRegex.search(tval):\n tval = re.split('\\s{0,}\\){1,}',tval)[0]\n gbg = re.search(GbgPatt, tval)\n if gbg:\n tval = gbg.group(1)\n\n # type not specified str() assumed.\n else:\n tval = dfult.group(1)\n \n regres = StrRegex.match(tval)\n if regres:\n tval = regres.group(0)\n elif ListPatt.search(tval):\n tval = ListPatt.search(tval).group(1)\n else:\n tval = CmRegex.split(tval)[0]\n \n \n if DBG:\n print('tval : ' + str(tval) +'\\n')\n\n # action or required syntax exist\n elif action or rquird :\n if DBG:\n print('in action handling')\n msg_str = ''\n 
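# NOTE (illustrative sketch; not part of the original script): reaching this
# branch means one of the `action`/`rquird` regexes matched earlier, e.g. for a
# source line such as:
#   parser.add_argument('--verbose', action='store_true')
# the code below extracts the quoted value and records
#   argDct['verbose'] == "## action 'store_true' ##"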
if action:\n tval = action.group(1)\n msg_str = 'action'\n else :\n tval = rquird.group(1)\n msg_str = 'required'\n\n regres = StrRegex.search(tval)\n if regres:\n tval = regres.group(0)\n else :\n tval = CmRegex.split(tval)[0]\n tval = '## ' + msg_str + ' ' + tval + ' ##'\n \n else :\n argDct[argname] = '## default None ##'\n\n if tval:\n argDct[argname] = tval\n\n# Handling set_default()\ndef set_defaults(arg_line):\n global argDct\n if DBG:\n print('Set_defaults : ' + str(arg_line))\n\n dfult = re.split('\\s{0,}=\\s{0,}', arg_line)\n tn = dfult[0] # arg name\n tv = RpRegex.split(dfult[1])[0] #arg value\n argDct[tn]=tv\n\ndef transform(fname):\n # t : list() contains add_argument|set_defaults lines.\n arg_line_list = preprocess(fname)\n\n for i, arg_line in enumerate(arg_line_list):\n\n t = PrRegex.search(arg_line)\n if t:\n t = t.group(1) # t: content of add_argument Parentheses.\n else :\n continue # nothing to parse.\n\n if re.search('add_argument\\s*\\(', arg_line):\n add_argument(t)\n elif re.search('set_defaults\\s*\\(',arg_line):\n set_defaults(t)\n else :\n # Nothing to parse.\n continue\n\n print('\\nclass args:')\n for i in argDct:\n print(' ',i, '=', argDct[i])\n print()\n\ndef main():\n if len(sys.argv) <2:\n print('Usage : python arg2cls.py [target.py] [target2.py(optional)] ...')\n sys.exit(0)\n sys.argv.pop(0)\n\n #handling multiple file input.\n for fname in sys.argv:\n transform(fname)\n\n# TODO : choices=, multiple keywords occurence fix. \n\nif(__name__ == \"__main__\"):\n main()\n","sub_path":"arg2cls_v0.8.py","file_name":"arg2cls_v0.8.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"215139364","text":"#!/usr/bin/python\n\n# Face Detection using OpenCV. Based on sample code by Roman Stanchak\n# Nirav Patel http://eclecti.cc 5/20/2008\n\nimport sys, os\nfrom opencv.cv import *\nfrom opencv.highgui import *\n\t\ndef detectObject(image):\n grayscale = cvCreateImage(cvSize(640, 480), 8, 1)\n cvCvtColor(image, grayscale, CV_BGR2GRAY)\n storage = cvCreateMemStorage(0)\n cvClearMemStorage(storage)\n cvEqualizeHist(grayscale, grayscale)\n cascade = cvLoadHaarClassifierCascade('haarcascade_frontalface_alt.xml',\n cvSize(1,1))\n faces = cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, \n CV_HAAR_DO_CANNY_PRUNING, cvSize(100,100))\n \n if faces:\n for i in faces:\n cvRectangle(image, cvPoint( int(i.x), int(i.y)),\n cvPoint(int(i.x+i.width), int(i.y+i.height)),\n CV_RGB(0,255,0), 3, 8, 0)\n \ndef displayObject(image):\n cvNamedWindow(\"face\", 1)\n cvShowImage(\"face\", image)\n cvWaitKey(0)\n cvDestroyWindow(\"face\")\n \ndef main():\n # Uses xawtv. 
Gstreamer can be used instead, but I found it much slower\n os.system(\"v4lctl snap jpeg 640x480 /tmp/face.jpg\")\n image = cvLoadImage(\"/tmp/face.jpg\")\n detectObject(image)\n displayObject(image)\n cvSaveImage(\"/tmp/face.jpg\", image)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"591659242","text":"__all__ = ['State', 'StateView', 'StateScene', 'StateGItem', 'StateConnItem']\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom Framework.Flowchart.State import State as ST\nfrom CRad.View.FCStep.DlgSet.DlgSet import SetsManager\nfrom CRad.Model.ContextMenuCmpt.CRadCtxtMenuCmpt import CRadCtxtMenuCmpt as TP\n\nfrom CRad.View.Resources import pycreme_res\nimport sys\n\n\nclass State(QObject):\n Empty = ST.Empty\n ReadyToRun = ST.ReadyToRun\n WaitJob = ST.WaitJob\n WaitMachine= ST.WaitMachine\n InProgress = ST.InProgress\n Finished = ST.Finished\n RunFailed = ST.RunFailed\n Off = ST.Off # The step's state is Off.\n FinishedOff= ST.FinishedOff # The step's state is Off, but the parent steps are finished and\n # the current step stores the parent step's data.\n Ready = ST.Ready # The current step is Ready, but the target and task is not inited.\n\n def __init__(self, label, imageName, name, fchart, parent=None):\n super(State, self).__init__(parent)\n\n self._label = label\n self._imageName = imageName\n\n self._name = name\n self._fc = fchart\n\n self._EditDlg = None\n self._editDlg = None\n\n self._status = self.Empty\n self._progress = 0\n self._isTargetState = False\n\n fchart.addObserver(self)\n self.update()\n\n # Context menu\n self._acts = {}\n self._iTPToKeys = {} # ItemType to action key mapping\n self._ctxtMenu = None\n\n self.initActions()\n self.initCtxtMenu()\n\n def name(self):\n return self._name\n\n def parents(self):\n return self._parents\n\n def setParents(self, p):\n self._parents = p\n\n def label(self):\n return self._label\n\n def wrapedLabel(self):\n return self._label\n\n def imageName(self):\n return self._imageName\n\n def isTargetState(self):\n return self._isTargetState\n\n def setTargetState(self, val=True):\n self._isTargetState = val\n\n def setEditDlg(self, Dlg):\n self._EditDlg = Dlg\n\n def update(self):\n st, pro = self._fc.status(self.name())\n self._status = st\n self._progress = pro\n self.emit(SIGNAL(\"statusChanged\"), self)\n\n def status(self):\n return self._status\n\n def progress(self):\n return self._progress\n\n def addAction(self, key, text, method, itemType, icon=None):\n if icon is not None:\n act = QAction(icon, text, self.parent())\n else:\n act = QAction(text, self.parent())\n\n self._acts[key] = act\n\n if self._iTPToKeys.has_key(itemType):\n self._iTPToKeys[itemType].add(key)\n else:\n self._iTPToKeys[itemType] = set([key])\n\n self.connect(act, SIGNAL(\"triggered()\"), method)\n\n def updateActions(self):\n itemList = self._iTPToKeys.keys()\n ret = self._fc.state(self.name()).contextMenuCmpt().isEnabled(itemList)\n for i, itemType in enumerate(itemList):\n isEnabled = ret[i]\n actKeys = self._iTPToKeys[itemType]\n for key in actKeys:\n self._acts[key].setEnabled(isEnabled)\n\n def initActions(self):\n self.addAction(\"Run\", self.tr(\"&Run Selected Step\"), self.run, TP.Run, QIcon(\":general/run_sel.png\"))\n self.addAction(\"Edit\", self.tr(\"&Edit Settings\"), self.editSets, TP.SetSettings)\n self.addAction(\"IptSets\", self.tr(\"&Import 
Settings From Project\"), self.importSets,TP.ImportSettings)\n self.addAction(\"IptRet\", self.tr(\"&Import Result From File\"), self.importRet, TP.ImportResult)\n self.addAction(\"ShowRet\", self.tr(\"&Show Result\"), self.showRet, TP.GetResult)\n self.addAction(\"GenRpt\", self.tr(\"&Generate Report\"), self.genReport, TP.GenReport)\n\n def initCtxtMenu(self):\n menu = QMenu(self.parent())\n menu.addAction(self._acts[\"Run\"])\n menu.addSeparator()\n menu.addAction(self._acts[\"Edit\"])\n menu.addAction(self._acts[\"IptSets\"])\n menu.addAction(self._acts[\"IptRet\"])\n menu.addSeparator()\n menu.addAction(self._acts[\"ShowRet\"])\n menu.addSeparator()\n menu.addAction(self._acts[\"GenRpt\"])\n\n self._ctxtMenu = menu\n\n # slots\n def showCtxtMenu(self):\n self.updateActions()\n self._ctxtMenu.exec_(QCursor.pos())\n\n def run(self):\n self._fc.state(self.name()).run()\n\n def editSets(self):\n if self._EditDlg is not None:\n if self._editDlg is None:\n setsMngr = SetsManager( self._fc.state(self.name()).settingsCmpt() )\n self._editDlg = self._EditDlg(setsMngr)\n\n self._editDlg.fromData()\n self._editDlg.exec_()\n\n def importSets(self):\n raise NotImplementedError\n\n def importRet(self):\n raise NotImplementedError\n\n def showRet(self):\n raise NotImplementedError\n\n def genReport(self):\n raise NotImplementedError\n\n\nclass StateConnItem(QAbstractGraphicsShapeItem):\n def __init__(self, parentNode, childNodes=[], parent=None):\n super(StateConnItem, self).__init__(parent)\n self._children = []\n self.setPos(parentNode)\n for child in childNodes:\n self._children.append(child - parentNode)\n\n def paint(self, painter, option, widget):\n if len(self._children) == 0:\n return\n\n painter.setPen(Qt.darkGray)\n midy = self._children[0].y() / 2\n\n # vertical line from parent\n painter.drawLine(0, 0, 0, int(midy))\n\n # horizontal line\n xmin = 1e100; xmax = -1e100\n for child in self._children:\n xmin = min(child.x(), xmin)\n xmax = max(child.x(), xmax)\n painter.drawLine(int(xmin), int(midy), int(xmax), int(midy))\n\n # vertical line to children\n for child in self._children:\n painter.drawLine(int(child.x()), int(midy), int(child.x()), int(child.y()))\n\n def boundingRect(self):\n # -----\n # xmin = qreal(0.0); qMin(xmin, child.x())\n # -----\n\n xmin = 0.0\n xmax = 0.0\n ymin = 0.0\n ymax = 0.0\n\n for child in self._children:\n xmin = min(xmin, child.x())\n xmax = max(xmax, child.x())\n ymin = min(ymin, child.y())\n ymax = max(ymax, child.y())\n return QRectF(xmin, ymin, xmax-xmin, ymax-ymin)\n\n\nclass StateGItem(QAbstractGraphicsShapeItem):\n ConnTop = 0\n ConnBottom = 1\n stateIconSize = QImage(\":traj.png\").width()\n\n def __init__(self, state, parent=None):\n super(StateGItem, self).__init__(parent)\n self.setFlag(QGraphicsItem.ItemIsSelectable, True)\n self._state = state\n self.__initImage(imageEmpty= QIcon(':'+self._state.imageName()))\n self._iconRect = QRectF(0, 0, StateGItem.stateIconSize-1, StateGItem.stateIconSize-1)\n\n def __initImage(self,\n imageFinished = QImage(\":finished.png\"),\n imageRunning = QImage(\":running.png\"),\n imageFailed = QImage(\":failed.png\"),\n imageEmpty = QImage(\":traj.png\"),\n imageWaiting = QImage(\"\"),\n imageWaitJob = QImage(\"\"),\n imageWaitMachine = QImage(\"\")):\n\n self._imageFinished = imageFinished\n self._imageRunning = imageRunning\n self._imageFailed = imageFailed\n self._imageEmpty = imageEmpty\n self._imageWaiting = imageWaiting\n self._imageWaitJob = imageWaitJob\n self._imageWaitMachine = imageWaitMachine\n\n def 
boundingRect(self):\n rect = self.textRect() # TODO...\n #return rect.united(self._iconRect)\n return self._iconRect\n\n def paint(self, painter, option, widget):\n if self._state is None:\n # TODO something is wrong, report error\n return\n\n pt = QPoint(0, 0)\n status = self._state.status()\n if status == State.Finished:\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Normal, state = QIcon.Off))\n painter.drawImage(70, 70, self._imageFinished)\n elif status == State.InProgress:\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Selected, state = QIcon.Off))\n painter.drawImage(70, 70, self._imageRunning)\n elif status == State.RunFailed:\n painter.drawImage(pt, self._imageFailed)\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Disabled, state = QIcon.Off))\n painter.drawImage(70, 70, self._imageFailed)\n elif status == State.WaitJob:\n painter.drawImage(pt, self._imageWaitJob)\n elif status == State.WaitMachine:\n painter.drawImage(pt, self._imageWaitMachine)\n elif status in [State.ReadyToRun, State.Ready]:\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Selected, state = QIcon.Off))\n else:\n # _imageEmpty\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Disabled, state = QIcon.Off))\n\n label = self._state.wrapedLabel()\n painter.drawText(self.textRect(), Qt.AlignLeft, label)\n\n if status == State.InProgress:\n pen = QPen()\n pen.setStyle(Qt.SolidLine)\n pen.setMiterLimit(0)\n pen.setBrush(Qt.darkMagenta)\n pen.setCapStyle(Qt.FlatCap)\n pen.setJoinStyle(Qt.MiterJoin)\n\n pen.setWidth(2)\n painter.setPen(pen)\n painter.drawLine(QPoint(3, 88), QPoint( 80 * self._state.progress()/100 + 3 , 88))\n painter.drawText(QRectF(40, 73, 70, 79), Qt.AlignLeft, '%3.0f'%self._state.progress()+'%' )\n\n if self._state.isTargetState():\n size = self.stateIconSize\n path = QPainterPath()\n path.moveTo(size, size)\n path.lineTo(size, size/1.5)\n path.lineTo(size/1.5, size)\n path.lineTo(size, size)\n painter.fillPath(path, QBrush(Qt.blue))\n\n # If selected\n if self.isSelected():\n iconRectpen = QPen()\n iconRectpen.setWidth(2)\n iconRectpen.setColor(QColor(255, 0, 0, 127))\n iconRectpen.setStyle(Qt.DashLine)\n painter.setPen(iconRectpen)\n painter.drawRoundedRect(self._iconRect, 6.0, 6.0)\n\n def connectorPos(self, conn):\n if conn == StateGItem.ConnTop:\n return mapToScene(0.5 * StateGItem.stateIconSize, 0)\n else:\n return mapToScene(0.5 * StateGItem.stateIconSize,\n StateConnItem.stateIconSize)\n\n def state(self):\n return self._state\n\n def textRect(self):\n return QRectF(2, 2, 94, 15)\n\n\nclass StateScene(QGraphicsScene):\n def __init__(self, parent=None):\n super(StateScene, self).__init__(parent)\n\n def updateScene(self):\n super(StateScene, self).update(QRectF())\n\n\nclass StateView(QGraphicsView):\n def __init__(self, scene, parent=None):\n super(StateView, self).__init__(parent)\n if scene is not None:\n self.setScene(scene)\n self.setDragMode(self.NoDrag)\n self.setMouseTracking(True)\n self.setAcceptDrops(False)\n\n def mouseDoubleClickEvent(self, event):\n pos = self.mapToScene(event.pos())\n stateItem = self.scene().itemAt(pos)\n\n # if stateItem type is StateConnItem return\n if stateItem is None or (not isinstance(stateItem, StateGItem)):\n return\n\n state = stateItem.state()\n if state is not None:\n 
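# NOTE (illustrative sketch; not part of the original widget): the double-click
# is re-emitted below as an old-style PyQt4 signal, so a consumer would
# typically subscribe with something like:
#   self.connect(view, SIGNAL('mouseDoubleCliked'), on_state_opened)
# ('mouseDoubleCliked' keeps the spelling used by this source; on_state_opened
#  is a hypothetical slot named only for illustration)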
event.accept()\n self.emit(SIGNAL('mouseDoubleCliked'), state)\n\n def mousePressEvent(self, event):\n pos = self.mapToScene(event.pos())\n\n stateItem = self.scene().itemAt(pos)\n if stateItem is not None and isinstance(stateItem, StateGItem):\n # Record mouse is on which item\n self._latestItem = stateItem\n else:\n self._latestItem = None\n\n selectedPressed = False\n selectedList = self.scene().selectedItems()\n for item in selectedList:\n if (item.contains(item.mapFromScene(pos))):\n selectedPressed = True\n\n if True == selectedPressed:\n if Qt.RightButton == event.button():\n self.mouseReleaseEvent(event)\n else:\n super(StateView, self).mousePressEvent(event)\n\n def mouseReleaseEvent(self, event):\n pos = self.mapToScene(event.pos())\n stateItem = self.scene().itemAt(pos)\n\n if stateItem is None or (not isinstance(stateItem, StateGItem)):\n if Qt.RightButton == event.button():\n event.accept()\n self.emit(SIGNAL('mouseRightReleased'), None)\n else:\n state = stateItem.state()\n if state is not None:\n if Qt.RightButton == event.button():\n event.accept()\n self.emit(SIGNAL('mouseRightReleased'), state)\n elif Qt.LeftButton == event.button():\n event.accept()\n # TODO.. for drag command\n else:\n super(StateView, self).mouseReleaseEvent(event)\n\n def selectedState(self):\n selectedList = self.scene().selectedItems()\n\n # If not state is selected, return state which mouse is on\n if 0 == len(selectedList):\n return [self.latestSelectedState()]\n\n states = []\n for item in selectedList:\n states.append(item.state())\n return states\n\n def latestSelectedState(self):\n if self._latestItem is not None:\n return self._latestItem.state()\n else:\n return None\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n state = State()\n state.setStatus(State.InProgress)\n state.setLabel(QString(\"label\"))\n state.setTargetState(True)\n\n\n scene = StateScene()\n doc = DocExptEditor()\n doc.setStateScene(scene)\n\n l = []\n l.append(QPointF(100.0, -100.0))\n l.append(QPointF(-100.0, -100.0))\n item = StateConnItem(QPointF(0.0, 0.0), l)\n scene.addItem(item)\n\n item = StateGItem(state)\n item.setPos(-23, 0)\n item.setSelected(True)\n view = StateView(doc)\n scene.addItem(item)\n #view.setScene(scene)\n\n view.show()\n app.exec_()","sub_path":"lib/CRad/View/FCStep/State/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":14872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"459919321","text":"#!/usr/bin/env python\n'''\nCreated on Dec 3, 2010\n\n@author: ale\n'''\nimport checkpython\nimport optparse\nimport PrawnTools\nimport os\n\ndef main():\n usage = 'usage: %prog [options]'\n parser = optparse.OptionParser(usage)\n\n parser.add_option('--dbpath', dest='database', help='Database path', default=PrawnTools.jmDBPath())\n parser.add_option('-s', '--session', dest='sessionName', help='Name of the session')\n parser.add_option('-g', '--group', dest='sessionGroup', help='Comma separated list of groups')\n\n (opt, args) = parser.parse_args()\n \n if opt.sessionName is None and opt.sessionGroup is None:\n parser.error('The session name is undefined')\n \n dbPath = os.path.abspath(os.path.expanduser(opt.database))\n m = PrawnTools.Manager(dbPath)\n m.connect()\n\n sessions = m.getListOfSessions(opt.sessionName,opt.sessionGroup)\n \n for s in sessions:\n m.removeSession(s.name)\n m.disconnect()\n \n \nif __name__ == \"__main__\":\n 
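# NOTE (illustrative sketch; not part of the original tool): given the optparse
# options defined above, typical invocations would look like:
#   python pwn_RemoveSession.py -s mysession
#   python pwn_RemoveSession.py -g groupA,groupB
# (the session/group names are hypothetical; --dbpath falls back to
#  PrawnTools.jmDBPath() when omitted)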
main()","sub_path":"src/pwn_RemoveSession.py","file_name":"pwn_RemoveSession.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"341076332","text":"from PIL import Image\ndef sliceImage(im):\n width,height = im.size\n bound = 175#decide if this pixel is black or white\n xStartFlag,xEndFlag,yStartFlag,yEndFlag = False,False,False,False\n xStart,xEnd,yStart,yEnd = 0,0,0,0\n result = []\n\n for i in range(width):\n if(xStartFlag==False):\n #find the start x\n for j in range(height):\n if(im.getpixel((i,j))=2 and yEnd-yStart>=5):\n result.append((xStart,yStart,xEnd,yEnd))\n return result\n","sub_path":"lib/sliceImage.py","file_name":"sliceImage.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"454438082","text":"__author__ = 'andrei'\n\nfrom flask import Flask, jsonify\nfrom SQLengine import andrei_dict\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n return \"Hello World!\"\n\n\n@app.route(\"/user/\")\ndef show_user_profile(username):\n if username == 'chiffa' or username == 'andrei':\n return jsonify(andrei_dict)\n else:\n return jsonify({'Error':'No Such User!'})\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"source/MainServer.py","file_name":"MainServer.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"356566544","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport wx\r\nimport os\r\nfrom wrapper import Cipher\r\nfrom crypt_bat_bk import AES_Batch\r\n\r\nclass AES_GUI(wx.Frame):\r\n \r\n def __init__(self, parent, title):\r\n super(AES_GUI, self).__init__(parent, title=title,\r\n size=(480, 200))\r\n self.cipher = AES_Batch()\r\n self.InitUI()\r\n self.Centre()\r\n self.Show() \r\n \r\n def InitUI(self):\r\n \r\n panel = wx.Panel(self)\r\n vbox = wx.BoxSizer(wx.VERTICAL)\r\n\r\n hbox1 = wx.BoxSizer(wx.HORIZONTAL)\r\n input_lb = wx.StaticText(panel, label='Batch from:')\r\n hbox1.Add(input_lb, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)\r\n self.input = wx.TextCtrl(panel)\r\n hbox1.Add(self.input, proportion=1)\r\n load = wx.Button(panel, label='Load')\r\n load.Bind(wx.EVT_BUTTON, self.onOpenFile)\r\n hbox1.Add(load, flag=wx.ALIGN_RIGHT|wx.LEFT, border=10)\r\n vbox.Add(hbox1, flag=wx.EXPAND|wx.ALL, border=20)\r\n\r\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\r\n output_lb = wx.StaticText(panel, label='Batch to:')\r\n hbox2.Add(output_lb, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)\r\n self.output = wx.TextCtrl(panel)\r\n hbox2.Add(self.output, proportion=1)\r\n load2 = wx.Button(panel, label='Load')\r\n load2.Bind(wx.EVT_BUTTON, self.onOpenFile2)\r\n hbox2.Add(load2, flag=wx.ALIGN_RIGHT|wx.LEFT, border=10)\r\n vbox.Add(hbox2, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=20)\r\n\r\n hbox3 = wx.BoxSizer(wx.HORIZONTAL)\r\n process = wx.Button(panel, label='Process')\r\n process.Bind(wx.EVT_BUTTON, self.onProcess)\r\n hbox3.Add(process, flag=wx.ALIGN_CENTER|wx.EXPAND, proportion=1)\r\n vbox.Add(hbox3, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=20)\r\n\r\n panel.SetSizer(vbox)\r\n\r\n def onProcess(self, event):\r\n msg = self.cipher.parseBatch(self.input.GetValue(), self.output.GetValue())\r\n if not msg == '':\r\n wx.MessageBox(msg, 'Error', wx.OK | wx.ICON_ERROR)\r\n else:\r\n wx.MessageBox('Completed!', 'Completed', wx.OK | wx.ICON_INFORMATION)\r\n def 
onOpenFile(self, event):\r\n \"\"\"\r\n Create and show the Open FileDialog\r\n \"\"\"\r\n\r\n wildcard = \"All files (*.csv)|*.csv\"\r\n dlg = wx.FileDialog(\r\n self, message=\"Choose a file\",\r\n defaultDir= os.getcwd(),\r\n defaultFile=\"\",\r\n wildcard=wildcard,\r\n style=wx.OPEN | wx.CHANGE_DIR\r\n )\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.input.SetValue(dlg.GetPath())\r\n dlg.Destroy()\r\n\r\n def onOpenFile2(self, event):\r\n \"\"\"\r\n Create and show the Open FileDialog\r\n \"\"\"\r\n wildcard = \"All files (*.csv)|*.csv\"\r\n dlg = wx.FileDialog(\r\n self, message=\"Choose a file\",\r\n defaultDir= os.getcwd(),\r\n defaultFile=\"\",\r\n wildcard=wildcard,\r\n style=wx.OPEN | wx.CHANGE_DIR\r\n )\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.output.SetValue(dlg.GetPath())\r\n dlg.Destroy()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = wx.App()\r\n AES_GUI(None, title='AES Crypto Tool (Batch)')\r\n app.MainLoop()","sub_path":"crypt-bat-GUI.py","file_name":"crypt-bat-GUI.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"153193412","text":"\t#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n#\r\n# Copyright 2007 Google Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0(the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n#\t http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\r\nimport webapp2\r\nimport os\r\nimport re\r\nimport jinja2\r\nimport random\r\nimport string\r\nimport hmac\r\nimport logging\r\nimport datetime\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.api import memcache\r\n\r\nfrom pybcrypt import bcrypt\r\nimport pytils\r\nimport gmemsess\r\n\r\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\r\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir))#,\r\n\t\t\t\t\t\t\t #autoescape = True)\r\nSESSION_EXPIRES = 3 #сколько дней храним сессию\r\n\r\npermits = {#!!!!!!!!!!необходимо убрать хардкод и хранить разрешения в бд\r\n\t\t\t'admin'\t\t: {'blog_post':True, 'comment_post':True,'power_control':True},\r\n\t\t\t'blogger'\t: {'blog_post':True, 'comment_post':True,'power_control':False},\r\n\t\t\t'member'\t: {'blog_post':False, 'comment_post':True,'power_control':False},\r\n\t\t\t'guest'\t\t: {'blog_post':False, 'comment_post':False,'power_control':False}\r\n\t\r\n}\r\n\r\n\r\napp_path = {'main'\t: '/',\r\n\t\t\t'login'\t: '/login',\r\n\t\t\t'signup': '/signup',\r\n\t\t\t'logout': '/logout',\r\n\t\t\t'blog'\t: '/blog',\r\n\t\t\t'profile': '/profile',\r\n\t\t\t'comment': '/comment',\r\n\t\t\t'ajax'\t: '/ajx'\r\n\t\t\t}\r\nsecret = '_Long_123_Secret_456_String_789_' #следует сохранить отдельно\r\n\r\n##########################################################################\r\n#Вспомогательные функции #\r\n##########################################################################\r\n\r\ndef make_hash(*args): # создание хеша из полученных аргументов\r\n\tline_for_hashing = \"\"\r\n\tfor arg in args:\r\n\t\tline_for_hashing += str(arg)\r\n\treturn bcrypt.hashpw(line_for_hashing, 
bcrypt.gensalt())\r\n\r\ndef valid_hash(h, *args): # проверка хеша\r\n\tline_for_hashing = \"\"\r\n\tfor arg in args:\r\n\t\tline_for_hashing += str(arg)\r\n\tif bcrypt.hashpw(line_for_hashing, h) == h:\r\n\t\t\treturn True\r\n\r\ndef make_secure_val(val): #простое хеширование параметра(на выходе параметр|хеш)\r\n\treturn '%s|%s' %(val, hmac.new(secret, val).hexdigest())\r\n\r\ndef check_secure_val(secure_val): #проверка соответствия параметр-хеш\r\n\tval = secure_val.split('|')[0]\r\n\tif secure_val == make_secure_val(val):\r\n\t\treturn val\r\n\r\ndef render_str(template, **params): #подготовка шаблона\r\n\tt = jinja_env.get_template(template)\r\n\treturn t.render(params)\r\n\r\n\r\ndef clone_entity(e, **extra_args): #клон сущности\r\n\tklass = e.__class__ #получаем класс сущности который копируем\r\n\tprops = dict((k, v.__get__(e, klass)) for k, v in klass.properties().iteritems()) #копируем значения свойств из старой сущности в словарь\r\n\tprops.update(extra_args) #обновляем созданный словарь значениями из аргументов функции\r\n\treturn klass(**props) # создаем новую сущность и возвращаем её\r\n\r\nUSER_RE = re.compile(r\"^[\\w-]{3,20}$\")\r\ndef valid_username(username):\r\n\treturn username and USER_RE.match(username)\r\n\r\nPASS_RE = re.compile(r\"^.{3,20}$\")\r\ndef valid_password(password):\r\n\treturn password and PASS_RE.match(password)\r\n\r\nEMAIL_RE = re.compile(r'^[\\S]+@[\\S]+\\.[\\S]+$')\r\ndef valid_email(email):\r\n\treturn email and EMAIL_RE.match(email)\r\n\r\n#########################################################\r\n\r\nclass Nestedobject (object):\r\n\r\n\tdef __init__ (self, m, r, nest_level = 0):\r\n\t\tself.msg = m\r\n\t\tself.replies = r\r\n\t\tself.nest_level = nest_level # иерархический уровень комментария (нужно для определения какого уровня не делаем отступ в html-шаблоне)\r\n\t\t\t\r\ndef nest (flow, root_rep_id_list, deep = 0): #рекурсивное создание древовидной структуры из плоского списка предков и потомков\r\n\tmsglist = []\r\n\tnested_comments = []\r\n\tdeep += 1 #глубина рекурсии = иерархический уровень комментария\r\n\r\n\tfor rep_id in root_rep_id_list: #с помощью полученного списка ключей корневых ответов ветки составляем список объектов-ответов выбирая из плоского списка\r\n\t\tif rep_id in flow:\r\n\t\t\tmsglist.append(flow[rep_id])\t\t\r\n\r\n\tfor msg in msglist: # добавляем к массиву-результату сообщен��я. если у них есть ответы (replies), то вызываем рекурсивно функцию, со списком ключей ответов. 
если нет ответов то присваеваем значение None\r\n\t\tnested_comments.append(Nestedobject (msg, nest(flow, msg.replies, deep) if msg.replies else None, deep))\r\n\t\tlogging.error(msg.replies)\t\t\r\n\treturn nested_comments\r\n\r\n\r\n#########################################################\r\n\r\n##########################################################################\r\n#Модель пользователя\r\n##########################################################################\r\n\r\ndef users_key(group = 'default'): #задает путь к сущности(для разделения по группам)\r\n\treturn db.Key.from_path('users', group)\r\n\r\n\r\nclass Group (db.Model):\r\n\tname = db.StringProperty(required = True)\r\n\r\n\t@classmethod\r\n\tdef by_name(cls, name): # возвращает объект содержащий сущность из datastore с указанным именем\r\n\t\tu = Group.all().filter('name =', name).get()\r\n\t\treturn u\r\n\r\n\r\nclass User(db.Model):\r\n\t\"\"\"Класс для модели пользователя для сохранения и получения \r\n\t\t\t\t\tданных из datastore(и ни для чего другого)\"\"\"\r\n\r\n\tname = db.StringProperty(required = True)\r\n\tpw_hash = db.StringProperty(required = True)\r\n\tregister = db.DateTimeProperty(auto_now_add = True)\r\n\temail = db.StringProperty()\r\n\tpower = db.StringProperty()\r\n\tcomments = db.IntegerProperty()\r\n\tposts = db.IntegerProperty()\r\n\t\r\n\t@classmethod\r\n\tdef by_id(cls, uid): # возвращает объект содержащий сущность из datastore с указанным id\r\n\t\treturn User.get_by_id(uid, parent = users_key()) \r\n\r\n\t@classmethod\r\n\tdef by_name(cls, name): # возвращает объект содержащий сущность из datastore с указанным именем\r\n\t\tu = User.all().filter('name =', name).get()\r\n\t\treturn u\r\n\r\n\t@classmethod\r\n\tdef register(cls, name, pw, email = None):# создает объект-модель для записи в datastore\r\n\t\tpw_hash = make_hash(name, pw)\r\n\t\treturn User(parent = users_key(),\r\n\t\t\t\t\t name = name,\r\n\t\t\t\t\t pw_hash = pw_hash,\r\n\t\t\t\t\t email = email, \r\n\t\t\t\t\t power = 'member')\r\n\r\n\t@classmethod\r\n\tdef check_user(cls, name, pw): #проверка: 1) пользователь существует 2) пароль совпадает\r\n\t\tu = cls.by_name(name)\r\n\t\tif u and valid_hash(u.pw_hash, name, pw):\r\n\t\t\treturn u\r\n\r\n\t\r\n\tdef set_power (self, power):\r\n\t\t\r\n\t\tself.power = power\r\n\t\tif self.put(): return True\r\n\t\telse: return False\r\n\r\n\tdef check_power (self, action):\r\n\t\tif self.power == None: return False\r\n\t\treturn permits[self.power][action]#!!!!!!!!!!необходимо убрать хардкод и хранить разрешения в бд\r\n\r\n\r\n\t\t\t\r\n##########################################################################\r\n#Модель поста\r\n##########################################################################\r\n\r\nclass BlogEntry (db.Model):\r\n\r\n\tdef make_rudate(self, date_set = None):\t\t\t\r\n\r\n\t\tif not date_set: return pytils.dt.ru_strftime(u\"%d %b %Y %H\"+u\":\"+u\"%M\",inflected=True, date=self.created)\r\n\r\n\t\tday, month, year, hm = re.split(' ', pytils.dt.ru_strftime(u\"%d %b %Y %H\"+u\":\"+u\"%M\",inflected=True, date=self.created))\t\t\r\n\t\tif date_set == 'day' or 'month' or 'year': return vars()[date_set]\r\n\t\telse: return 'Date Error'\t\t\r\n\r\n\tdef make_rucomment(self, com_number):\r\n\t\treturn pytils.numeral.get_plural(0, u\"Комментарий, Комментария, Комментариев\", absence=u\"Комментариев пока нет\")\r\n\r\nclass Post (BlogEntry):\r\n\ttitle = db.StringProperty(required = True)\r\n\ttext = db.TextProperty(required = True)\r\n\tcomments = 
db.IntegerProperty(default = 0)\r\n\tauthor = db.StringProperty()\r\n\tcreated = db.DateTimeProperty(auto_now_add = True)\r\n\tedited = db.DateTimeProperty(auto_now_add = True)\r\n\treplies = db.ListProperty(int)\r\n\r\nclass Comment (BlogEntry):\r\n\ttext = db.TextProperty(required = True)\r\n\tauthor = db.StringProperty()\r\n\tcreated = db.DateTimeProperty(auto_now_add = True)\r\n\tedited = db.DateTimeProperty(auto_now_add = True)\r\n\treplies = db.ListProperty(int)\r\n##########################################################################\r\n#Модели сраниц\r\n##########################################################################\r\nclass MainHandler(webapp2.RequestHandler):\r\n\t\"\"\"Базовый класс для обработчиков запросов браузера\r\n\t\twrite() - отправляет аргументы на вывод браузеру\r\n\t\trender_str() - перегрузка технической функции(добавление параметра \"имя пользователя\")\r\n\t\trender() - отправляет шаблон на вывод браузера(предварительно вызывает рендер шаблона render_str)\r\n\t\"\"\"\r\n\t\r\n\tdef write(self, *a, **kw): #вывод текста на экран\r\n\t\tself.response.out.write(*a, **kw)\r\n\r\n\tdef render_str(self, template, **params): # добавление различных параметров в рендер шаблона \r\n\t\tparams['user'] = self.user\r\n\t\tparams.update(app_path)\t\t\r\n\t\treturn render_str(template, **params) # вызов технической функции с новым параметром\r\n\r\n\tdef render(self, template, **kw): # вывод шаблона на экран\r\n\t\tself.write(self.render_str(template, **kw))\r\n\r\n\tdef set_cookie(self, name, val, expires): # установка куки для сессии\t\t\t\r\n\t\texpires = (datetime.datetime.now() + datetime.timedelta(days=expires)).strftime('%a, %d %b %Y %H:%M:%S GMT')#Пока нет записи в датастор, а только в мемкэш больше трех дней не стоит делать.\r\n\t\tself.response.headers.add_header(\r\n\t\t\t'Set-Cookie',\r\n\t\t\t'%s=%s; expires=%s; Path=/' %(name, val, expires))\r\n\r\n\tdef read_secure_cookie(self, name): #чтение сессионной куки\r\n\t\tcookie_val = self.request.cookies.get(name)\t\t\r\n\t\treturn cookie_val and check_secure_val(cookie_val)\r\n\r\n\tdef check_session(self):\t\t\r\n\t\tif self.session.is_new():\t\t\t\r\n\t\t\treturn None\r\n\t\tcookie_val = self.request.cookies.get('ssid') #хеш из uid и ip\r\n\t\tif valid_hash(cookie_val, self.session['uid'], self.request.remote_addr):\t\t\t\r\n\t\t\treturn self.session['uid']\r\n\t\telse: \r\n\t\t\treturn None\r\n\t\r\n\tdef login(self, user): #логин пользователя (установка сессионной куки)\r\n\r\n\t\tssid = make_hash(user.key().id(), self.request.remote_addr)\r\n\t\tself.set_cookie('ssid', ssid, expires=SESSION_EXPIRES)\t\t\r\n\t\tself.session['uid'] = user.key().id()\r\n\r\n\t\tself.session['ssid'] = ssid\r\n\t\tself.session.save()\r\n\r\n\tdef logout(self): #логаут\r\n\t\t# self.response.headers.add_header('Set-Cookie', 'uid=; Path=/')\r\n\t\tself.session.invalidate()\r\n\r\n\tdef initialize(self, *a, **kw):\r\n\r\n\t\twebapp2.RequestHandler.initialize(self, *a, **kw)\r\n\t\t#uid = self.read_secure_cookie('user_id')\t\t\r\n\t\tself.session = gmemsess.Session(self)\r\n\t\tuid = self.check_session()\t\t\r\n\t\tself.user = uid and User.by_id(int(uid))\r\n\t\tif self.user is not None: #если пользователь существует сохраняем в объект его uid из датастора\r\n\t\t\tself.user.uid = int(uid)\r\n\t\t\t\r\n\t\r\nclass Blog(MainHandler):\t\r\n\tdef get(self, owner = \"Spinningmill\", page = 1):\t\t\t\t\r\n\t\ttext_flow = Post.all().ancestor(users_key(owner)).order('-created').fetch(10)\t\t\r\n\t\tif text_flow: \r\n\t\t\tfor msg in 
text_flow:\r\n\t\t\t\tif len(msg.text) > 1000:\r\n\t\t\t\t\tmsg.text = msg.text[0:1000] + \"...\"\r\n\t\t\tself.render(\"blog.html\", text_flow = text_flow, owner = owner)\r\n\t\telse: \r\n\t\t\ttext_flow = {'error':u'Пусто'}\r\n\t\t\tself.render(\"blog.html\", text_flow = text_flow)\r\n\r\n\tdef post(self, owner = \"Spinningmill\", page = 1):\r\n\t\ttitle = self.request.get(\"subject\")\r\n\t\ttext = self.request.get(\"content\")\r\n\t\tif self.user and self.user.check_power('blog_post'): #может ли юзер постить\r\n\t\t\tif title and text:\r\n\t\t\t\ta = Post(parent = users_key(self.user.name), title = title, text = text, author = self.user.name)\r\n\t\t\t\ta.put()\r\n\t\t\t\tmsg_id = str (a.key().id())\t\t\t\t\r\n\t\t\t\tself.redirect(app_path['main'])\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\terror = \"We need some text and it's title. Both.\"\r\n\t\t\t\tself.render_front(title = title, text = text, error = error)\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['login'])\r\n\r\nclass PostHandler (MainHandler):\r\n\r\n\tdef make_path(self, post_id, id_string):\r\n\t\tpath = ['Post', int(post_id)] #путь всегда начинается с поста к которому коментарии\r\n\t\tif id_string:#если строка с id пустая, значит родителем будет пост, если не пустая, то добавляем всех по очереди к пути\r\n\t\t\tid_list = re.split(',', id_string)\t\t\r\n\t\t\tfor comm_id in id_list:\r\n\t\t\t\tpath +=['Comment', int(comm_id)]\r\n\t\treturn path\r\n\r\n\tdef add_reply(self, owner, post_id, id_string = None, edit_mode = False):\r\n\t\ttext = self.request.get(\"content\")\t\t\r\n\t\tif text:\r\n\t\t\tparent = users_key(owner)\r\n\t\t\tp = Post.get_by_id(int(post_id), parent = parent)\r\n\r\n\t\t\tif id_string: # если передан id комментария (строка с ид предков и самого коммента), то операция с комментарием\r\n\t\t\t\tlogging.error(\"entry - comment\")\r\n\t\t\t\tentry_path = self.make_path(post_id, id_string)#собираем путь до сущности из id переданных из браузера\r\n\t\t\t\tentry_key = db.Key.from_path(*entry_path, parent = parent)\t#создаем из пути ключ\r\n\t\t\t\tentry = db.Model.get(parent_key) #получаем сущность из датастора\t\t\t\t\r\n\t\t\telse: # если нет строки с id то значит операция с постом\r\n\t\t\t\tlogging.error(\"entry - post\")\r\n\t\t\t\tentry = p\t\t\r\n\r\n\t\t\tif edit_mode: #редактируем сущность\r\n\r\n\t\t\t\tentry.text = new_text\r\n\t\t\t\tentry.edited = datetime.datetime.now()\r\n\t\t\t\tentry.put()\r\n\t\t\t\treturn entry\r\n\r\n\t\t\telse:#добавление комментария, entry - родитель нового комментария\r\n\t\t\t\tc = Comment (parent = entry, text = text, author = self.user.name)# сохраняем комментарий\r\n\t\t\t\tc.put()\t\t\t\r\n\t\t\t\tentry.replies.append(c.key().id())#добавляем id к списку id коментариев-потомков(ответов) родителя\r\n\t\t\t\tp.comments +=1\t\t#увеличиваем счетчик комментариев в посте\r\n\t\t\t\t#!!!сделать проверку успешной записи комментария и если ок, то увеличить счетчик комментариев.\r\n\t\t\t\t\r\n\t\t\t\tp.put()\t\t\t\r\n\t\t\t\tif p != entry: entry.put() #если родитель не пост, то тоже его сохраняем\r\n\t\t\t\treturn c\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['main'])#!!!!!!обработка ошибки пустого текста\r\n\r\n\tdef get (self, owner, post_id, com_id): #выводим пост с комментариями\r\n\t\tp=Post.get_by_id(int(post_id), parent = users_key(owner))\t\t\r\n\t\tif p:\r\n\t\t\tcom_flow = Comment.all().ancestor(p)\r\n\r\n\t\t\tcom_index = {}\r\n\t\t\troot_com_list = []\r\n\t\t\tfor com in com_flow:\r\n\t\t\t\tcom_index[com.key().id()] = com #создаем хеш ключ:объект (индекс по 
ид)\t\t\t\t\t\r\n\r\n\t\t\tnested_comments = nest (com_index, p.replies)\t\t\t\r\n\t\t\tself.render(\"post.html\", msg = p, com_flow = nested_comments, owner = owner)\r\n\t\t\r\n\r\n\r\n\tdef post (self, owner, post_id, comment_id): #добавляем комментарий\r\n\t\tif self.user and self.user.check_power('comment_post'):\t\t\t\r\n\t\t\tself.add_reply (post_id, comment_id, owner)\t\t\t\t\t\r\n\t\t\tself.redirect('/'+owner+app_path['blog']+'/'+post_id)\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['login'])\r\n\r\nclass Signup(MainHandler):\r\n\t\"\"\"Модель для страницы регистрации\"\"\"\r\n\tdef get(self):\r\n\t\tself.render(\"signup.html\")\r\n\r\n\tdef post(self):\r\n\t\thave_error = False\r\n\t\tself.username = self.request.get('username')\r\n\t\tself.password = self.request.get('password')\r\n\t\tself.verify = self.request.get('verify')\r\n\t\tself.email = self.request.get('email')\r\n\r\n\t\tparams = dict(username = self.username,\r\n\t\t\t\t\t email = self.email) #cохраняем параметры для передачи обратно в форму в случае ошибки\r\n\r\n\t\tif not valid_username(self.username):\r\n\t\t\tparams['error_username'] = True\r\n\t\t\thave_error = True\r\n\r\n\t\tif not valid_password(self.password):\r\n\t\t\tparams['error_password'] = True\r\n\t\t\thave_error = True\r\n\r\n\t\telif self.password != self.verify:\r\n\t\t\tparams['error_verify'] = True\r\n\t\t\thave_error = True\r\n\r\n\t\tif not valid_email(self.email):\r\n\t\t\tparams['error_email'] = True\t\t\t\r\n\t\t\thave_error = True\r\n\r\n\t\tif have_error:\r\n\t\t\tself.render('signup.html', **params)\r\n\t\telse:\r\n\t\t\tself.done()\r\n\r\n\tdef done(self):\r\n\t\t#проверяем что такой пользователь не существует\r\n\t\tu = User.by_name(self.username)\r\n\t\tif u:\r\n\t\t\tmsg = u\"Пользователь с таким именем уже есть.\"\r\n\t\t\tself.render('signup.html', error_username = msg)\r\n\t\telse:\t\t\t\r\n\t\t\tu = User.register(self.username, self.password, self.email)\r\n\t\t\tu.put()\r\n\r\n\t\t\tself.login(u)\r\n\t\t\tself.redirect(app_path['main'])\r\n\r\nclass Login(MainHandler):\r\n\t\"\"\"Модель для страницы входа\"\"\"\r\n\tdef get(self):\r\n\t\tself.render('login.html')\r\n\r\n\tdef post(self):\r\n\t\tusername = self.request.get('username')\r\n\t\tpassword = self.request.get('password')\r\n\r\n\t\tu = User.check_user(username, password)\r\n\t\tif u:\r\n\t\t\tself.login(u)\r\n\t\t\tself.redirect(app_path['main'])\r\n\t\telse:\r\n\t\t\terror = u\"Имя пользователя или пароль введены не верно.\"\r\n\t\t\tself.render ('login.html', error = error)\r\n\r\n\r\nclass Logout(MainHandler):\r\n\t\"\"\"Модель для страницы выхода\"\"\"\r\n\tdef get(self):\r\n\r\n\t\tself.logout()\r\n\t\tself.redirect(app_path['main'])\r\n\r\n\r\nclass AjaxHandler(PostHandler):\r\n\r\n\tdef post(self, case, owner, post_id):\r\n\r\n\t\tif self.user:\r\n\t\t\tif case == 'addreply':\t\t\t\r\n\t\t\t\ttribe_id = self.request.get('ancestors') #список id предков\t\t\t\r\n\t\t\t\tif self.user.check_power('comment_post'):\r\n\t\t\t\t\ttext = self.request.get(\"content\")\t\t\t\t\r\n\t\t\t\t\tc = self.add_reply (owner, post_id, tribe_id)\r\n\t\t\t\t\tself.render('reply.html', com = c, nest_level = len(re.split(',',tribe_id)))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.render('reply.html', com = \"Error\", nest_level = len(re.split(',',tribe_id)))\r\n\t\t\t\t#self.write('Hello from server! 
class AjaxHandler(PostHandler):\r\n\r\n\tdef post(self, case, owner, post_id):\r\n\r\n\t\tif self.user:\r\n\t\t\tif case == 'addreply':\t\t\t\r\n\t\t\t\ttribe_id = self.request.get('ancestors') # list of ancestor ids\t\t\t\r\n\t\t\t\tif self.user.check_power('comment_post'):\r\n\t\t\t\t\ttext = self.request.get(\"content\")\t\t\t\t\r\n\t\t\t\t\tc = self.add_reply (owner, post_id, tribe_id)\r\n\t\t\t\t\tself.render('reply.html', com = c, nest_level = len(re.split(',',tribe_id)))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.render('reply.html', com = \"Error\", nest_level = len(re.split(',',tribe_id)))\r\n\t\t\t\t#self.write('Hello from server! We get and save: '+text)\r\n\t\t\telse:\r\n\t\t\t\tself.render('reply.html', com = \"Error\", nest_level = 0) # unknown case: no 'ancestors' field was read here\r\n\r\n\r\nclass Profile(MainHandler):\r\n\r\n\tdef get(self):\r\n\t\t# render a page with the user's profile data\r\n\t\tpass\r\n\r\nclass Maintance (MainHandler):\r\n\r\n\tdef get(self):\r\n\t\t\r\n\t\tif self.user and self.user.power == 'admin':\r\n\t\t\tusername = self.request.get('username')\r\n\t\t\tpower = self.request.get('power')\r\n\t\t\tif power and username:\r\n\t\t\t\tuser = User.by_name(username)\r\n\t\t\t\tuser.set_power(power)\t\t\t\r\n\t\t\t\toutput = u\"Пользователь %s включен в группу %s\" % (user.name, user.power)\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\toutput = u\"Ничего\"\r\n\r\n\t\t\tself.render('mnt.html', output = output)\r\n\r\n\t\t####\r\n\t\t#pass\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['main'])\r\n\r\n\t\t\r\n\r\n\r\n
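\t\t# The commented-out block below appears to be a one-off repair pass that rebuilt\r\n\t\t# each comment's replies list from the Comment ancestry; kept only for reference.\r\n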
\t\t# posts = Post.all()\r\n\t\t# output = ''\r\n\t\t# temp = ''\r\n\t\t# for p in posts:\t\t\t\r\n\t\t# \toutput += u\"Пост #\"+str(p.key().id())\r\n\t\t# \tcom_flow = Comment.all().ancestor(p)\t\t\t\r\n\r\n\t\t# \tfor com in com_flow:\r\n\t\t# \t\toutput += u\"Комментарий #\"+str(com.key().id())\r\n\t\t# \t\tdescendants = Comment.all().ancestor(com)\r\n\t\t# \t\tcom.replies = []\r\n\t\t# \t\tfor d in descendants:\r\n\t\t# \t\t\toutput += u\"Комментарий #\"+str(com.key().id())\r\n\t\t# \t\t\tif com.key() == d.parent_key() :\t\t\t\t\t\t\r\n\t\t# \t\t\t\tcom.replies.append(d.key())\r\n\t\t# \t\t\t\ttemp = com.key()\r\n\t\t\t\t\t\t\r\n\t\t# \t\t\tcom.put()\r\n\r\n\t\t# t = Comment.get(temp)\r\n\t\t# rr = Comment.get(t.replies[0])\r\n\t\t# logging.error(rr.author)\r\n\r\n\r\n\r\n\t\t\r\n\r\n\r\n\t\r\nlogging.getLogger().setLevel(logging.DEBUG)\r\napp = webapp2.WSGIApplication([(app_path['main'], Blog)\r\n\t\t\t\t\t\t\t\t,('/([\w-]{3,20})' +app_path['blog']+'/*', Blog)\r\n\t\t\t\t\t\t\t\t,('/([\w-]{3,20})' +app_path['blog']+'/page/([0-9]+)/*', Blog)\r\n\t\t\t\t\t\t\t\t,('/([\w-]{3,20})'+app_path['blog']+'/([0-9]+)/*(.+)*', PostHandler)\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t,('/([\w-]{3,20})' +app_path['profile']+'/*', Profile)\r\n\t\t\t\t\t\t\t\t,(app_path['signup']+'/*',Signup)\r\n\t\t\t\t\t\t\t\t,(app_path['login']+'/*', Login)\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t,(app_path['logout']+'/*', Logout)\r\n\t\t\t\t\t\t\t\t,(app_path['ajax']+'/(.+)/([\w-]{3,20})/([0-9]+)', AjaxHandler)\r\n\t\t\t\t\t\t\t\t,('/mnt', Maintance)\r\n\t\t\t\t\t\t\t\t],\r\n\t\t\t\t\t\t\t debug=True)\r\n\r\n\r\n\r\n\r\n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"359949241","text":"import numpy as np \nimport math \nclass Node(object):\n    def __init__(self,data):\n        self.left = None \n        self.right = None \n        self.value = data \n    \nclass SNode(object):\n    def __init__(self,x):\n        self.node = x \n        self.tag = 1\n\n\nclass Tree(object):\n    def __init__(self,data):\n        self.List = [None for i in range(len(data))]\n        self.node = None\n        for i in 
range(len(data)):\n            if i == 0:\n                self.List[i] = Node(data[i])\n                self.node = self.List[i]\n\n            elif (i+1)%2 == 0:\n                self.List[i] = Node(data[i])\n                self.List[(i-1)//2].left = self.List[i]\n\n            elif i%2 == 0 :\n                self.List[i]= Node(data[i])\n                self.List[i//2-1].right = self.List[i]\n    \n    def qx(self):\n        # iterative in-order traversal using an explicit stack of tagged nodes\n        stack = [SNode(self.node)]\n        top = 0\n        a = SNode(self.node)\n        \n        while True:\n            while(a.node.left!=None):\n                top=top+1\n                a = SNode(a.node.left)\n                stack.append(a)\n                # print(a.node.value) \n            \n            if(top!=-1 and stack[top].tag == 2):\n                top = top - 1\n                stack.pop()\n            \n            if(top!=-1 and stack[top].tag==1):\n                if stack[top].node.right!=None:\n                    print(stack[top].node.value)\n                    stack[top].tag=2\n                    a = SNode(stack[top].node.right)\n                    stack.append(a)\n                    top = top+1\n                    #print(\"gg\")\n                    #print(a.node.value)\n                else :\n                    stack[top].tag=2\n                    print(stack[top].node.value)\n            \n            if top==-1:\n                break\n\n\n\na = Tree([1,2,3,4,5,6,7])\n\na.qx()\n\n\n\n\n    \n    \n    \n","sub_path":"h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"466200379","text":"#coding=utf-8\n\nimport datetime\nfrom .. import db\nfrom ..models import Record\n\n\ndef record_sql(user, status, table, table_id, item, value):\n    '''Record a CMDB operation'''\n    date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n    record = Record(\n        username=user,\n        status=status,\n        table=table,\n        table_id=table_id,\n        item=item,\n        value=value,\n        date=date,\n    )\n    db.session.add(record)\n","sub_path":"idcms_note/Web/flask-web/app/utils/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"52513336","text":"#!/usr/bin/env python\n\"\"\"\nSets up a protein database in MySQL: a database of interesting properties of the proteins based on scripts of this library.\n\nThis should be an easy-to-use script for invoking the most important scripts of the library and storing their results in the DB\nfor easy retrieval.\n\nHow to use:\nCreate a folder and place there some file with a list of PDBs to analyze.\nThe program will create the following directory structure in the same directory:\n ./pdbs/ - list of pdbs downloaded\n ./results/ - results of the analysis scripts\n\"\"\"\nfrom __future__ import print_function\nimport argparse\nimport os\nimport subprocess\nimport sys\nimport pkg_resources\nimport requests\nfrom pyPPI import DBConfig\n\nimport pyPPI.surfaceComplementarity.VDW as VDW\nimport pyPPI.surfaceComplementarity.interfaceDepth as Periphery\nfrom pyPPI.ASA import ASA\nfrom pyPPI.hbonds import hbonds\nfrom pyPPI.kdtree import KDTree\nimport pyPPI.pdbReader as pdbReader\nfrom pyPPI.pdbReader import PDBReader\nimport pyPPI.electrostat as electrostat\nfrom pyPPI.cavities import calculateVolume\n\n\n\"\"\"\nDistance in angstroms between the chains that is relevant for defining the interface\n\"\"\"\nINTERFACE_DISTANCE = 4\nWORKING_DIRECTORY = './'\nPDBS_DIR = \"./pdbs/\"\nRESULTS_DIR = \"./results/\"\n\n_remediator = pkg_resources.resource_filename('pyPPI', '/'.join(['molprobity', 'remediator.pl']))\n_reduce_path = pkg_resources.resource_filename('pyPPI', '/'.join(['molprobity', 'reduce']))\n\ndef download_PDB(pdb):\n    \"\"\"\n    Downloads a PDB from the Protein Data Bank\n    :param pdb: pdb identifier\n    \"\"\"\n    url = 'http://www.rcsb.org/pdb/files/{0}.pdb'.format(pdb)\n    print('downloading %s (%s)' % (pdb, url))\n\n    req = requests.get(url)\n    with get_file(pdb) as newPDB:\n        print(req.text, file=newPDB)\n
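\n# Example: download_PDB('1ACB') fetches http://www.rcsb.org/pdb/files/1ACB.pdb and\n# writes it to PDBS_DIR via get_file() below ('1ACB' here is only an illustration).\n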
\ndef get_file(name):\n    \"\"\"\n    Get a file opened for writing in PDBS_DIR\n    :param name:\n    :return:\n    \"\"\"\n    global PDBS_DIR\n    return open(os.path.join(PDBS_DIR, name + \".pdb\"), \"w\")\n\n\ndef download_DB(pdbList):\n    \"\"\"\n    Downloads PDBs and adds hydrogens using molprobity\n    :param pdbList: list of pdbs to download\n    \"\"\"\n    print(\"Downloading pdbs according to list\")\n    for pdb in pdbList:\n        # don't download the same PDB twice\n        if os.path.exists(os.path.join(PDBS_DIR, pdb + \"_FH.pdb\")): continue\n\n        # in case the PDB is already in the directory\n        if not os.path.exists(os.path.join(PDBS_DIR, pdb + \".pdb\")):\n            download_PDB(pdb)\n\n        molprobity(pdb)\n    print(\"Finished downloading pdbs\")\n\n\ndef molprobity(pdb_name):\n    \"\"\"\n    runs molprobity on an input protein\n    :param pdb_name: name of the PDB file\n    :return:\n    \"\"\"\n    global MOLPROBITY_DIR, PDBS_DIR\n    if os.path.exists(os.path.join(PDBS_DIR, pdb_name + \"_FH.pdb\")):\n        return True  # already exists\n    print('Starting molprobity %s' % pdb_name)\n    subprocess.check_output('perl ' + _remediator + ' ' + os.path.join(PDBS_DIR,\n                                                                      pdb_name + \".pdb\") + ' > a',\n                            shell=True)\n    try:\n        subprocess.check_output(_reduce_path + ' a > b', shell=True)\n    except:\n        print('error parsing PDB %s' % pdb_name)\n        pass  # ugly, but reduce exits with a nonzero status\n    subprocess.check_output(\n        'perl ' + _remediator + ' b -oldout> ' + os.path.join(PDBS_DIR, pdb_name + \"_FH.pdb\"),\n        shell=True)\n    # delete the PDB file - we will work with the file with hydrogens added (_FH, created above)\n    os.remove(os.path.join(PDBS_DIR, pdb_name + \".pdb\"))\n\n\ndef buildASAperAtomForComplex(pdb, result):\n    asaCalc = ASA(pdb)\n    asaCalc.execute()\n    for atom, asa in asaCalc.interPerAtom.items():\n        # complex inter\n        res = [pdb.name, atom.chain, atom.residue, atom.resId, atom.symbol, atom.atomType, asa, atom.tempFactor, 0]\n        print(','.join([str(a) for a in res]), file=result)\n        # complex intra (separated)\n        asa = asaCalc.diffASAperAtom[atom] + asa\n        res = [pdb.name, atom.chain, atom.residue, atom.resId, atom.symbol, atom.atomType, asa, atom.tempFactor, 1]\n        print(','.join([str(a) for a in res]), file=result)\n\n\ndef calcInterfaceDist(pdb, result):\n    \"\"\"\n    Defines the interface by distance\n    \"\"\"\n    global INTERFACE_DISTANCE\n    partA = [a for a in pdb.atoms if a.chain in pdb.interfaceParts[0]]\n    partB = [a for a in pdb.atoms if a.chain in pdb.interfaceParts[1]]\n    if len(partA) == 0 or len(partB) == 0:\n        print('WARNING: %s doesn\'t have atoms in one of its chains' % pdb.name)\n        return\n    aTree = KDTree.construct_from_data(partA[:])\n    bTree = KDTree.construct_from_data(partB[:])\n    complexChains = ':'.join(pdb.interfaceParts)\n    for part, tree in [(partA, bTree), (partB, aTree)]:\n        for atom in part:\n            near, dist = tree.findNearest(query_point=atom.coord, num=1)\n            if dist < INTERFACE_DISTANCE:\n                print(','.join(\n                    [pdb.name, complexChains, atom.chain, str(atom.resId), atom.symbol, atom.atomType, str(dist)]),\n                    file=result)\n\n
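\n# Note: the two CSVs written below give two complementary interface definitions: the\n# distance criterion above (nearest opposing atom within INTERFACE_DISTANCE) and the\n# per-atom ASA change on separating the chains; the pipeline uses both.\n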
\ndef createInterfaceCSV(pdbsToAnalyze):\n    \"\"\"\n    The interface can be defined by either ASA or distance;\n    we use both of them\n    \"\"\"\n    global PDBS_DIR, RESULTS_DIR\n    if all(os.path.exists(os.path.join(RESULTS_DIR, resFile)) for resFile in ['PerAtomASA.csv', 'PerAtomDistance.csv']):\n        print('Data already exists in result directory.')\n        return\n\n    with open(os.path.join(RESULTS_DIR, 'PerAtomASA.csv'), 'w') as asaPerAtom:\n        with open(os.path.join(RESULTS_DIR, 'PerAtomDistance.csv'), 'w') as distancePerAtom:\n            pdbs = os.listdir(PDBS_DIR)\n            print('PDB,Chains,Chain,ResId,Symbol,Atom,MinDistance', file=distancePerAtom)\n            print('PDB,Chain,Residue,ResId,Symbol,AtomType,ASA,tempFactor,Seperated', file=asaPerAtom)\n            failedPDBs = []\n            pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyze)\n            for pdbName in pdbs:\n                if pdbName[0:4] not in pdbsNamesToChains: continue\n                pdb = PDBReader.readFile(os.path.join(PDBS_DIR, pdbName), pdbsNamesToChains[pdbName[0:4]])\n                try:\n                    print('Writing ASA for %s' % pdb.name)\n                    buildASAperAtomForComplex(pdb, asaPerAtom)\n                    print('Writing distance for %s' % pdb.name)\n                    calcInterfaceDist(pdb, distancePerAtom)\n                except IndexError:\n                    failedPDBs.append(pdb.name)\n\n    print('Finished')\n    if len(failedPDBs) > 0:\n        print('Failed to process:', ','.join(failedPDBs))\n\n\ndef createDataBase(pdbsToAnalyzeWithChains):\n    \"\"\"Loads the computations into a new database\n    :param pdbsToAnalyzeWithChains:\n    \"\"\"\n    print('Creating DB: %s' % DBConfig.DB_NAME)\n\n    installDB = pkg_resources.resource_filename('pyPPI', '/'.join(['sqls', 'createDB.sql']))\n    metadataDB = pkg_resources.resource_filename('pyPPI', '/'.join(['sqls', 'donors2.sql']))\n    createInterfaceSql = pkg_resources.resource_filename('pyPPI', '/'.join(['sqls', 'createInterface.sql']))\n\n    subprocess.call(\n        \"mysql -u %s -p%s -e 'create database if not exists %s'\" % (DBConfig.USER, DBConfig.PASSWD, DBConfig.DB_NAME),\n        shell=True)\n    # create schema\n    subprocess.call('mysql %s -u%s -p%s < %s ' % (DBConfig.DB_NAME, DBConfig.USER, DBConfig.PASSWD, installDB),\n                    shell=True)\n    # insert metadata\n    subprocess.call('mysql %s -u%s -p%s < %s ' % (DBConfig.DB_NAME, DBConfig.USER, DBConfig.PASSWD, metadataDB),\n                    shell=True)\n    conn = DBConfig.get_connection()\n    cursor = conn.cursor()\n    cursor.execute('''\n    load data local infile '%s' into table interfaceDist fields terminated by ',' optionally enclosed by '\"' lines terminated by '\\n' ignore 1 lines (PDB,Chains,Chain,ResId,Symbol,Atom,MinDist);\n    ''' % (os.path.join(RESULTS_DIR, 'PerAtomDistance.csv')))\n    cursor.execute('''\n    load data local infile '%s' into table perAtomASA fields terminated by ',' optionally enclosed by '\"' lines terminated by '\\n' ignore 1 lines (PDB,Chain,Residue,ResId,Symbol,Atom,ASA,Bfactor,Seperated);\n    ''' % (os.path.join(RESULTS_DIR, 'PerAtomASA.csv')))\n    conn.commit()\n\n    # create interface table\n    subprocess.call('mysql %s -u%s -p%s < %s ' % (DBConfig.DB_NAME, DBConfig.USER, DBConfig.PASSWD, createInterfaceSql),\n                    shell=True)\n\n    # add metadata table with the complexes in the database\n    pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyzeWithChains)\n    dataToInsert = []\n    for pdbName, chains in pdbsNamesToChains.items():\n        pdb = PDBReader.readFile(os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName), pdbsNamesToChains[pdbName[0:4]])\n        if chains is None:\n            compunds = pdb.compunds.split(' - ')\n            dataToInsert.append((pdbName, pdb.interfaceParts[0], compunds[0] if len(compunds) > 1 else compunds,\n                                 pdb.interfaceParts[1], compunds[1] if len(compunds) > 1 else ''))\n        else:\n            dataToInsert.append((pdbName, pdb.interfaceParts[0], '', pdb.interfaceParts[1], ''))\n\n    cursor = conn.cursor()\n    cursor.executemany('''\n    INSERT INTO proteinComplex (PDB,UnboundChainA,NameA,UnboundChainB,NameB)\n    values (%s,%s,%s,%s,%s)\n    ''', dataToInsert)\n    conn.commit()\n    conn.close()\n    print('database created!')\n\n\ndef getInterfaceAtoms(cur, pdb):\n    \"\"\"\n    Gets interface atoms from the database\n    :param cur: cursor to the database\n    :param pdb: pdb object to get 
atoms from\n    :return: list of interface atoms\n    \"\"\"\n    cur.execute('''\n    select Chain,ResId,Symbol from NinterfaceAtoms\n    where PDB='%s'\n    ''' % pdb.name)\n    interfaceAtoms = []\n    for chain, resid, symbol in cur.fetchall():\n        interfaceAtoms.append(\n            next(a for a in pdb.atoms if a.chain == chain and a.resId == resid and a.symbol == symbol))\n    return interfaceAtoms\n\n\ndef fillInterfacePeriphrial(pdbsToAnalyze):\n    global PDBS_DIR, RESULTS_DIR\n\n    if os.path.exists(os.path.join(RESULTS_DIR, 'interfacePeriphrial.csv')):\n        print('Data already exists in result directory for interface periphery.')\n        return\n\n    pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyze)\n\n    with open(os.path.join(RESULTS_DIR, 'interfacePeriphrial.csv'), 'w') as interfacePeriphrial:\n        print('PDB,Chain,ResId,Symbol,Peripherial,PropPeri', file=interfacePeriphrial)\n        for pdbName, chains in pdbsNamesToChains.items():\n            print('Calculating peripheral table for %s ' % pdbName)\n            pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdbName)\n            depthL, peripherialL = Periphery.calc_peripheral_PDB(pdb_path, chains)\n            for atom, peri, propPeri in peripherialL:\n                print(','.join([pdbName, atom.chain, str(atom.resId), atom.symbol, str(peri), str(propPeri)]),\n                      file=interfacePeriphrial)\n\n    conn = DBConfig.get_connection()\n    cursor = conn.cursor()\n    cursor.execute('''\n    load data local infile '%s' into table interfacePeriphrial\n    fields terminated by ',' optionally enclosed by '\"' lines terminated by '\\n'\n    ignore 1 lines (PDB,Chain,ResId,Symbol,Peri,PropPeri);\n    ''' % (os.path.join(RESULTS_DIR, 'interfacePeriphrial.csv')))\n    conn.commit()\n    conn.close()\n\n\ndef calcEnergyTerms(pdbsToAnalyze):\n    \"\"\"\n    Finds hydrogen bonds near interface atoms and calculates their energy,\n    and calculates VDW and electrostatic energy for the PDB\n    \"\"\"\n    global PDBS_DIR, RESULTS_DIR\n\n    if all(os.path.exists(os.path.join(RESULTS_DIR, resFile)) for resFile in ['Ndrieding.csv', 'interfaceVDW.csv']):\n        print('Data already exists in result directory for energy terms.')\n        return\n\n    conn = DBConfig.get_connection()\n    cursor = conn.cursor()\n    pdbsNamesToChains = dict((p[0], p[1].split(':') if len(p) > 1 else None) for p in pdbsToAnalyze)\n    with open(os.path.join(RESULTS_DIR, 'Ndrieding.csv'), 'w') as driedingResult:\n        print('PDB,DonorChain,DonorResId,DonorSymbol,AccChain,AccResId,AccSymbol,Energy', file=driedingResult)\n        pdbs = os.listdir(PDBS_DIR)\n        for pdbName in pdbs:\n            if pdbName[0:4] not in pdbsNamesToChains: continue\n            pdb = PDBReader.readFile(os.path.join(PDBS_DIR, pdbName), pdbsNamesToChains[pdbName[0:4]])\n            interfaceAtoms = getInterfaceAtoms(cursor, pdb)\n            bonds = hbonds(pdb)\n            bonds.HDPlusDefinition = False\n            cBondList = bonds.hbonds(interfaceAtoms)\n            print('Calculating Hbonds for %s' % pdb.name)\n            for donor, acceptor, eng in cBondList:\n                toPrint = [pdb.name, donor.chain, donor.resId, donor.symbol, acceptor.chain, acceptor.resId,\n                           acceptor.symbol, eng]\n                print(','.join([str(a) for a in toPrint]), file=driedingResult)\n    cursor.execute('''\n    load data local infile '%s' into table Ndrieding\n    fields terminated by ',' optionally enclosed by '\"' lines terminated by '\\n'\n    ignore 1 lines (PDB,DonorChain,DonorResId,DonorSymbol,AccChain,AccResId,AccSymbol,Energy);\n    ''' % (os.path.join(RESULTS_DIR, 'Ndrieding.csv')))\n    conn.commit()\n\n    print('Calculating VDW energy between interfaces')\n    with open(os.path.join(RESULTS_DIR, 'interfaceVDW.csv'), 'w') as vdw_result:\n        print('PDB,VDV,VDVx,clashV,clashS', 
file=vdw_result)\n        for pdb, chains in pdbsNamesToChains.items():\n            print('Calculating VDW for %s' % pdb)\n            pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdb)\n            sumVDW, sumVDWx, clashV, clashS = VDW.calcCompl(pdb_path, chains)\n            print(','.join([pdb, str(sumVDW), str(sumVDWx), str(clashV), str(clashS)]), file=vdw_result)\n    cursor.execute('''\n    load data local infile '%s' into table interfaceVDW\n    fields terminated by ',' optionally enclosed by '\"' lines terminated by '\\n'\n    ignore 1 lines (PDB,VDV,VDVx6,ClashV,ClashS);\n    ''' % (os.path.join(RESULTS_DIR, 'interfaceVDW.csv')))\n    conn.commit()\n\n    print('Calculating electrostatic charges (Coulomb of paired charges except hydrogen bonds)')\n    with open(os.path.join(RESULTS_DIR, 'electrostatic.csv'), 'w') as electro_res:\n        print('PDB,eCoulomb,pp,mm,pm', file=electro_res)\n        for pdb, chains in pdbsNamesToChains.items():\n            pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdb)\n            pdb = PDBReader.readFile(pdb_path, chains)\n            interfaceAtoms = getInterfaceAtoms(cursor, pdb)\n\n            e, pp, mm, pm = electrostat.calcElectrostatic(pdb, interfaceAtoms)\n            print('%s,%f,%i,%i,%i' % (pdb.name, e, pp, mm, pm), file=electro_res)\n    cursor.execute('''\n    load data local infile '%s' into table electrostat\n    fields terminated by ',' optionally enclosed by '\"' lines terminated by '\\n'\n    ignore 1 lines (PDB,electro,pp,mm,pm);\n    ''' % (os.path.join(RESULTS_DIR, 'electrostatic.csv')))\n    conn.commit()\n\n    print('Calculating electrostatic charge contacts with hydrophobic residues')\n    with open(os.path.join(RESULTS_DIR, 'electrostatic-hydrophobic.csv'), 'w') as electro_hydro_res:\n        print('PDB,positive-hydrophobic,negative-hydrophobic', file=electro_hydro_res)\n        for pdb, chains in pdbsNamesToChains.items():\n            pdb_path = os.path.join(PDBS_DIR, '%s_FH.pdb' % pdb)\n            pdb = PDBReader.readFile(pdb_path, chains)\n            interfaceAtoms = getInterfaceAtoms(cursor, pdb)\n\n            pos, neg = electrostat.calcElectroHydrophobic(pdb, interfaceAtoms)\n            print('%s,%i,%i' % (pdb.name, pos, neg), file=electro_hydro_res)\n\n    print('Approximating cavities/gaps volume by Monte Carlo')\n    with open(os.path.join(RESULTS_DIR, 'cavity_vol.csv'), 'w') as cavity_res:\n        print('PDB,cavity_vol', file=cavity_res)\n        for pdbName in pdbs:\n            if pdbName[0:4] not in pdbsNamesToChains: continue\n            pdb = PDBReader.readFile(os.path.join(PDBS_DIR, pdbName), pdbsNamesToChains[pdbName[0:4]])\n            interfaceAtoms = getInterfaceAtoms(cursor, pdb)\n            cavities_vol_approx = calculateVolume(pdb, interfaceAtoms)\n            print('%s,%f' % (pdb.name, cavities_vol_approx), file=cavity_res)\n\n    cursor.close()\n    conn.close()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Setup/download protein database based on PDB\")\n    parser.add_argument(\"pdbList\", help=\"A file with a list of PDBs to download\")\n    parser.add_argument(\"--folder\", help=\"Name of the folder to contain downloaded files\")\n    parser.add_argument(\"--dbName\", help=\"Name of the database to create.\")\n    args = parser.parse_args()\n    if args.pdbList is None:\n        sys.exit(\"Please provide a file with a list of PDBs to analyze\")\n\n    WORKING_DIRECTORY = args.folder if args.folder is not None else os.path.dirname(os.path.abspath(args.pdbList))\n    print('WORKING DIR: %s' % WORKING_DIRECTORY)\n\n    PDBS_DIR = os.path.join(WORKING_DIRECTORY, 'pdbs')\n    pdbReader.PDBS_DIR = PDBS_DIR\n    RESULTS_DIR = os.path.join(WORKING_DIRECTORY, 'results')\n    for dir in [PDBS_DIR, RESULTS_DIR]:\n        if not os.path.exists(dir):\n            os.mkdir(dir)\n\n
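    # The list file is expected to hold one complex per line, e.g. \"1acb_E:I\" - a PDB id,\n    # optionally followed by \"_\" and the chain groups separated by \":\" ('1acb' here is\n    # only an illustration); lines starting with \"#\" are skipped.\n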
    pdbsToAnalyzeWithChains = [pdb.strip().upper().split(\"_\") for pdb in open(args.pdbList, 'r') if\n                               pdb[0:1] != '#']  # todo: add treatment for chain specification instead of [0:4]\n    pdbsToAnalyze = [pdb[0] for pdb in pdbsToAnalyzeWithChains]\n    download_DB(pdbsToAnalyze)  # download from the PDB bank and add hydrogens\n    createInterfaceCSV(pdbsToAnalyzeWithChains)  # define the interface by distance and by ASA\n    print('''The script will now create the DB. The DB is required for extra calculations,\n    including VDW and hydrogen bonds\n    ''')\n    try:\n        if args.dbName:\n            DBConfig.DB_NAME = args.dbName\n        DBConfig.init_connection()\n        createDataBase(pdbsToAnalyzeWithChains)\n\n        # post database creation scripts\n        fillInterfacePeriphrial(pdbsToAnalyzeWithChains)\n        calcEnergyTerms(pdbsToAnalyzeWithChains)\n\n    except KeyboardInterrupt:\n        print('DB will not be created. Use the ./results tables to see the results')\n","sub_path":"bin/setupPpiDb.py","file_name":"setupPpiDb.py","file_ext":"py","file_size_in_byte":18276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"617389112","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport io\nimport logging \nfrom ontology import Ontology\nfrom material import Resource\nfrom command import Command\nfrom queue import Job, Task, Scanner\nfrom queue import ResourceTask\nfrom error import *\n\nclass FlowcellJob(Job):\n    def __init__(self, queue, node):\n        Job.__init__(self, queue, node)\n        self.log = logging.getLogger('Bio')\n\n    def load(self):\n        Job.load(self)\n        scanner = Scanner(self.env, self.ontology)\n        if scanner.ignored:\n            self.node['ignored'].extend(scanner.ignored)\n        if scanner.results:\n            count = 0\n            for location in scanner.results:\n                if location['media kind'] == 50:\n                    if (location['inode type'] == 'directory' and location['kind'] in [ 'ihrf', 'imrf', 'inrf' ]):\n                        if self.action == 'basecall':\n                            if 'lane count' in location:\n                                if self.ontology['basecall implementation'] == 'picard':\n                                    for index in range(location['lane count']):\n                                        lane = Ontology.clone(location)\n                                        lane['lane number'] = index + 1\n                                        self.push(BasecallTask(self, self.ontology, lane))\n                                        count += 1\n                                elif self.ontology['basecall implementation'] == 'bcl2fastq':\n                                    self.push(BasecallTask(self, self.ontology, location))\n                                    count += 1\n                            else:\n                                # for now we use the presence of lane count as a signal that the rule to identify the \n                                # flowcell model has been triggered. 
If lane count is missing report an error about the \n # illumina flowcell possibly being novel.\n self.log.error('potentially unknown illumina flowcell model %s', location['illumina flowcell id'])\n\n elif self.action in ['sav', 'implode']:\n self.push(FlowcellTask(self, self.ontology, location))\n count += 1\n\n elif (location['inode type'] == 'file' and location['kind'] in [ 'ihrz', 'imrz', 'inrz' ]):\n if self.action in [ 'explode' ]:\n self.push(FlowcellTask(self, self.ontology, location))\n count += 1\n\n self.log.debug('%d %s tasks queued in job %s', count, self.action, self.uuid)\n\nclass FlowcellTask(ResourceTask):\n def __init__(self, job, ontology, location):\n ResourceTask.__init__(self, job, ontology, location)\n self.log = logging.getLogger('Bio')\n\n def extract_interop(self):\n def find_run_parameters_file(base):\n path = os.path.join(base, 'runParameters.xml')\n if not os.path.exists(path):\n path = os.path.join(base, 'RunParameters.xml')\n if not os.path.exists(path):\n path = None\n return path\n\n if self.resource.node is not None:\n tar = Command('tar', self.context, self.ontology)\n rsync = Command('rsync', self.context)\n if tar.valid:\n if rsync.valid:\n objective = os.path.join(tar.ontology['work directory'], self.resource.location['illumina flowcell id'])\n try:\n self.env.prepare_directory(objective)\n except (NoOverwriteError, PermissionDeniedError) as e:\n self.abort(str(e))\n else:\n interop_path = os.path.join(self.resource.path, 'InterOp')\n rsync.ontology['recursive rsync'] = True\n rsync.ontology['positional'] = [ \n interop_path,\n os.path.join(self.resource.path, 'RunInfo.xml'),\n find_run_parameters_file(self.resource.path),\n objective\n ]\n rsync.execute()\n\n override = {\n 'extension': 'tar',\n 'inode type': 'file',\n 'compression': tar.ontology['compression']\n }\n if self.resource.location['kind'] == 'ihrf':\n override['kind'] = 'ihsz'\n elif self.resource.location['kind'] == 'inrf':\n override['kind'] = 'insz'\n elif self.resource.location['kind'] == 'imrf':\n override['kind'] = 'imsz'\n product = self.produce(self.resource.origin, override)\n try:\n self.env.prepare_to_write_to_path(product.path, self.ontology['overwrite'])\n except (NoOverwriteError, PermissionDeniedError) as e:\n self.abort(str(e))\n else:\n tar.cwd = tar.ontology['work directory']\n tar.ontology['tar create'] = True\n tar.ontology['tar file'] = product.path\n tar.ontology['positional'] = [ self.resource.location['illumina flowcell id'] ]\n self.log.debug('compress {} --> {}'.format(self.resource.location['illumina flowcell id'], product.path))\n tar.execute()\n else:\n self.abort('command {} is invalid'.format(rsync.name))\n else:\n self.abort('command {} is invalid'.format(tar.name))\n\n def tar_flowcell(self):\n if self.resource.node is not None:\n if (self.resource.location['media kind'] == 50 and \n self.resource.location['inode type'] == 'directory'):\n\n tar = Command('tar', self.context, self.ontology)\n if tar.valid:\n override = { 'extension': 'tar', 'inode type': 'file', 'compression': tar.ontology['compression'] }\n if self.resource.location['kind'] == 'ihrf': override['kind'] = 'ihrz'\n elif self.resource.location['kind'] == 'imrf': override['kind'] = 'imrz'\n elif self.resource.location['kind'] == 'inrf': override['kind'] = 'inrz'\n\n product = self.produce(self.resource.origin, override)\n try:\n self.env.prepare_to_write_to_path(product.path, self.ontology['overwrite'])\n except (NoOverwriteError, PermissionDeniedError) as e:\n self.abort(str(e))\n else:\n 
tar.cwd = self.resource.location['dirname']\n tar.ontology['tar create'] = True\n tar.ontology['tar file'] = product.path\n tar.ontology['positional'] = [ self.resource.location['basename'] ]\n self.log.debug('compress {} --> {}'.format(self.resource.path, product.path))\n tar.execute()\n else:\n self.abort('command {} is invalid'.format(tar.name))\n else:\n self.abort('only compressing flowcell run directory supported')\n else:\n self.abort('could not crawl resource metadata')\n\n def untar_flowcell(self):\n if self.resource.node is not None:\n if (self.resource.location['media kind'] == 50 and \n self.resource.location['inode type'] == 'file' and \n self.resource.location['extension'] == 'tar'):\n\n tar = Command('tar', self.context, self.ontology)\n if 'compression' not in self.ontology:\n tar.ontology['compression'] = self.resource.location['compression']\n\n if tar.valid:\n override = { 'extension': None, 'compression': None, 'inode type': 'directory' }\n if self.resource.location['kind'] == 'ihrz':\n override['kind'] = 'ihrf'\n elif self.resource.location['kind'] == 'imrz':\n override['kind'] = 'imrf'\n elif self.resource.location['kind'] == 'inrz':\n override['kind'] = 'inrf'\n product = self.produce(self.resource.origin, override)\n try:\n self.env.prepare_to_write_to_path(product.path, self.ontology['overwrite'])\n except (NoOverwriteError, PermissionDeniedError) as e:\n self.abort(str(e))\n else:\n tar.cwd = product.location['dirname']\n tar.ontology['tar extract'] = True\n tar.ontology['tar file'] = self.resource.path\n self.log.debug('uncompress {} --> {}'.format(self.resource.path, product.location['dirname']))\n tar.execute()\n else:\n self.abort('command {} is invalid'.format(tar.name))\n else:\n self.abort('only uncompressing flowcell run directory archive supported')\n else:\n self.abort('could not crawl resource metadata')\n\nclass BasecallTask(ResourceTask):\n def __init__(self, job, ontology, location):\n ResourceTask.__init__(self, job, ontology, location)\n self.log = logging.getLogger('Bio')\n if self.valid:\n if self.ontology['basecall implementation'] == 'picard':\n self.ontology['task cores'] = self.env.constant['picard threads per lane']\n\n elif self.ontology['basecall implementation'] == 'bcl2fastq':\n if 'lane count' in self.location:\n self.ontology['task cores'] = self.location['lane count'] * self.env.constant['bcl2fastq threads per lane']\n\n def basecall(self):\n if self.ontology['basecall implementation'] == 'picard':\n self.picard_basecall()\n\n elif self.ontology['basecall implementation'] == 'bcl2fastq':\n self.bcl2fastq_basecall()\n\n def picard_basecall(self):\n if self.resource.node is not None:\n picard = Command('picard illuminabasecallstofastq', self.context, self.resource.location)\n picard.ontology.overlay(self.ontology)\n if picard.valid:\n flowcell = self.resource.node['body']['flowcell']\n picard.ontology['picard read structure'] = ''.join([ '{}T'.format(n['nibble cycle count']) for n in flowcell['nibbles'] ])\n try:\n self.env.prepare_directory(picard.ontology['picard temp directory'])\n except (NoOverwriteError, PermissionDeniedError) as e:\n self.abort(str(e))\n else:\n self.log.debug('basecalling with picard IlluminaBasecallsToFastq to {}'.format(picard.ontology['work directory']))\n picard.execute()\n\n if self.valid:\n # Scan for FASTQ products\n scanner = Scanner(self.env, \n Ontology(self.env, 'ns/system/scanner',\n {\n 'recursive': True,\n 'filter': [ r'+ \\.fastq\\.gz', r'- \\.*' ],\n 'scan path': [ picard.ontology['work 
directory'] ]\n }\n ) \n )\n\n # Queue tasks to move the FASTQ products to the repository\n for location in scanner.results:\n o = self.job.ontology.project('ns/system/task')\n o['action'] = 'move'\n t = ResourceTask(self.job, o, location)\n t.group = self.uuid\n t.constrain(\n {\n 'condition scope': 'task',\n 'task status': 'pending',\n 'task reference': self.uuid,\n 'task reference status': 'completed',\n 'task status to apply': 'ready'\n }\n )\n\n t.constrain(\n {\n 'condition scope': 'task',\n 'task status': 'pending',\n 'task reference': self.uuid,\n 'task reference status': 'aborted',\n 'task status to apply': 'aborted'\n }\n )\n self.job.push(t)\n else:\n self.abort('command {} is invalid'.format(picard.name))\n else:\n self.abort('could not crawl resource metadata')\n\n def bcl2fastq_basecall(self):\n if self.resource.node is not None:\n if (self.resource.location['kind'] in [ 'ihrf', 'imrf' ]):\n bcl2fastq = Command('bcl2fastq', self.context, self.resource.location)\n if bcl2fastq.valid:\n flowcell = self.resource.node['body']['flowcell']\n expected = []\n # infer the base mask for a pure bcl to fastq conversion\n # we decalre all nibbles as reads with 'Y' so that each gets written, completely, to a separate fastq file \n bcl2fastq.ontology['bcl2fastq use bases mask'] = ','.join([ 'Y{}'.format(n['nibble cycle count']) for n in flowcell['nibbles'] ])\n\n try:\n self.env.prepare_to_write_to_path(bcl2fastq.ontology['bcl2fastq output dir'], self.ontology['overwrite'])\n self.env.prepare_to_write_to_path(bcl2fastq.ontology['bcl2fastq sample sheet'], self.ontology['overwrite'])\n except (NoOverwriteError, PermissionDeniedError) as e:\n self.abort(str(e))\n else:\n # Create a sample sheet csv file for pure bcl to fastq conversion\n self.log.debug('write samplesheet {}'.format(bcl2fastq.ontology['bcl2fastq sample sheet']))\n content = [ self.env.constant['bcl2fastq samplesheet header'] ]\n for index in range(bcl2fastq.ontology['lane count']):\n lane_number = index + 1\n control = 'Y' if ('control lane number' in flowcell and lane_number == flowcell['control lane number']) else 'N'\n content.append('{0},{1},{0},,Undetermined,,{2},,,lane{1}'.format(bcl2fastq.ontology['illumina flowcell id'], lane_number, control))\n for n in range(flowcell['number of nibbles']):\n nibble_number = n + 1\n product = self.produce(self.resource.origin,\n {\n 'lane number': lane_number, \n 'nibble number': nibble_number,\n 'media kind': 51,\n 'extension': 'fastq',\n 'kind': 'fastq',\n 'compression': 'gz',\n 'inode type': 'file'\n }\n )\n self.env.prepare_to_write_to_path(product.path, self.ontology['overwrite'])\n expected.append(product)\n try:\n with io.open(bcl2fastq.ontology['bcl2fastq sample sheet'], 'wb') as w:\n w.write('\\n'.join(content).encode('utf8')) \n except OSError as error:\n self.abort('writing samplesheet file to {} failed'.format(bcl2fastq.ontology['bcl2fastq sample sheet']))\n self.log.error(str(error))\n\n if self.valid:\n self.log.debug('configure BCL to FASTQ {}'.format(bcl2fastq.ontology['work directory']))\n bcl2fastq.execute()\n else:\n self.abort('command {} is invalid'.format(bcl2fastq.name))\n\n if self.valid:\n make = Command('bcl2fastq make', self.context)\n if make.valid:\n # Potentially override values provided by the task on the command line\n make.ontology.overlay(self.ontology)\n make.cwd = bcl2fastq.ontology['bcl2fastq output dir']\n self.log.debug('convert BCL to FASTQ {}'.format(bcl2fastq.ontology['work directory']))\n make.execute()\n else:\n self.abort('command {} is 
invalid'.format(make.name))\n\n if self.valid:\n # Scan for FASTQ products\n scanner = Scanner(self.env, \n Ontology(self.env, 'ns/system/scanner',\n {\n 'recursive': True,\n 'filter': [ r'+ \\.fastq\\.gz', r'- \\.*' ],\n 'scan path': [ bcl2fastq.ontology['bcl2fastq output dir'] ]\n }\n ) \n )\n\n # Queue tasks to move the FASTQ products to the repository\n for location in scanner.results:\n o = self.job.ontology.project('ns/system/task')\n o['action'] = 'move'\n t = ResourceTask(self.job, o, location)\n t.group = self.uuid\n t.constrain(\n {\n 'condition scope': 'task',\n 'task status': 'pending',\n 'task reference': self.uuid,\n 'task reference status': 'completed',\n 'task status to apply': 'ready'\n }\n )\n\n t.constrain(\n {\n 'condition scope': 'task',\n 'task status': 'pending',\n 'task reference': self.uuid,\n 'task reference status': 'aborted',\n 'task status to apply': 'aborted'\n }\n )\n self.job.push(t)\n else:\n self.abort('{} not implemented with for bcl2fastq'.format(self.resource.location['kind']))\n else:\n self.abort('could not crawl resource metadata')\n\n","sub_path":"module/bio/queue/flowcell.py","file_name":"flowcell.py","file_ext":"py","file_size_in_byte":19597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"463770746","text":"import requests\nimport base64\nimport json.decoder\nimport json\n\nimport pprint\n\nimport fixIP\n\nclass AllDict:\n def __init__(self):\n self.tenants = {}\n self.columns = set()\n\n def ensureKey(self,key):\n if not key in self.tenants:\n self.tenants[key] = {}\n\n def noteColumn(self,column):\n if not column in self.columns:\n self.columns.add(column)\n\n def addTenantAttrs(self,key,dict, cat):\n #key = key+\"_\"+cat\n self.ensureKey(key)\n self.tenants[key].update( dict )\n for c in dict:\n self.noteColumn(c)\n\n def fixupIPAddr(self):\n fixIP.fixIPs(self) #.tenants)\n\n def slugIsKnown(self, slug):\n return (slug in self.tenants)\n\n def addTenantSlug(self,slug):\n self.ensureKey(slug)\n slugDict = self.tenants[slug]\n slugDict['slug'] = slug # + 'was_missing'\n\n\n\ndef getDataFromConsul():\n tenantDict = AllDict()\n #url = \"http://localhost:8500/v1/kv/tenants/103?recurse\"\n url = \"http://localhost:8500/v1/kv/tenants?recurse\"\n r = requests.get(url)\n \n try:\n items = r.json()\n except json.decoder.JSONDecodeError as e:\n print('bad json:[ %s ]' % r.text)\n print(e)\n raise\n \n for i in items:\n key_parts = i['Key'].split('/')\n cat = key_parts[-1]\n k = key_parts[1]\n v = i['Value']\n s = base64.b64decode(v)\n ss = s.decode('utf8') # this is BYTES.\n ss=ss.replace( '\\u200b', '??') # zwsp causing problems.\n dict = json.loads(ss)\n tenantDict.addTenantAttrs(k,dict, cat)\n\n tenantDict.fixupIPAddr()\n\n return tenantDict\n\n\nisLaunchedStandlone = (__name__ == \"__main__\")\nif isLaunchedStandlone:\n dataDict = getDataFromConsul() # dictConsul.\n if 0:\n print(dataDict.__dict__)\n else:\n key1 = list(dataDict.tenants.keys())[0]\n pprint.pprint(dataDict.tenants[key1])\n","sub_path":"consul/dictConsul.py","file_name":"dictConsul.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"552878799","text":"#!/usr/bin/env python\n# author ejrbuss\nimport os\nimport time\nimport random\n\n# default LOC for a new source file\nbaseLines = 200\n# default number of LOC to modify\nmodifyLines = 50\n# default number of LOC to append\nappendLines = 50\n\ndef code():\n \"\"\"\n Returns a line of 
\"code\".\n \"\"\"\n return str({ 'time' : time.time(), 'hash' : random.random() })\n\n\nclass program():\n\n def __init__(self, sim):\n self.sim = sim\n self.sim.meta['#projects'] = 0\n self.sim.meta['#files'] = 0\n self.sim.repo['dirs'] = ['repository', os.path.join('repository', 'common')]\n self.sim.repo['files'] = []\n\n def project(self):\n \"\"\"\n Creates a new project directory in a random directory.\n \"\"\"\n # create new directory\n self.sim.meta['#projects'] += 1\n parent = random.choice(self.sim.repo['dirs'])\n name = 'project_' + str(self.sim.meta['#projects'])\n path = os.path.join(parent, name)\n os.makedirs(path)\n # update directory list\n self.sim.repo['dirs'] += [path]\n # create index file\n with open(os.path.join(path, 'gitnotignore.txt'), 'w') as file:\n file.write('Created to prevent git from ignoring directory.')\n self.sim.log('created project: {}'.format(path))\n\n def create(self):\n \"\"\"\n Creates a new file in a random directory.\n \"\"\"\n # create new source file\n self.sim.meta['#files'] += 1\n parent = random.choice(self.sim.repo['dirs'])\n name = 'source_' + str(self.sim.meta['#files']) + '.sim'\n path = os.path.join(parent, name)\n with open(path, 'w') as file:\n for _ in range(random.randrange(baseLines)):\n file.write(code() + '\\n')\n # update file list\n self.sim.repo['files'] += [path]\n self.sim.log('created source: {}'.format(path))\n\n def modify(self):\n \"\"\"\n Modifies a random source file.\n \"\"\"\n if len(self.sim.repo['files']) == 0:\n self.create()\n path = random.choice(self.sim.repo['files'])\n with open(path, 'r') as file:\n source = file.read()\n lines = source.split('\\n')\n for _ in range(modifyLines):\n # Sometimes we just change lines of code\n if random.choice([True, False]) or len(lines) < 10:\n lines[random.randrange(len(lines))] = code()\n # Other times we remove lines of code\n else:\n lines.pop(random.randrange(len(lines)))\n with open(path, 'w') as file:\n file.write('\\n'.join(lines))\n self.sim.log('modified source: {}'.format(path))\n\n def append(self):\n \"\"\"\n Appends to a random source file.\n \"\"\"\n if len(self.sim.repo['files']) == 0:\n self.create()\n path = random.choice(self.sim.repo['files'])\n with open(path, 'a') as file:\n for _ in range(random.randrange(appendLines)):\n file.write(code() + '\\n')\n self.sim.log('appended source: {}'.format(path))","sub_path":"simgit/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"115880442","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('django_save_logger', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='systemeventmodel',\n options={'ordering': ('created_at',)},\n ),\n migrations.RenameField(\n model_name='systemeventmodel',\n old_name='user_pk',\n new_name='user_id',\n ),\n ]\n","sub_path":"django_save_logger/migrations/0002_auto_20170415_0603.py","file_name":"0002_auto_20170415_0603.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"320474630","text":"import ast\nfrom collections import defaultdict\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import 
preprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom random import shuffle\n\nimport random\nrandom.seed(0)\n\nX = []\nY = []\nK = 20\nfor k in range(K):\n feats = pd.read_csv('data/features_Read_%d.csv' % k)\n X.append(feats[['popularity', 'jaccard_book', 'jaccard_user', 'cosine_book', 'cosine_user', 'gamma']])\n Y.append(np.ravel(feats['label']))\n\nif True:\n scaler = []\n clf_SVC = []\n for k in range(K):\n print(k)\n scaler = preprocessing.StandardScaler().fit(X[k])\n clf_SVC.append(SVC(C=0.1, gamma='auto', kernel='rbf'))\n clf_SVC[k].fit(scaler.transform(X[k], copy=True), Y[k])\n print('training complete')\n with open(\"data/predictions_Read_SVM_RBF.txt\", 'w') as predictions:\n pred = []\n users = []\n books = []\n for k in range(K):\n data_pred = pd.read_csv('data/features_Read_Predict_%d.csv' % k)\n users = data_pred['user']\n books = data_pred['book']\n X_pred = data_pred[['popularity', 'jaccard_book', 'jaccard_user', 'cosine_book', 'cosine_user', 'gamma']]\n y_pred = clf_SVC[k].predict(scaler.transform(X_pred, copy=True))\n pred.append(y_pred)\n predictions.write('userID-bookID,prediction\\n')\n for i in range(len(pred[0])):\n u = users[i]\n b = books[i]\n y = [t[i] for t in pred]\n if sum(y) / len(y) > 0.5:\n predictions.write(u + '-' + b + \",1\\n\")\n else:\n predictions.write(u + '-' + b + \",0\\n\")\n","sub_path":"wouldread_classify.py","file_name":"wouldread_classify.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"250417576","text":"import dataclasses\nimport pytest\n\nfrom yamlparser.yamlparser import YamlParser, YamlParserException\nfrom tests.unit.simple_dataclass import SimpleDataclass, SimpleDataclassException\nfrom tests.conftest import TEST_ROOT_DIR\n\n\ndef test_invalid_filepath():\n\n filepath = \"./resources/file.yaml\"\n model = SimpleDataclass\n\n with pytest.raises(YamlParserException) as exception:\n YamlParser(\n filepath=filepath,\n model=model\n )\n\n assert str(exception.value) == f\"{filepath} is not a valid filepath\"\n\n\ndef test_model_not_dataclass():\n\n filepath = f\"{TEST_ROOT_DIR}/resources/valid.yaml\"\n model = \"Not a dataclass type\"\n\n with pytest.raises(YamlParserException) as exception:\n YamlParser(\n filepath=filepath,\n model=model\n )\n\n assert str(exception.value) == f\"model expects a 'dataclass' type, but input has type: \"\n\n\ndef test_read_valid_yaml():\n\n yaml_parser = YamlParser(\n filepath=f\"{TEST_ROOT_DIR}/resources/valid.yaml\",\n model=SimpleDataclass\n )\n\n actual_dataclass = yaml_parser.read()\n\n expected_dictionary = {\n \"application_name\": \"Test\",\n \"memory\": 2.5,\n \"workers\": 1,\n \"log_level\": \"INFO\",\n \"dynamic_scaling\": False,\n \"system_variables\": {}\n }\n\n assert dataclasses.asdict(actual_dataclass) == expected_dictionary\n\n\ndef test_read_invalid_yaml():\n\n yaml_parser = YamlParser(\n filepath=f\"{TEST_ROOT_DIR}/resources/invalid.yaml\",\n model=SimpleDataclass\n )\n\n with pytest.raises(SimpleDataclassException) as exception:\n yaml_parser.read()\n\n assert str(exception.value) == f\"memory expects a 'float' type, but input has type: \"\n","sub_path":"tests/unit/yamlparser_test.py","file_name":"yamlparser_test.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"126739019","text":"import pandas as 
pd\nimport os\nimport logging\nfrom datetime import datetime\n\nlogger = logging.getLogger('nodes.data_transform')\n\n\ndef feat_novelas(client, params, feature):\n    \"\"\"\n    Get the tables from the database and transform them into a clean CSV file\n    \"\"\"\n    table_casting = params.casting.split('.')[0]\n    table_features = params.features.split('.')[0]\n    table_novelas = params.novelas.split('.')[0]\n\n    # Read tables from SQL\n    cast_df = pd.read_sql_query(f'SELECT * FROM {table_casting};', con=client.conn)\n    feat_df = pd.read_sql_query(f'SELECT * FROM {table_features};', con=client.conn)\n    novelas_df = pd.read_sql_query(f'SELECT * FROM {table_novelas};', con=client.conn)\n\n    # Replace each casting name with its feature value\n    f = feat_df[feature].unique()\n    feature_novela = pd.DataFrame(index=cast_df.columns, columns=f).sort_index()\n    for i, item in feat_df.iterrows():\n        cast_df.replace(item['name'], item[feature], inplace=True)\n\n    # The total per ethnicity for each soap opera\n    for i in f:\n        feature_novela[i] = cast_df[cast_df == i].count().sort_index()\n\n    feature_novela.reset_index(inplace=True)\n    feature_novela.rename(columns={'index': 'novela_name'}, inplace=True)\n\n    # Insert the aired year into the new table\n    feature_novela = feature_novela.merge(novelas_df, left_on='novela_name', right_on='novela_name')\n    feature_novela['aired_year'] = feature_novela['aired'].apply(\n        lambda x: datetime.strptime(x, '%d %B %Y').strftime('%Y'))\n\n    # Write the file to disk\n    filename = feature + '_novela.csv'\n    file_path = os.path.join(params.processed_data, filename)\n    logger.info(f'Storing results at {file_path}')\n    feature_novela.to_csv(file_path, index=False)\n\n    return feature_novela\n\n\ndef clean_ibge_data(client, params):\n    \"\"\"\n    Get the IBGE dataframe from the database and clean it\n    \"\"\"\n    file_path = os.path.join(params.external_data, params.ibge)\n    ibge_data = pd.read_csv(file_path, sep=';')\n\n    # Use the 2018 column\n    ibge_data.drop(columns=['2015', '2016', '2017'], axis=1, inplace=True)\n\n    # Split by indicators\n    if not os.path.exists(os.path.join(params.intermediate_data, 'Geral.csv')):\n        for i in range(1, 6):\n            category_df = ibge_data.loc[ibge_data['Nível'].str.startswith(str(i))].reset_index(drop=True)\n            filename = category_df.loc[0, 'Indicador'] + '.csv'\n\n            file_path = os.path.join(params.intermediate_data, filename)\n            logger.info(f'Storing results at {file_path}')\n            category_df.to_csv(file_path, index=False)\n\n    # Population dataframe\n    file_path = os.path.join(params.intermediate_data, 'Geral.csv')\n    df = pd.read_csv(file_path)\n    df = df.iloc[[3, 4], 1:]\n    file_path = os.path.join(params.processed_data, 'population.csv')\n    logger.info(f'Storing results at {file_path}')\n    df.to_csv(file_path, index=False)\n\n    # Education dataframe\n    file_path = os.path.join(params.intermediate_data, 'Educação.csv')\n    education_df = pd.read_csv(file_path)\n    df = education_df.iloc[[4, 5], 1:]\n    file_path = os.path.join(params.processed_data, 'illiteracy.csv')\n    logger.info(f'Storing results at {file_path}')\n    df.to_csv(file_path, index=False)\n\n    df = education_df.iloc[[34, 35], 1:]\n    file_path = os.path.join(params.processed_data, 'higher_education.csv')\n    logger.info(f'Storing results at {file_path}')\n    df.to_csv(file_path, index=False)\n\n    # Employment dataframe\n    file_path = os.path.join(params.intermediate_data, 'Trabalho.csv')\n    employment_df = pd.read_csv(file_path)\n    df = employment_df.iloc[[11, 12], 1:]\n    file_path = os.path.join(params.processed_data, 'unemployment.csv')\n    
logger.info(f'Storing results at {file_path}')\n df.to_csv(file_path, index=False)\n\n df = employment_df.iloc[[38, 39], 1:]\n file_path = os.path.join(params.processed_data, 'income.csv')\n logger.info(f'Storing results at {file_path}')\n df.to_csv(file_path, index=False)\n\n # Join IBGE data base\n\n\ndef join_ibge_data(params):\n \"\"\"\n Join the files that it will use for reporting\n \"\"\"\n filenames = ['higher_education', 'illiteracy', 'income', 'population', 'unemployment']\n\n df = pd.DataFrame({'Indicador': ['Brancos', 'Pretos ou pardos']})\n for f in filenames:\n new_df = pd.read_csv(os.path.join(params.processed_data, f'{f}.csv'), usecols=['Indicador', '2018'])\n new_df.rename(columns={'2018': f}, inplace=True)\n\n df = new_df.merge(df)\n\n # Replace percentage to numbers\n df['unemployment'] = df['unemployment'] * df['population']\n df['illiteracy'] = df['illiteracy'] * df['population']\n df['higher_education'] = df['higher_education'] * df['population']\n\n # Set the table in params for storage later\n params.ibge_df = df\n\n # Write to log file and save the file to disk\n file_path = os.path.join(params.processed_data, params.ibge)\n logger.info(f'Storing results at {file_path}')\n df.to_csv(file_path, index=False)\n\n\ndef update(client, params):\n\n params.race_novela_df = feat_novelas(client, params, 'race')\n params.color_novela_df = feat_novelas(client, params, 'color')\n\n clean_ibge_data(client, params)\n\n join_ibge_data(params)\n\n\ndef done(client, params):\n \"\"\"\n Return whether the file to be downloaded already exists or not.\n \"\"\"\n # Soap Operas data\n file_path = os.path.join(params.processed_data, params.ibge)\n\n if os.path.exists(file_path):\n logger.info(f'{file_path} found.')\n\n return os.path.exists(file_path)\n","sub_path":"src/nodes/data_transform.py","file_name":"data_transform.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"109585836","text":"# Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom aws_orbit.models.context import ContextSerDe\nfrom aws_orbit.remote_files import env\nfrom aws_orbit.services import ecr\n\nif TYPE_CHECKING:\n from aws_orbit.models.context import Context\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef delete_image(args: Tuple[str, ...]) -> None:\n _logger.debug(\"args %s\", args)\n context: \"Context\" = ContextSerDe.load_context_from_ssm(env_name=args[0], type=Context)\n _logger.debug(\"context.name %s\", context.name)\n if len(args) == 2:\n image_name: str = args[1]\n else:\n raise ValueError(\"Unexpected number of values in args.\")\n\n env.deploy(context=context, eks_system_masters_roles_changes=None)\n _logger.debug(\"Env changes deployed\")\n ecr.delete_repo(repo=f\"orbit-{context.name}-{image_name}\")\n _logger.debug(\"Docker Image Destroyed from ECR\")\n","sub_path":"cli/aws_orbit/remote_files/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"23494729","text":"# A Pulsator is a Black_Hole; it updates as a Black_Hole\r\n# does, but also by growing/shrinking depending on\r\n# whether or not it eats Prey (and removing itself from\r\n# the simulation if its dimension becomes 0), and displays\r\n# as a Black_Hole but with varying dimensions\r\nfrom blackhole import Black_Hole\r\n\r\nclass Pulsator(Black_Hole):\r\n \r\n def __init__(self, x, y):\r\n Black_Hole.__init__(self, x, y)\r\n self.radius = 10\r\n self.color = 'black' \r\n self.cycles = 0 \r\n self.counter = 30\r\n \r\n# def dim(self):\r\n# return self.get_dimension()[0]/2\r\n# \r\n# def contains(self, i):\r\n# return self.distance(i) <= self.dim()\r\n\r\n def dim(self):\r\n return self.get_dimension()[0]\r\n \r\n def update(self, m): \r\n self.cycles = self.cycles +1\r\n goner = Black_Hole.update(self, m)\r\n if goner:\r\n self.cycles = 0\r\n x = len(goner)\r\n self.change_dimension(x, x)\r\n elif self.cycles == self.counter:\r\n #if the cycle counter reaches 30 then decrease the dimension\r\n #of the pulsator by -1\r\n y = -1\r\n self.change_dimension(y, y)\r\n #if the dimensions of the pulsator becomes 0, you must remove it\r\n if self.dim() == 0: \r\n m.remove(self)\r\n #reset the cycle counter once the object is removed \r\n self.cycles = 0\r\n return goner\r\n #writing the display and contains functions aren't necessary because the \r\n #Black_Hole class is being inherited\r\n \r\n# def display(self, canvas):\r\n# width, height = self.get_dimension()\r\n# canvas.create_oval(self._x-width/2,self._y-height/2,self._x+width/2,self._y+height/2, fill = self.color)\r\n ","sub_path":"program5/pulsator.py","file_name":"pulsator.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"301300140","text":"import board\nimport 
busio\nimport time\nimport paho.mqtt.client as mqtt\nimport uuid\nimport signal\n\nimport digitalio\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\n\nimport adafruit_mpr121\n\n\nclass Game:\n def __init__(self) -> None:\n self.myMove = None\n self.opponentMove = None\n self.counter = {\"rock\":\"paper\", \"paper\":\"scissors\",\"scissors\":\"rock\", \"quit\":None}\n \n def reset(self):\n self.myMove = None\n self.opponentMove = None\n \n def needLogic(self):\n return self.opponentMove and self.myMove\n \n def isValidInput(self, move):\n return move == \"rock\" or move == \"paper\" or move == \"scissors\" or move == \"I QUIT!\"\n \n\ngame = Game()\n\n\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = None\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 64000000\n\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\nbuttonA.switch_to_input()\nbuttonB.switch_to_input()\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(\n spi,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE,\n width=135,\n height=240,\n x_offset=53,\n y_offset=40,\n)\ncounterMove = None\nopponentMove = None\n\nheight = disp.height\nwidth = disp.width \nimage = Image.new(\"RGB\", (width, height))\ndraw = ImageDraw.Draw(image)\ndisp.image(image)\nrotation = 90\n\ni2c = busio.I2C(board.SCL, board.SDA)\n\nsensor = adafruit_mpr121.MPR121(i2c)\n\ntopic = 'IDD/aAndAlAb6/player2'\ntopic2 = 'IDD/aAndAlAb6/player1'\n\n\n\ndef on_connect(client, userdata, flags, rc):\n print(f\"connected with result code {rc}\")\n client.subscribe(topic)\n client.subscribe(topic2)\n\ndef on_message(client, userdata, msg):\n # if a message is recieved on the colors topic, parse it and set the color\n if msg.topic == topic2:\n opponentMove = msg.payload.decode('UTF-8').rstrip()\n if game.isValidInput(opponentMove):\n if opponentMove == \"I QUIT!\":\n game.opponentMove = \"quit\"\n else:\n game.opponentMove = opponentMove\n \n\nclient = mqtt.Client(str(uuid.uuid1()))\nclient.tls_set()\nclient.username_pw_set('idd', 'device@theFarm')\nclient.on_connect = on_connect\nclient.on_message = on_message\n\n\nclient.connect(\n 'farlab.infosci.cornell.edu',\n port=8883)\n\nclient.loop_start()\n\n# this lets us exit gracefully (close the connection to the broker)\ndef handler(signum, frame):\n print('exit gracefully')\n client.loop_stop()\n exit (0)\n\n# hen sigint happens, do the handler callback function\nsignal.signal(signal.SIGINT, handler)\n\n\ndef gameLogic():\n if game.myMove == \"quit\" or game.opponentMove == \"quit\":\n resImage = game.opponentMove+game.myMove+\".png\"\n elif game.opponentMove == game.counter[game.myMove]:\n client.publish(topic, \":(\")\n resImage = \"winlose.png\"\n elif game.opponentMove == game.myMove:\n client.publish(topic, \"DRAW\")\n resImage = \"draw.png\"\n elif game.myMove == game.counter[game.opponentMove]:\n client.publish(topic, \"Ha I win\")\n resImage = \"losewin.png\"\n else:\n client.publish(topic, \"you're missing an edge case dum dum\")\n \n draw.rectangle((0, 0, width, height))\n resIm = Image.open(\"imgs/\"+game.opponentMove+game.myMove+\".png\")\n disp.image(resIm, rotation)\n game.reset()\n time.sleep(2)\n 
draw.rectangle((0, 0, width, height))\n resIm = Image.open(\"imgs/\" + resImage)\n disp.image(resIm, rotation)\n time.sleep(1)\n \n# our main loop\nwhile True:\n move = None\n if sensor[1].value:\n move = \"rock\"\n client.publish(topic, move)\n game.myMove = move\n image2 = Image.open(\"imgs/\"+\"rock.png\")\n draw.rectangle((0, 0, width, height))\n disp.image(image2, rotation)\n if sensor[2].value:\n move = \"paper\"\n client.publish(topic, move)\n game.myMove = move\n image2 = Image.open(\"imgs/\"+\"paper.png\")\n draw.rectangle((0, 0, width, height))\n disp.image(image2, rotation)\n if sensor[3].value:\n move = \"scissors\"\n client.publish(topic, move)\n game.myMove = move\n image2 = Image.open(\"imgs/\"+\"scissors.png\")\n draw.rectangle((0, 0, width, height))\n disp.image(image2, rotation)\n if sensor[11].value:\n move = \"I QUIT!\"\n client.publish(topic, move)\n image2 = Image.open(\"imgs/\"+\"quit.png\")\n draw.rectangle((0, 0, width, height))\n disp.image(image2, rotation)\n \n if game.needLogic():\n gameLogic()\n\n time.sleep(1)\n","sub_path":"Lab 6/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"280397449","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements community detection.\n\"\"\"\nfrom __future__ import print_function\n\nimport array\n\nimport numbers\nimport warnings\n\nimport networkx as nx\nimport numpy as np\n\nfrom .louvain_modified_status import Status\n\n__author__ = \"\"\"Thomas Aynaud (thomas.aynaud@lip6.fr)\"\"\"\n# Copyright (C) 2009 by\n# Thomas Aynaud \n# All rights reserved.\n# BSD license.\n\n__PASS_MAX = -1\n__MIN = 0.0000001\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance.\n\n Parameters\n ----------\n seed : None | int | instance of RandomState\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError(\"%r cannot be used to seed a numpy.random.RandomState\"\n \" instance\" % seed)\n\n\ndef partition_at_level(dendrogram, level):\n \"\"\"Return the partition of the nodes at the given level\n\n A dendrogram is a tree and each level is a partition of the graph nodes.\n Level 0 is the first partition, which contains the smallest communities,\n and the best is len(dendrogram) - 1.\n The higher the level is, the bigger are the communities\n\n Parameters\n ----------\n dendrogram : list of dict\n a list of partitions, ie dictionnaries where keys of the i+1 are the\n values of the i.\n level : int\n the level which belongs to [0..len(dendrogram)-1]\n\n Returns\n -------\n partition : dictionnary\n A dictionary where keys are the nodes and the values are the set it\n belongs to\n\n Raises\n ------\n KeyError\n If the dendrogram is not well formed or the level is too high\n\n See Also\n --------\n best_partition which directly combines partition_at_level and\n generate_dendrogram to obtain the partition of highest modularity\n\n Examples\n --------\n >>> G=nx.erdos_renyi_graph(100, 0.01)\n >>> dendrogram = generate_dendrogram(G)\n >>> for level in 
range(len(dendrogram) - 1) :\n >>> print(\"partition at level\", level, \"is\", partition_at_level(dendrogram, level)) # NOQA\n \"\"\"\n partition = dendrogram[0].copy()\n for index in range(1, level + 1):\n for node, community in partition.items():\n partition[node] = dendrogram[index][community]\n return partition\n\n\ndef modularity(partition, graph_s, graph_a, alpha, weight='weight'):\n \"\"\"Compute the modularity of a partition of a graph\n\n Parameters\n ----------\n partition : dict\n the partition of the nodes, i.e a dictionary where keys are their nodes\n and values the communities\n graph : networkx.Graph\n the networkx graph which is decomposed\n weight : str, optional\n the key in graph to use as weight. Default to 'weight'\n\n\n Returns\n -------\n modularity : float\n The modularity\n\n Raises\n ------\n KeyError\n If the partition is not a partition of all graph nodes\n ValueError\n If the graph has no link\n TypeError\n If graph is not a networkx.Graph\n\n References\n ----------\n .. 1. Newman, M.E.J. & Girvan, M. Finding and evaluating community\n structure in networks. Physical Review E 69, 26113(2004).\n\n Examples\n --------\n >>> G=nx.erdos_renyi_graph(100, 0.01)\n >>> part = best_partition(G)\n >>> modularity(part, G)\n \"\"\"\n if graph_s.is_directed() or graph_a.is_directed():\n raise TypeError(\"Bad graph type, use only non directed graph\")\n\n inc_s, inc_a = dict([]), dict([])\n deg_s, deg_a = dict([]), dict([])\n links_s = graph_s.size(weight=weight)\n links_a = graph_a.size(weight=weight)\n if links_s == 0 or links_a == 0:\n raise ValueError(\"A graph without link has an undefined modularity\")\n\n res_s = cycle(graph_s, weight, deg_s, inc_s, links_s, partition)\n res_a = cycle(graph_a, weight, deg_a, inc_a, links_a, partition)\n res = res_s * alpha + res_a * (1-alpha)\n return res\n\n\ndef cycle(graph, weight, deg, inc, links, partition):\n for node in graph:\n com = partition[node]\n deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)\n for neighbor, datas in graph[node].items():\n edge_weight = datas.get(weight, 1)\n if partition[neighbor] == com:\n if neighbor == node:\n inc[com] = inc.get(com, 0.) + float(edge_weight)\n else:\n inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.\n res = 0.\n for com in set(partition.values()):\n res += (inc.get(com, 0.) / links) - \\\n (deg.get(com, 0.) / (2. * links)) ** 2\n return res\n\n\ndef best_partition(graph_s,\n graph_a,\n alpha,\n partition=None,\n weight='weight',\n resolution=1.,\n randomize=None,\n random_state=None):\n \"\"\"Compute the partition of the graph nodes which maximises the modularity\n (or try..) using the Louvain heuristices\n\n This is the partition of highest modularity, i.e. the highest partition\n of the dendrogram generated by the Louvain algorithm.\n\n Parameters\n ----------\n graph : networkx.Graph\n the networkx graph which is decomposed\n partition : dict, optional\n the algorithm will start using this partition of the nodes.\n It's a dictionary where keys are their nodes and values the communities\n weight : str, optional\n the key in graph to use as weight. Default to 'weight'\n resolution : double, optional\n Will change the size of the communities, default to 1.\n represents the time described in\n \"Laplacian Dynamics and Multiscale Modular Structure in Networks\",\n R. Lambiotte, J.-C. Delvenne, M. 
Barahona\n randomize : boolean, optional\n Will randomize the node evaluation order and the community evaluation\n order to get different partitions at each call\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Returns\n -------\n partition : dictionnary\n The partition, with communities numbered from 0 to number of communities\n\n Raises\n ------\n NetworkXError\n If the graph is not Eulerian.\n\n See Also\n --------\n generate_dendrogram to obtain all the decompositions levels\n\n Notes\n -----\n Uses Louvain algorithm\n\n References\n ----------\n .. 1. Blondel, V.D. et al. Fast unfolding of communities in\n large networks. J. Stat. Mech 10008, 1-12(2008).\n\n Examples\n --------\n >>> #Basic usage\n >>> G=nx.erdos_renyi_graph(100, 0.01)\n >>> part = best_partition(G)\n\n >>> #other example to display a graph with its community :\n >>> #better with karate_graph() as defined in networkx examples\n >>> #erdos renyi don't have true community structure\n >>> G = nx.erdos_renyi_graph(30, 0.05)\n >>> #first compute the best partition\n >>> partition = community.best_partition(G)\n >>> #drawing\n >>> size = float(len(set(partition.values())))\n >>> pos = nx.spring_layout(G)\n >>> count = 0.\n >>> for com in set(partition.values()) :\n >>> count += 1.\n >>> list_nodes = [nodes for nodes in partition.keys()\n >>> if partition[nodes] == com]\n >>> nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20,\n node_color = str(count / size))\n >>> nx.draw_networkx_edges(G, pos, alpha=0.5)\n >>> plt.show()\n \"\"\"\n dendo = generate_dendrogram(graph_s,\n graph_a,\n alpha,\n partition,\n weight,\n resolution,\n randomize,\n random_state)\n return partition_at_level(dendo, len(dendo) - 1)\n\n\ndef generate_dendrogram(graph_s,\n graph_a,\n alpha,\n part_init=None,\n weight='weight',\n resolution=1.,\n randomize=None,\n random_state=None):\n \"\"\"Find communities in the graph and return the associated dendrogram\n\n A dendrogram is a tree and each level is a partition of the graph nodes.\n Level 0 is the first partition, which contains the smallest communities,\n and the best is len(dendrogram) - 1. The higher the level is, the bigger\n are the communities\n\n\n Parameters\n ----------\n graph : networkx.Graph\n the networkx graph which will be decomposed\n part_init : dict, optional\n the algorithm will start using this partition of the nodes. It's a\n dictionary where keys are their nodes and values the communities\n weight : str, optional\n the key in graph to use as weight. Default to 'weight'\n resolution : double, optional\n Will change the size of the communities, default to 1.\n represents the time described in\n \"Laplacian Dynamics and Multiscale Modular Structure in Networks\",\n R. Lambiotte, J.-C. Delvenne, M. Barahona\n\n Returns\n -------\n dendrogram : list of dictionaries\n a list of partitions, ie dictionnaries where keys of the i+1 are the\n values of the i. and where keys of the first are the nodes of graph\n\n Raises\n ------\n TypeError\n If the graph is not a networkx.Graph\n\n See Also\n --------\n best_partition\n\n Notes\n -----\n Uses Louvain algorithm\n\n References\n ----------\n .. 1. Blondel, V.D. et al. Fast unfolding of communities in large\n networks. J. Stat. 
Mech 10008, 1-12(2008).\n\n Examples\n --------\n >>> G=nx.erdos_renyi_graph(100, 0.01)\n >>> dendo = generate_dendrogram(G)\n >>> for level in range(len(dendo) - 1) :\n >>> print(\"partition at level\", level,\n >>> \"is\", partition_at_level(dendo, level))\n :param weight:\n :type weight:\n \"\"\"\n if graph_s.is_directed() or graph_a.is_directed():\n raise TypeError(\"Bad graph type, use only non directed graph\")\n\n # Properly handle random state, eventually remove old `randomize` parameter\n # NOTE: when `randomize` is removed, delete code up to random_state = ...\n if randomize is not None:\n warnings.warn(\"The `randomize` parameter will be deprecated in future \"\n \"versions. Use `random_state` instead.\", DeprecationWarning)\n # If shouldn't randomize, we set a fixed seed to get deterministic results\n if randomize is False:\n random_state = 0\n\n # We don't know what to do if both `randomize` and `random_state` are defined\n if randomize and random_state is not None:\n raise ValueError(\"`randomize` and `random_state` cannot be used at the \"\n \"same time\")\n\n random_state = check_random_state(random_state)\n\n # special case, when there is no link\n # the best partition is everyone in its community\n # if graph1.number_of_edges() == 0:\n # part = dict([])\n # for i, node in enumerate(graph1.nodes()):\n # part[node] = i\n # return [part]\n\n current_graph_s = graph_s.copy()\n current_graph_a = graph_a.copy()\n status_s = Status()\n status_a = Status()\n status_s.init(current_graph_s, weight, part_init)\n status_a.init(current_graph_a, weight, part_init)\n status_list = list()\n __one_level(current_graph_s, current_graph_a, status_s, status_a, alpha, weight, resolution, random_state)\n new_mod_s = __modularity(status_s, resolution)\n new_mod_a = __modularity(status_a, resolution)\n new_mod = new_mod_s * alpha + new_mod_a * (1-alpha)\n partition = __renumber(status_s.node2com) # ----------------------------------------------------------------------- WHAT IS THIS RENUMBER FOR???\n status_list.append(partition)\n mod = new_mod\n current_graph_s = induced_graph(partition, current_graph_s, weight)\n current_graph_a = induced_graph(partition, current_graph_a, weight)\n status_s.init(current_graph_s, weight)\n status_a.init(current_graph_a, weight)\n\n while True:\n __one_level(current_graph_s, current_graph_a, status_s, status_a, alpha, weight, resolution, random_state)\n new_mod_s = __modularity(status_s, resolution)\n new_mod_a = __modularity(status_a, resolution)\n new_mod = new_mod_s * alpha + new_mod_a * (1-alpha)\n if new_mod - mod < __MIN:\n break\n partition = __renumber(status_s.node2com) # ------------------------------------------------------------------- HERE IS THAT RENUMBER AGAIN\n status_list.append(partition)\n mod = new_mod\n current_graph_s = induced_graph(partition, current_graph_s, weight)\n current_graph_a = induced_graph(partition, current_graph_a, weight)\n status_s.init(current_graph_s, weight)\n status_a.init(current_graph_a, weight)\n return status_list[:]\n\n
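# Illustrative end-to-end sketch (the toy graph names and alpha value below are\n# assumed, not part of this module):\n#   dendo = generate_dendrogram(g_struct, g_attr, alpha=0.5)\n#   best = partition_at_level(dendo, len(dendo) - 1)\n# 'best' then maps every node of the input graphs to its final community id.\n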
\ndef induced_graph(partition, graph, weight=\"weight\"):\n \"\"\"Produce the graph where nodes are the communities\n\n there is a link of weight w between communities if the sum of the weights\n of the links between their elements is w\n\n Parameters\n ----------\n partition : dict\n a dictionary where keys are graph nodes and values the part the node\n belongs to\n graph : networkx.Graph\n the initial graph\n weight : str, optional\n the key in graph to use as weight. Default to 'weight'\n\n\n Returns\n -------\n g : networkx.Graph\n a networkx graph where nodes are the parts\n\n Examples\n --------\n >>> n = 5\n >>> g = nx.complete_graph(2*n)\n >>> part = dict([])\n >>> for node in g.nodes() :\n >>> part[node] = node % 2\n >>> ind = induced_graph(part, g)\n >>> goal = nx.Graph()\n >>> goal.add_weighted_edges_from([(0,1,n*n),(0,0,n*(n-1)/2), (1, 1, n*(n-1)/2)]) # NOQA\n >>> nx.is_isomorphic(ind, goal)\n True\n \"\"\"\n ret = nx.Graph()\n ret.add_nodes_from(partition.values())\n\n for node1, node2, datas in graph.edges(data=True):\n edge_weight = datas.get(weight, 1)\n com1 = partition[node1]\n com2 = partition[node2]\n w_prec = ret.get_edge_data(com1, com2, {weight: 0}).get(weight, 1)\n ret.add_edge(com1, com2, **{weight: w_prec + edge_weight})\n\n return ret\n\n\ndef __renumber(dictionary):\n \"\"\"Renumber the values of the dictionary from 0 to n\n \"\"\"\n count = 0\n ret = dictionary.copy()\n new_values = dict([])\n\n for key in dictionary.keys():\n value = dictionary[key]\n new_value = new_values.get(value, -1)\n if new_value == -1:\n new_values[value] = count\n new_value = count\n count += 1\n ret[key] = new_value\n\n return ret\n\n\ndef __one_level(graph_s, graph_a, status_s, status_a, alpha, weight_key, resolution, random_state):\n \"\"\"Compute one level of communities\n \"\"\"\n modified = True\n nb_pass_done = 0\n cur_mod_s = __modularity(status_s, resolution)\n cur_mod_a = __modularity(status_a, resolution)\n cur_mod = cur_mod_s * alpha + cur_mod_a * (1-alpha)\n new_mod = cur_mod\n\n while modified and nb_pass_done != __PASS_MAX:\n cur_mod = new_mod\n modified = False\n nb_pass_done += 1\n\n for node in __randomize(graph_s.nodes(), random_state): # ----------------------------------------------------- RANDOMIZE AGAIN HERE\n com_node = status_s.node2com[node] # ---------------------------------------------------------------------- CAN THIS REALLY ONLY BE DONE VIA THE STATUS?\n degc_totw_s = status_s.gdegrees.get(node, 0.) / (status_s.total_weight * 2.) # NOQA\n degc_totw_a = status_a.gdegrees.get(node, 0.) / (status_a.total_weight * 2.)\n neigh_communities_s = __neighcom(node, graph_s, status_s, weight_key)\n neigh_communities_a = __neighcom(node, graph_a, status_a, weight_key)\n\n s_neighbors = neigh_communities_s.keys()\n a_neighbors = neigh_communities_a.keys()\n intersection_neighbors = [value for value in s_neighbors if value in a_neighbors]\n s_neighbors_only = [value for value in s_neighbors if value not in intersection_neighbors]\n a_neighbors_only = [value for value in a_neighbors if value not in intersection_neighbors]\n neigh_communities_s.update({key: 0 for key in a_neighbors_only})\n neigh_communities_a.update({key: 0 for key in s_neighbors_only})\n\n remove_cost_s = - resolution * neigh_communities_s.get(com_node,0) + \\\n (status_s.degrees.get(com_node, 0.) - status_s.gdegrees.get(node, 0.)) * degc_totw_s\n remove_cost_a = - resolution * neigh_communities_a.get(com_node, 0) + \\\n (status_a.degrees.get(com_node, 0.) - status_a.gdegrees.get(node, 0.)) * degc_totw_a\n __remove(node, com_node,\n neigh_communities_s.get(com_node, 0.), status_s)\n __remove(node, com_node,\n neigh_communities_a.get(com_node, 0.), status_a)\n best_com = com_node\n best_increase = 0\n for com, dnc_s in __randomize(neigh_communities_s.items(), random_state):\n dnc_a = neigh_communities_a[com]\n incr_s = remove_cost_s + resolution * dnc_s - \\\n status_s.degrees.get(com, 0.) 
* degc_totw_s\n incr_a = remove_cost_a + resolution * dnc_a - \\\n status_a.degrees.get(com, 0.) * degc_totw_a\n incr = incr_s * alpha + incr_a * (1-alpha)\n if incr > best_increase:\n best_increase = incr\n best_com = com\n __insert(node, best_com,\n neigh_communities_s.get(best_com, 0.), status_s)\n __insert(node, best_com,\n neigh_communities_a.get(best_com, 0.), status_a)\n if best_com != com_node:\n modified = True\n new_mod_s = __modularity(status_s, resolution)\n new_mod_a = __modularity(status_a, resolution)\n new_mod = new_mod_s * alpha + new_mod_a * (1-alpha)\n if new_mod - cur_mod < __MIN:\n break\n\n\ndef __neighcom(node, graph, status, weight_key):\n \"\"\"\n Compute the communities in the neighborhood of node in the graph given\n with the decomposition node2com\n \"\"\"\n weights = {}\n for neighbor, datas in graph[node].items():\n if neighbor != node:\n edge_weight = datas.get(weight_key, 1)\n neighborcom = status.node2com[neighbor]\n weights[neighborcom] = weights.get(neighborcom, 0) + edge_weight\n\n return weights\n\n\ndef __remove(node, com, weight, status):\n \"\"\" Remove node from community com and modify status\"\"\"\n status.degrees[com] = (status.degrees.get(com, 0.)\n - status.gdegrees.get(node, 0.))\n status.internals[com] = float(status.internals.get(com, 0.) -\n weight - status.loops.get(node, 0.))\n status.node2com[node] = -1\n\n\ndef __insert(node, com, weight, status):\n \"\"\" Insert node into community and modify status\"\"\"\n status.node2com[node] = com\n status.degrees[com] = (status.degrees.get(com, 0.) +\n status.gdegrees.get(node, 0.))\n status.internals[com] = float(status.internals.get(com, 0.) +\n weight + status.loops.get(node, 0.))\n\n\ndef __modularity(status, resolution):\n \"\"\"\n Fast compute the modularity of the partition of the graph using\n status precomputed\n \"\"\"\n links = float(status.total_weight)\n result = 0.\n for community in set(status.node2com.values()):\n in_degree = status.internals.get(community, 0.)\n degree = status.degrees.get(community, 0.)\n if links > 0:\n result += in_degree * resolution / links - ((degree / (2. 
* links)) ** 2)\n return result\n\n\ndef __randomize(items, random_state):\n \"\"\"Returns a List containing a random permutation of items\"\"\"\n randomized_items = list(items)\n random_state.shuffle(randomized_items)\n return randomized_items","sub_path":"louvain_modified/louvain_modified_algorithm.py","file_name":"louvain_modified_algorithm.py","file_ext":"py","file_size_in_byte":20934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"52239408","text":"import tkinter as tk\nimport random\nimport math\n\n\nclass Player:\n def __init__(self):\n self.total = 0\n self.aceCount = 0\n self.gameover = False\n\n def add(self, value):\n self.total = self.total + value\n\n def ace_drawn(self):\n self.aceCount = self.aceCount + 1\n\n def hard_ace(self):\n self.aceCount = self.aceCount - 1\n\n def reset(self):\n self.total = 0\n self.aceCount = 0\n self.gameover = False\n\n\nclass Dealer:\n def __init__(self):\n self.total = 0\n self.aceCount = 0\n\n def add(self, value):\n self.total = self.total + value\n\n def ace_drawn(self):\n self.aceCount = self.aceCount + 1\n\n def hard_ace(self):\n self.aceCount = self.aceCount - 1\n\n def reset(self):\n self.total = 0\n self.aceCount = 0\n\n\ndef deal():\n card()\n card()\n blackjack()\n dealer_card()\n\n\ndef stand():\n if not player.gameover:\n while dealer.total < 17:\n dealer_card()\n if dealer.total > 21:\n dialogLabel['text'] = \"DEALER BUST! YOU WIN!\"\n elif dealer.total > player.total:\n dialogLabel['text'] = \"DEALER WINS!\"\n elif dealer.total == player.total:\n dialogLabel['text'] = \"PUSH!\"\n else:\n dialogLabel['text'] = \"YOU WIN!\"\n player.gameover = True\n\n\ndef card():\n if not player.gameover:\n number = 13 * random.random()\n number = math.ceil(number)\n suit = 4 * random.random()\n suit = math.ceil(suit)\n name = card_name(number)\n suit = card_suit(suit)\n value = card_value(number)\n player.add(value)\n if value == 11:\n player.ace_drawn()\n print(name, ' of ', suit)\n bust()\n if player.aceCount > 0:\n playerScore['text'] = \"You have a soft \" + str(player.total)\n else:\n playerScore['text'] = \"You have a hard \" + str(player.total)\n\n\ndef dealer_card():\n number = 13 * random.random()\n number = math.ceil(number)\n suit = 4 * random.random()\n suit = math.ceil(suit)\n name = card_name(number)\n suit = card_suit(suit)\n value = card_value(number)\n dealer.add(value)\n if value == 11:\n dealer.ace_drawn()\n print(name, ' of ', suit)\n dealerScore['text'] = \"Dealer has \" + str(dealer.total)\n print('Dealer has ' + str(dealer.total))\n\n\ndef card_name(value):\n card_mapping = {\n 1: \"Ace\",\n 2: \"Two\",\n 3: \"Three\",\n 4: \"Four\",\n 5: \"Five\",\n 6: \"Six\",\n 7: \"Seven\",\n 8: \"Eight\",\n 9: \"Nine\",\n 10: \"Ten\",\n 11: \"Jack\",\n 12: \"Queen\",\n 13: \"King\"\n }\n\n return card_mapping.get(value, \"nothing\")\n\n\ndef card_value(value):\n card_mapping = {\n 1: 11,\n 2: 2,\n 3: 3,\n 4: 4,\n 5: 5,\n 6: 6,\n 7: 7,\n 8: 8,\n 9: 9,\n 10: 10,\n 11: 10,\n 12: 10,\n 13: 10\n }\n return card_mapping.get(value, \"nothing\")\n\n\ndef card_suit(value):\n card_mapping = {\n 1: \"Clubs\",\n 2: \"Spades\",\n 3: \"Hearts\",\n 4: \"Diamonds\"\n }\n return card_mapping.get(value, \"nothing\")\n\n\ndef bust():\n if player.total > 21 and player.aceCount == 0:\n print('You have', player.total)\n print(\"BUST!\")\n dialogLabel['text'] = \"BUST! 
You lose.\"\n dealer.reset()\n player.reset()\n player.gameover = True\n elif player.total > 21 and player.aceCount > 0:\n player.hard_ace()\n player.total = player.total - 10\n print('You have', player.total)\n else:\n print('You have', player.total)\n\n\ndef blackjack():\n if player.total == 21:\n print('BLACKJACK!')\n dialogLabel['text'] = \"BLACKJACK! YOU WIN!\"\n player.gameover = True\n\n\ndef new_hand():\n dealer.reset()\n player.reset()\n dialogLabel['text'] = \"Hit or Stand\"\n deal()\n\n\nroot = tk.Tk()\nroot.title(\"Blackjack\")\n\nframe = tk.Frame(root)\nframe.pack()\n\nplayer = Player()\ndealer = Dealer()\n\nplayerScore = tk.Label(frame, text=\"You have \" + str(player.total))\nplayerScore.pack()\ndealerScore = tk.Label(frame, text=\"Dealer has \" + str(dealer.total))\ndealerScore.pack()\ndialogLabel = tk.Label(frame, text=\"Hit or Stand\")\ndialogLabel.pack()\nhitButton = tk.Button(frame, text=\"Hit\", fg=\"black\", command=card)\nhitButton.pack()\nstandButton = tk.Button(frame, text=\"Stand\", fg=\"black\", command=stand)\nstandButton.pack()\nnewGameButton = tk.Button(frame, text=\"New Game\", fg=\"red\", command=new_hand)\nnewGameButton.pack()\nquitButton = tk.Button(frame, text=\"QUIT\", fg=\"red\", command=quit)\nquitButton.pack()\n\ndeal()\n\nroot.mainloop()\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"379906068","text":"from pico2d import *\n\nopen_canvas()\n\ngra = load_image('grass.png')\nch = load_image('animation_sheet.png')\n\nx = 0 \nframe_index = 0 # source image frame (column in the sprite sheet)\naction = 3\nwhile x < 800:\n clear_canvas()\n gra.draw(400, 30)\n ch.clip_draw(100 * frame_index,100 * action,100,100,x, 85) # crop a piece of the image and draw it.\n update_canvas() # copy the back buffer contents to the front buffer.\n\n get_events() # process the queued events\n\n x += 2\n # frame_index += 1\n # if frame_index >= 8: frame_index = 0\n # CPUs dislike branches, so the code below is more useful. -> it is best to minimize branching.
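\n # e.g. frame_index cycles 0, 1, ..., 7, 0, 1, ... with no reset branch at all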
\n frame_index = (frame_index + 1) % 8\n\n if x % 100 == 0:\n action = (action + 1) % 4\n\n delay(0.02)\n \n\ndelay(1) \n\nclose_canvas()\n\n\n# game loop\n# - logic = update\n# - \n# - now : work on the front buffer","sub_path":"수업내용/0914/move2.py","file_name":"move2.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"350870734","text":"import os\nimport pandas as pd\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport lightgbm as lgb\nfrom sklearn.externals import joblib\nimport sys\n\nfor arg in sys.argv: \n temp = arg\ndelay = int(temp)\nlocation = 'BJ'\nf = open('/home/dedekinds/aqi_data_submission_'+location+'.csv')\n\ndf = pd.read_csv(f)\ntotal_data = df.values[:,:[37,32][location == 'LD']]\n\ndef smape(actual, predicted):\n a = np.abs(np.array(actual) - np.array(predicted))\n b = np.array(actual) + np.array(predicted)\n \n return 2 * np.mean(np.divide(a, b, out=np.zeros_like(a), where=b!=0, casting='unsafe'))\n\ndef chooseType(arr,location):\n if location == 'BJ':\n if arr == 'temperature':\n return [0,1,3,4,2,10,11,9,17,18,16,24,25,23,31,32,30]\n if arr == 'humidity':\n return [0,1,2,4,9,11,16,18,23,25,30,32]\n if arr == 'pressure':\n return [0,1, 2,5,3, 9,12,10, 16,19,17, 23,26,24, 30,33,31]\n if arr == 'windspeed':\n return [0,1, 2,3,5 ,9,10,12, 16,17,19, 23,24,26, 30,31,33]\n else:\n if arr == 'temperature':\n return [0,1, 3,4,2, 9,10,8, 15,16,14, 21,22,20, 27,28,26]\n if arr == 'humidity':\n return [0,1, 2,4, 8,10, 14,16, 20,22, 26,28]\n if arr == 'pressure':\n return [0,1, 2,5,3, 8,11,9, 14,17,15, 20,23,21, 26,29,27]\n if arr == 'windspeed':\n return [0,1, 2,3,5 ,8,9,11, 14,15,17, 20,21,23, 26,27,29] \n \ndef recall_meo(Type,location,data):\n num = 6\n res = 0\n temp = data[chooseType(Type,location)]\n \n test_x = temp.reshape(-1,len(temp))\n for i in range(1,num+1):\n model_dir = \"/home/dedekinds/A_meo_code/lightgbm_merge_model\"\n model_name = Type+'_lightgbm_merge_'+location+str(i)+'.txt'\n gbm = joblib.load(os.path.join(model_dir, model_name))\n res = res + gbm.predict(test_x, num_iteration=gbm.best_iteration_)\n res=res/i\n return res[0]\n\n#recall_meo('windspeed','BJ',total_data[0])\n\n\ndef recall_aqi(Type,location,data):\n if location == 'LD' and Type == 'o3':\n return 0\n num = 6\n res = 0\n test_x = data.reshape(-1,len(data))\n for i in range(1,num+1):\n model_dir = \"/home/dedekinds/A_meo_code/lightgbm_merge_model\"\n model_name = Type+'_lightgbm_merge_'+location+str(i)+'.txt'#o3_lightgbm_merge_BJ1.txt\n gbm = joblib.load(os.path.join(model_dir, model_name))\n res = res + gbm.predict(test_x, num_iteration=gbm.best_iteration_)\n res=res/i\n return res[0]\n\n#recall_aqi('o3','BJ',total_data[0][:-3])\n\n\nans = np.zeros((1,3))\n\nfor j in range(len(total_data)):\n data = total_data[j]\n temp_ans =[]\n for t in range(delay+48):\n temperature = recall_meo('temperature',location,data)\n humidity = recall_meo('humidity',location,data)\n pressure = recall_meo('pressure',location,data)\n windspeed = recall_meo('windspeed',location,data)\n \n pm25 = recall_aqi('pm25',location,data)\n pm10 = recall_aqi('pm10',location,data)\n o3 = recall_aqi('o3',location,data)\n \n #ans = np.row_stack((ans,np.array([pm25,pm10,o3])))\n temp_ans.append(pm25)\n temp_ans.append(pm10)\n temp_ans.append(o3)\n \n if location == 'BJ':\n temp = list(data)\n data = np.array(temp[0:2]+temp[9:]+[temperature,pressure,\n humidity,windspeed,pm25,pm10,o3])\n \n 
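# Rolling one-step update (layout inferred from the adjacent slicing): keep\n # [lon, lat], drop the oldest hour block, and append the new predictions so\n # the next iteration forecasts one hour further ahead.\n 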
# longitude, latitude, temperature, pressure, humidity, wind speed, PM2.5, PM10, O3\n if location == 'LD':\n temp = list(data)\n data = np.array(temp[0:2]+temp[8:]+[temperature,pressure,\n humidity,windspeed,pm25,pm10])\n temp2_ans = np.array(temp_ans[3*delay:]).reshape(-1,3)\n ans = np.row_stack((ans,temp2_ans))\n \nans = np.delete(ans,[0],axis=0)\n\n# handle negative values that may appear in the predictions\ndef remove_nagative(ans):\n location = np.where(ans<0)\n row = location[0]\n col = location[1]\n for i in range(len(row)):\n if row[i]-1>0 and row[i]+1<len(ans) and ans[row[i]-1][col[i]]>0 and ans[row[i]+1][col[i]]>0:\n ans[row[i]][col[i]] = ( ans[row[i]-1][col[i]]+ans[row[i]+1][col[i]] )/2\n \n for j in range(len(ans[0])):# handle the first row\n if ans[0][j]<0:\n t = 0\n while True:\n if ans[0+t][j]>0:\n ans[0][j] = ans[0+t][j]\n break\n else:\n t+=1\n \n location = np.where(ans<0)# handle consecutive negative values\n row = location[0]\n col = location[1]\n for i in range(len(row)):\n t = 0\n while True:\n if ans[row[i]-t][col[i]]>0:\n ans[row[i]][col[i]] = ans[row[i]-t][col[i]]\n break\n else:\n t+=1\n return ans \n\nans = remove_nagative(ans)\n\n#f = open('total_pre.csv')# open the combined table for hours 19, 20, 21, 22, 23: station_num*35\n#df =pd.read_csv(f)\n#data = df.values\n\n#smape(ans, data)\n\npd_data = pd.DataFrame(ans)\nprint(pd_data)\nmodel_dir = \"result\"\nmodel_name = 'result_'+location+'_Iteration_ans.csv'\npd_data.to_csv(os.path.join(model_dir, model_name))","sub_path":"Model_related_code/submission/BJ_iteration.py","file_name":"BJ_iteration.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"610032466","text":"\"\"\"\nA basic implementation of K-Means clustering algorithm\n\n\"\"\"\n\nimport numpy as np\n\n\nclass KMeans(object):\n\n \"\"\"KMeans implementation.\n Requires K for initialisation\"\"\"\n\n def __init__(self, k, max_iter=20):\n self._k = k\n self._max_iter = max_iter\n\n def train(self, X):\n \"\"\"Performs clustering\n\n Parameters:\n X : Numpy Array of form [num_samples, dimension]\n \"\"\"\n\n # Generate K random centroids\n random_nos = np.random.uniform(0, X.shape[0] - 1, self._k).astype(int)\n centroids = X[random_nos]\n old_centroids = np.zeros(centroids.shape)\n\n # create a vector of labels (0..k-1 for k-clusters)\n c = np.zeros(shape=(X.shape[0], 1))\n\n iters = 1\n while not self._should_stop(iters, old_centroids, centroids):\n iters += 1\n old_centroids = centroids.copy() # snapshot the previous centroids so _should_stop can detect convergence\n # loop through all points (O(n*k) per sweep; the global k-means problem is NP-hard)\n for i in range(X.shape[0]):\n old_dist = 99999\n for j in range(0, self._k):\n dist = np.linalg.norm(X[i] - centroids[j])\n if dist < old_dist:\n # assign the jth cluster\n old_dist = dist\n c[i] = j\n\n # Update centroid to new values\n for i in range(self._k):\n # get all points with label i\n label_j = X[np.where(c == i)[0]]\n centroids[i] = np.sum(label_j, axis=0) / label_j.shape[0]\n\n # return all new labels for each point\n return c\n\n def _should_stop(self, iters, old_centroids, centroids):\n if iters > self._max_iter:\n return True\n return np.array_equal(old_centroids, centroids)\n","sub_path":"KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"543363356","text":"import csv\nimport random\nimport operator\nimport numpy\ndef dataload(file,split,train=[],test=[]):\n with open(file,'r') as csvfile:\n lines=csv.reader(csvfile)\n data=list(lines)\n for x in range(len(data)-1):\n for y in range(4):\n data[x][y]=float(data[x][y])\n # introducing randomness here\n if(random.random() bytes:\n builder = flatbuffers.Builder(1024)\n\n if 
value_timestamps is not None:\n used_timestamps = np.atleast_1d(np.asarray(value_timestamps)).astype(np.uint64)\n timestamps_offset = builder.CreateNumpyVector(used_timestamps)\n\n numpy_type_map = {\n np.dtype(\"int8\"): ValueUnion.Int8Array,\n np.dtype(\"uint8\"): ValueUnion.UInt8Array,\n np.dtype(\"int16\"): ValueUnion.Int16Array,\n np.dtype(\"uint16\"): ValueUnion.UInt16Array,\n np.dtype(\"int32\"): ValueUnion.Int32Array,\n np.dtype(\"uint32\"): ValueUnion.UInt32Array,\n np.dtype(\"int64\"): ValueUnion.Int64Array,\n np.dtype(\"uint64\"): ValueUnion.UInt64Array,\n }\n\n temp_values = np.atleast_1d(np.asarray(values))\n\n value_array_offset = builder.CreateNumpyVector(temp_values)\n\n # Some flatbuffer fu in order to avoid >200 lines of code\n builder.StartObject(1)\n builder.PrependUOffsetTRelativeSlot(\n 0, flatbuffers.number_types.UOffsetTFlags.py_type(value_array_offset), 0\n )\n value_offset = builder.EndObject()\n\n name_offset = builder.CreateString(name)\n\n SampleEnvironmentDataStart(builder)\n SampleEnvironmentDataAddName(builder, name_offset)\n SampleEnvironmentDataAddTimeDelta(builder, sample_ts_delta)\n SampleEnvironmentDataAddTimestampLocation(builder, ts_location)\n SampleEnvironmentDataAddMessageCounter(builder, message_counter)\n SampleEnvironmentDataAddChannel(builder, channel)\n SampleEnvironmentDataAddPacketTimestamp(builder, int(timestamp.timestamp() * 1e9))\n SampleEnvironmentDataAddValues(builder, value_offset)\n SampleEnvironmentDataAddValuesType(builder, numpy_type_map[temp_values.dtype])\n if value_timestamps is not None:\n SampleEnvironmentDataAddTimestamps(builder, timestamps_offset)\n\n SE_Message = SampleEnvironmentDataEnd(builder)\n\n builder.Finish(SE_Message, file_identifier=FILE_IDENTIFIER)\n return bytes(builder.Output())\n\n\nResponse = NamedTuple(\n \"SampleEnvironmentData\",\n (\n (\"name\", str),\n (\"channel\", int),\n (\"timestamp\", datetime),\n (\"sample_ts_delta\", int),\n (\"ts_location\", Location),\n (\"message_counter\", int),\n (\"values\", np.ndarray),\n (\"value_ts\", Optional[np.ndarray]),\n ),\n)\n\n\ndef deserialise_senv(buffer: Union[bytearray, bytes]) -> Response:\n check_schema_identifier(buffer, FILE_IDENTIFIER)\n\n SE_data = SampleEnvironmentData.GetRootAsSampleEnvironmentData(buffer, 0)\n\n max_time = datetime(\n year=3001, month=1, day=1, hour=0, minute=0, second=0\n ).timestamp()\n used_timestamp = SE_data.PacketTimestamp() / 1e9\n if used_timestamp > max_time:\n used_timestamp = max_time\n\n value_timestamps = None\n if not SE_data.TimestampsIsNone():\n value_timestamps = SE_data.TimestampsAsNumpy()\n\n from flatbuffers.number_types import (\n Int8Flags,\n Int16Flags,\n Int32Flags,\n Int64Flags,\n Uint8Flags,\n Uint16Flags,\n Uint32Flags,\n Uint64Flags,\n )\n\n flag_map = {\n ValueUnion.Int8Array: Int8Flags,\n ValueUnion.UInt8Array: Uint8Flags,\n ValueUnion.Int16Array: Int16Flags,\n ValueUnion.UInt16Array: Uint16Flags,\n ValueUnion.Int32Array: Int32Flags,\n ValueUnion.UInt32Array: Uint32Flags,\n ValueUnion.Int64Array: Int64Flags,\n ValueUnion.UInt64Array: Uint64Flags,\n }\n\n # Some flatbuffers fu in order to avoid >200 lines of code\n value_offset = SE_data.Values()\n value_type = SE_data.ValuesType()\n values = value_offset.GetVectorAsNumpy(flag_map[value_type], 4)\n\n return Response(\n name=SE_data.Name().decode(),\n channel=SE_data.Channel(),\n timestamp=datetime.fromtimestamp(used_timestamp, tz=timezone.utc),\n sample_ts_delta=SE_data.TimeDelta(),\n ts_location=SE_data.TimestampLocation(),\n 
message_counter=SE_data.MessageCounter(),\n values=values,\n value_ts=value_timestamps,\n )\n","sub_path":"streaming_data_types/sample_environment_senv.py","file_name":"sample_environment_senv.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"330433911","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\n\nimport sys\nimport json\nimport time\nimport socket\nimport struct\nimport tempfile\n\nclass Connection:\n def __init__(self, name = \"avogadro\"):\n # create socket\n self.sock = socket.socket(socket.AF_UNIX,\n socket.SOCK_STREAM)\n\n # connect\n self.sock.connect(tempfile.gettempdir() + '/' + name)\n\n def send_json(self, obj):\n self.send_message(json.dumps(obj))\n\n def send_message(self, msg):\n sz = len(msg)\n hdr = struct.pack('>I', sz)\n pkt = hdr + msg.encode('ascii')\n self.sock.send(pkt)\n\n def recv_message(self, size = 1024):\n pkt = self.sock.recv(size)\n\n return pkt[4:]\n\n def recv_json(self):\n msg = self.recv_message()\n\n try:\n return json.loads(msg)\n except Exception as e:\n print('error: ' + str(e))\n return {}\n\n def close(self):\n # close socket\n self.sock.close()\n\nif __name__ == '__main__':\n conn = Connection()\n\n method = sys.argv[1]\n\n if method == 'openFile':\n conn.send_json(\n {\n 'jsonrpc' : '2.0',\n 'id' : 0,\n 'method' : 'openFile',\n 'params' : {\n 'fileName' : str(sys.argv[2])\n }\n }\n )\n\n elif method == 'kill':\n conn.send_json(\n {\n 'jsonrpc' : '2.0',\n 'id' : 0,\n 'method' : 'kill'\n }\n )\n\n else:\n print('unknown method: ' + method)\n conn.close()\n sys.exit(-1)\n\n print('reply: ' + str(conn.recv_message()))\n conn.close()\n","sub_path":"scripts/avogadro-remote.py","file_name":"avogadro-remote.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"474101610","text":"'''\nJaSONx GUI Version\n@author = \"Jury Francia, Simone Olivieri, Vic Zagranowski\"\n@version = \"3.1.2.0\"\n@email = \"j.francia@reply.it, s.olivieri@reply.it, v.zagranowski@reply.it\"\n'''\n\nimport json\nfrom xml.dom import minidom\nimport os\n\ndef getConfigurationPath():\n return readJsonFile(os.path.join(os.path.realpath(''), \"configuration\", \"pathConfiguration.json\"))\n\n'''Function for read json files'''\ndef readJsonFile(path): \n with open(path, 'r') as file: \n return json.load(file)\n\n \n'''Configuration path'''\ndict_configuration_path = getConfigurationPath()\njson_templates_path = dict_configuration_path[\"json_templates_path\"]\nexcel_templates_path = dict_configuration_path[\"excel_templates_path\"]\njson_files_path = dict_configuration_path[\"json_files_path\"]\nexcel_files_path = dict_configuration_path[\"excel_files_path\"]\nhierarchy_path = dict_configuration_path[\"hierarchy_path\"]\n\n \n'''Function for save json files'''\ndef saveJsonFile(path, element):\n try:\n with open(path, 'w') as file:\n json.dump(element, file, indent=4)\n return True\n except:\n return False\n \n'''Function for read xml files'''\ndef readXmlFile(path):\n return minidom.parse(path)\n\n'''Function for create list of files'''\ndef createFileList(path, ext):\n lis = []\n for node in os.listdir(path):\n if(node.startswith(\"~$\") or node.startswith(\".\")):\n continue\n if(node.endswith(ext)):\n fullpath = os.path.join(path, node)\n if(os.path.isfile(fullpath)): \n lis.append(node)\n return lis\n \n'''Function for create list of directories'''\ndef 
createDirectoryList(path):\n lis = []\n for node in os.listdir(path):\n if(node.startswith(\".\")):\n continue\n fullpath = os.path.join(path, node)\n if(os.path.isdir(fullpath)): \n lis.append(node)\n return lis\n \n'''Function return substring'''\ndef getSubstring(string, start='', stop=''): \n if(start != '' and stop != ''):\n return string[string.find(start)+len(start) : string.find(stop)]\n \n elif(start != '' and stop == ''):\n return string[string.find(start)+len(start) : ]\n \n elif(start == '' and stop != ''):\n return string[ : string.find(stop)] \n else:\n return string\n \n'''Refresh template configuration''' \ndef refreshTemplateConfiguration():\n list_templates = createFileList(json_templates_path, \".json\")\n add_measures_template(list_templates)\n \n'''Count measures in file json template'''\ndef add_measures_template(list_templates):\n if(type(list_templates) == list):\n diz_templates = {}\n for template in list_templates:\n list_measures = createModelList(json_templates_path, template) \n diz_templates[getSubstring(template, stop=\".\")] = {\"measuressMax\":len(list_measures), \"measuresSelected\":len(list_measures)} \n saveJsonFile(os.path.join(os.path.realpath (''),\"configuration\", \"meterMeasuresConfiguration.json\"), diz_templates) \n\n'''Create dict model''' \ndef createModelList(path, model):\n file_model = readJsonFile(os.path.join(path, model))\n list_measures = []\n for model in file_model[\"parameters\"][\"filter_tag\"]:\n if(model[\"tag\"] != \"CommunicationCode\"):\n list_measures.append(getSubstring(model[\"tag\"], start=\".\")) \n return(list_measures)\n\n'''Convert class bytes object into json dictionary'''\ndef binary_to_dict(the_binary):\n a = the_binary\n print(\"BINARIO -> \",a)\n dic = json.loads(a.decode(\"utf-8\"))\n print(\"DIC -> \", dic)\n return dic","sub_path":"Versione-3.3.0.0/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"485021658","text":"# coding=utf-8\n\n\nfrom django.urls import path\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse_lazy\n\napp_name = 'sistema'\n\nurlpatterns = [\n # Temporarily commented out; still planning what the system's main screen will be\n # path('', login_required(views.IndexListView.as_view(), login_url=reverse_lazy('sistema:login')), name='index'),\n path('', login_required(views.ChamadoListView.as_view(), login_url=reverse_lazy('sistema:login')), name='index'),\n\n path('login/', auth_views.login, {'template_name': 'sistema/login.html'}, name='login'),\n path('logout/', auth_views.logout, {'next_page': reverse_lazy('sistema:index')}, name='logout'),\n\n path('empreendimentos/listarempreendimentos/', login_required(\n views.EmpreendimentoListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='listarempreendimentos'),\n path('empreendimentos/cadastrarempreendimento/', login_required(\n views.EmpreendimentoCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarempreendimento'),\n path('empreendimentos/editarempreendimento/<int:pk>/', login_required(\n views.EmpreendimentoUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarempreendimento'),\n path('empreendimentos/deletarempreendimento/<int:pk>/', login_required(\n views.EmpreendimentoDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='deletarempreendimento'),\n\n path('empreendimentos/blocos/listarblocos/', login_required(\n views.BlocoListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='listarblocos'),\n path('empreendimentos/blocos/cadastrarbloco/', login_required(\n views.BlocoCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarbloco'),\n path('empreendimentos/blocos/editarbloco/<int:pk>/', login_required(\n views.BlocoUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarbloco'),\n path('empreendimentos/blocos/deletarbloco/<int:pk>/', login_required(\n views.BlocoDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='deletarbloco'),\n\n path('empreendimentos/blocos/listarapartamentos/', login_required(\n views.ApartamentoListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='listarapartamentos'),\n path('empreendimentos/blocos/cadastrarapartamento/', login_required(\n views.ApartamentoCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarapartamento'),\n path('empreendimentos/blocos/editarapartamento/<int:pk>/', login_required(\n views.ApartamentoUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarapartamento'),\n path('empreendimentos/blocos/deletarapartamento/<int:pk>/', login_required(\n views.ApartamentoDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='deletarapartamento'),\n\n path('problemas/listarcategorias/', login_required(\n views.CategoriaDeProblemaListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='categoriasdeproblemas'),\n path('problemas/cadastrarcategoria/', login_required(\n views.CategoriaDeProblemaCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarcategoriadeproblema'),\n path('problemas/editarcategoria/<int:pk>/', login_required(\n views.CategoriaDeProblemaUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarcategoriadeproblema'),\n path('problemas/deletarcategoria/<int:pk>/', login_required(\n views.CategoriaDeProblemaDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n 
), name='deletarcategoriadeproblema'),\n\n path('problemas/listarsubcategorias/', login_required(\n views.SubcategoriaDeProblemaListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='subcategoriasdeproblemas'),\n path('problemas/cadastrarsubcategoria/', login_required(\n views.SubcategoriaDeProblemaCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarsubcategoriadeproblema'),\n path('problemas/editarsubcategoria/<int:pk>/', login_required(\n views.SubcategoriaDeProblemaUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarsubcategoriadeproblema'),\n path('problemas/removersubcategoria/<int:pk>/', login_required(\n views.SubcategoriaDeProblemaDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='deletarsubcategoriadeproblema'),\n\n path('problemas/listarproblemas/', login_required(\n views.ProblemaListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='listarproblemas'),\n path('problemas/cadastrarproblema/', login_required(\n views.ProblemaCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarproblema'),\n path('problemas/editarproblema/<int:pk>/', login_required(\n views.ProblemaUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarproblema'),\n path('problemas/deletarproblema/<int:pk>/', login_required(\n views.ProblemaDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='deletarproblema'),\n\n path('chamados/listarchamados/', login_required(\n views.ChamadoListView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='listarchamados'),\n path('chamados/cadastrarchamado/', login_required(\n views.ChamadoCreateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='cadastrarchamado'),\n path('chamados/editarchamado/<int:pk>/', login_required(\n views.ChamadoUpdateView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='editarchamado'),\n path('chamados/deletarchamado/<int:pk>/', login_required(\n views.ChamadoDeleteView.as_view(), login_url=reverse_lazy('sistema:login')\n ), name='deletarchamado'),\n]\n","sub_path":"sistema/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"634411585","text":"\"\"\"\nProvides the optimizer creation, loss computation, back-propagation, and scoring functionalities\n on the input batches for structured prediction training tasks in which the input is composed of at least two sequences\n\"\"\"\nimport os\nimport math\nfrom typing import Tuple, List, Type\n\nfrom translate.backend.utils import backend\nfrom translate.configs.loader import ConfigLoader\nfrom translate.learning.modelling import AbsCompleteModel\nfrom translate.readers.constants import ReaderType\nfrom translate.logging.utils import logger\n\n__author__ = \"Hassan S. Shavarani\"\n\n\ndef create_optimizer(optimizer_name, unfiltered_params, lr, warmup_wrapper_needed=False, configs=None):\n \"\"\"\n The method to create the optimizer object given the desired optimizer name (:param optimizer_name:) and the expected\n learning rate (:param lr:) for the set of model parameters (:param unfiltered_params:)\n In case the learning rate warmup wrapper is required you can set the :param warmup_wrapper_needed: to True and \n pass the configs object for the wrapper to get configured. 
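\n For example, the illustrative call create_optimizer(\"adam\", model.parameters(), lr=1e-3)\n returns a plain Adam instance when no warmup wrapper is requested (\"model\" is assumed here).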
\n :return: the created optimizer object\n :raises ValueError if the requested optimizer name is not defined\n \"\"\"\n if warmup_wrapper_needed:\n # the learning rate would gradually increase during the warmup\n lr = 0.0\n params = filter(lambda x: x.requires_grad, unfiltered_params)\n if optimizer_name == \"adam\":\n optim = backend.optim.Adam(params, lr=lr, betas=(0.9, 0.98), eps=1e-9)\n elif optimizer_name == \"adadelta\":\n optim = backend.optim.Adadelta(params, lr=lr)\n elif optimizer_name == \"sgd\":\n optim = backend.optim.SGD(params, lr=lr, momentum=0.9)\n else:\n raise ValueError(\"No optimiser found with the name {}\".format(optimizer_name))\n if not warmup_wrapper_needed:\n return optim\n else:\n return OptimizerWrapperWithWarmUpSteps(configs, optim)\n\n\ndef create_scheduler(scheduler_name, optimizer, configs):\n if scheduler_name.lower() == \"cosine\":\n n_epochs = configs.get(\"trainer.optimizer.epochs\", must_exist=True)\n n_epochs = n_epochs if n_epochs > 0 else 1\n eta_min = float(configs.get(\"trainer.optimizer.scheduler.eta_min\", must_exist=True))\n return backend.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs, eta_min)\n elif scheduler_name.lower() == \"step\":\n step_size = configs.get(\"trainer.optimizer.scheduler.step_size\", must_exist=True)\n gamma = float(configs.get(\"trainer.optimizer.scheduler.gamma\", must_exist=True))\n return backend.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)\n else:\n raise NotImplementedError\n\n\nclass StatCollector:\n \"\"\"\n The loss, score, and result size container, used for storing the run stats of the training/testing iterations\n \"\"\"\n\n def __init__(self, train_size, model_batch_size, higher_score_is_better):\n self._eps = 7. / 3. - 4. / 3. - 1.\n self._higher_score_is_better = higher_score_is_better\n self._test_total = 0.0\n self._test_score = 0.0\n self._test_loss = 0.0\n self._dev_total = 0.0\n self._dev_score = 0.0\n self._dev_loss = 0.0\n self._train_total = 0.0\n self._train_score = 0.0\n self._train_loss = 0.0\n\n self._best_train_loss = float('+inf')\n self._best_dev_loss = float('+inf')\n self._best_dev_score = float('-inf') if higher_score_is_better else float('+inf')\n self.global_step = 0.0\n\n # the value which is used for performing the dev set evaluation steps\n self._trainset_size = train_size\n self._training_batch_size = model_batch_size\n self._print_every = math.ceil(0.25 * int(math.ceil(float(train_size) / float(model_batch_size))))\n self._train_iter_step = 0.0\n\n def zero_step(self):\n self._train_iter_step = 0.0\n self._train_score = 0.0\n self._train_loss = 0.0\n self._train_total = 0.0\n\n def step(self):\n self._train_iter_step += 1.0\n\n def validation_required(self):\n return self._train_iter_step % self._print_every == 0\n\n @property\n def test_score(self) -> float:\n return self._test_score / (self._test_total + self._eps)\n\n @property\n def test_loss(self) -> float:\n return self._test_loss / (self._test_total + self._eps)\n\n @property\n def dev_score(self) -> float:\n return self._dev_score / (self._dev_total + self._eps)\n\n @property\n def dev_loss(self) -> float:\n return self._dev_loss / (self._dev_total + self._eps)\n\n @property\n def train_score(self) -> float:\n return self._train_score / (self._train_total + self._eps)\n\n @property\n def train_loss(self) -> float:\n return self._train_loss / (self._train_total + self._eps)\n\n def update(self, score: float, loss: float, stat_type: ReaderType):\n if stat_type == ReaderType.TRAIN:\n 
self._train_score += score\n self._train_loss += loss\n self._train_total += 1.0\n elif stat_type == ReaderType.TEST:\n self._test_score += score\n self._test_loss += loss\n self._test_total += 1.0\n elif stat_type == ReaderType.DEV:\n self._dev_score += score\n self._dev_loss += loss\n self._dev_total += 1.0\n else:\n raise NotImplementedError\n\n def reset(self, stat_type: ReaderType):\n if stat_type == ReaderType.TRAIN:\n self._train_score = 0.0\n self._train_loss = 0.0\n self._train_total = 0.0\n elif stat_type == ReaderType.TEST:\n self._test_score = 0.0\n self._test_loss = 0.0\n self._test_total = 0.0\n elif stat_type == ReaderType.DEV:\n self._dev_score = 0.0\n self._dev_loss = 0.0\n self._dev_total = 0.0\n else:\n raise NotImplementedError\n\n def improved_recently(self) -> bool:\n \"\"\"\n Checks whether the stat collector has seen any loss improvements from the last time it was asked\n \"\"\"\n improved = False\n self.global_step += 1.0\n if self.dev_loss < self._best_dev_loss:\n self._best_dev_loss = self.dev_loss\n improved = True\n if self.train_loss < self._best_train_loss:\n self._best_train_loss = self.train_loss\n # improved = True\n if not self._higher_score_is_better and self.dev_score < self._best_dev_score:\n self._best_dev_score = self.dev_score\n improved = True\n if self._higher_score_is_better and self.dev_score > self._best_dev_score:\n self._best_dev_score = self.dev_score\n improved = True\n return improved\n\n\nclass OptimizerWrapperWithWarmUpSteps:\n def __init__(self, configs: ConfigLoader, optimizer_instance: Type[backend.optim.Optimizer]):\n \"\"\"\n :param configs: an instance of ConfigLoader which has been loaded with a yaml config file\n :param optimizer_instance: The optimizer instance the learning rate of which is supposed to be updated with \n after every single loss backward step. \n \"\"\"\n super(OptimizerWrapperWithWarmUpSteps, self).__init__()\n self.optimizer = optimizer_instance\n self._step = 0\n # the number of warmup steps\n self.warmup = configs.get(\"trainer.optimizer.warmup_steps\", must_exist=True)\n # the rate update factor used in learning rate update computation\n self.factor = configs.get(\"trainer.optimizer.lr_update_factor\", must_exist=True)\n # the size of the source embedding layer of the model\n # TODO the value must be able to be taken from a non-transformer model as well!\n self.model_size = configs.get(\"trainer.optimizer.d_model\", must_exist=True)\n self._rate = 0\n logger.info(\"Optimizer loaded into the learning rate warmup wrapper for the model size: {} with {} warm-up \"\n \"states and the learning rate updates of factor {}\".format(\n self.model_size, self.warmup, self.factor))\n\n def step(self):\n \"\"\"\n Performs a single optimization step with the warmup strategy stated in the \"attention is all you need\" paper.\n \"\"\"\n self._step += 1\n rate = self._rate_()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def _rate_(self, step=None):\n \"\"\"\n Class method in charge of updating the learning rate considering the :param step: number passed to it.\n The update will be performed according to the \"lrate\" formula (Equation 3, Page 7 of the \"attention is all you \n need\") paper. 
\n \"\"\"\n if step is None:\n step = self._step\n return self.factor * (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)))\n\n\nclass Estimator:\n def __init__(self, configs: ConfigLoader, model: Type[AbsCompleteModel]):\n \"\"\"\n :param configs: an instance of ConfigLoader which has been loaded with a yaml config file\n :param model: the sequence to sequence model instance object which will be used for computing the model\n predictions and parameter optimization\n \"\"\"\n self.optim_name = configs.get(\"trainer.optimizer.name\", must_exist=True)\n self.learning_rate = float(configs.get(\"trainer.optimizer.lr\", must_exist=True))\n self.grad_clip_norm = configs.get(\"trainer.optimizer.gcn\", 5)\n warmup_needed = configs.get(\"trainer.optimizer.needs_warmup\", False)\n self.experiment_name = configs.get(\"trainer.experiment.name\", \"unnamed\")\n self.model = model\n logger.info('Loading {} optimizer(s) of type \\\"{}\\\" for training the model'.format(\n len(model.optimizable_params_list()), self.optim_name.upper()))\n self.optimizers = [create_optimizer(self.optim_name, x, self.learning_rate, warmup_needed, configs)\n for x in model.optimizable_params_list()]\n if configs.get(\"trainer.optimizer.scheduler\", None) is not None and not warmup_needed:\n self.scheduler_name = configs.get(\"trainer.optimizer.scheduler.name\", must_exist=True)\n self.schedulers = [create_scheduler(self.scheduler_name, opt, configs) for opt in self.optimizers]\n else:\n self.scheduler_name = None\n self.schedulers = []\n\n def step_schedulers(self):\n if len(self.schedulers):\n logger.info(\"Updating the learning rates through {} scheduler ...\".format(self.scheduler_name))\n for scheduler in self.schedulers:\n scheduler.step()\n\n def step(self, *args, **kwargs) -> Tuple[float, List[List[int]]]:\n \"\"\"\n The step function which takes care of computing the loss, gradients and back-propagating them given\n the input tensors (the number of them could vary based on the application).\n :return: the average loss value for the batch instances plus the decoded output computed over the batch\n \"\"\"\n for opt in self.optimizers:\n opt.zero_grad()\n _loss_, _loss_size_, computed_output = self.model.forward(*args, *kwargs)\n _loss_.backward()\n if self.grad_clip_norm > 0.0:\n [backend.nn.utils.clip_grad_norm_(x, self.grad_clip_norm) for x in self.model.optimizable_params_list()]\n loss_value = _loss_.item() / _loss_size_ if _loss_size_ > 0.0 else 0.0\n for opt in self.optimizers:\n opt.step()\n return loss_value, computed_output\n\n def step_no_grad(self, *args, **kwargs):\n \"\"\"\n The function which given a pair of input tensors, freezes the model parameters then computes the model loss over\n its predictions.\n :return: the average loss value for the batch instances plus the decoded output computed over the batch\n \"\"\"\n with backend.no_grad():\n _loss_, _loss_size_, computed_output = self.model.forward(*args, *kwargs)\n loss_value = _loss_.item() / _loss_size_ if _loss_size_ > 0.0 else 0.0\n return loss_value, computed_output\n\n def save_checkpoint(self, stat_collector: StatCollector) -> str:\n \"\"\"\n Saves the model and returns the saved checkpoint address, the function uses the collected stats during training \n to form the saving checkpoint address\n \"\"\"\n checkpoint = {'global_step': stat_collector.global_step, 'model_state_dict': self.model.state_dict()}\n checkpoint_path = 'checkpoints/%s_acc_%.2f_loss_%.2f_step_%d.pt' % (\n self.experiment_name, 
stat_collector.dev_score, stat_collector.dev_loss, stat_collector.global_step)\n        directory, filename = os.path.split(os.path.abspath(checkpoint_path))\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n        backend.save(checkpoint, checkpoint_path)\n        return checkpoint_path\n\n    def load_checkpoint(self, checkpoint_path: str) -> Type[AbsCompleteModel]:\n        \"\"\"\n        Loads the model from the :param checkpoint_path: and returns the loaded model object \n        \"\"\"\n        # It's weird that if `map_location` is not given, it will be extremely slow.\n        ckpt = backend.load(checkpoint_path, map_location=lambda storage, loc: storage)\n        self.model.load_state_dict(ckpt['model_state_dict'])\n        return self.model\n","sub_path":"src/translate/learning/estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"74553984","text":"import torch.nn as nn\r\n\r\nclass Discriminator(nn.Module):\r\n    def __init__(self, ngpu, dim_d, num_channel):\r\n        super(Discriminator, self).__init__()\r\n        self.ngpu = ngpu\r\n        self.main = nn.Sequential(\r\n            # input is num_channel * 64 * 64\r\n            nn.Conv2d(num_channel, dim_d, 4, 2, 1, bias=False),\r\n            nn.LeakyReLU(0.2, inplace=True),\r\n            # state size, dim_d * 32 * 32\r\n            nn.Conv2d(dim_d, dim_d*2, 4, 2, 1, bias=False),\r\n            nn.LeakyReLU(0.2, inplace=True),\r\n            # state size, (dim_d*2) * 16 * 16\r\n            nn.Conv2d(dim_d*2, dim_d*4, 4, 2, 1, bias=False),\r\n            nn.LeakyReLU(0.2, inplace=True),\r\n            # state size, (dim_d*4) * 8 * 8\r\n            nn.Conv2d(dim_d*4, dim_d*8, 4, 2, 1, bias=False),\r\n            nn.LeakyReLU(0.2, inplace=True),\r\n            # state size, (dim_d*8) * 4 * 4\r\n            nn.Conv2d(dim_d*8, 1, 4, 1, 0, bias=False),\r\n            nn.Sigmoid(),\r\n            # state size, 1 * 1 * 1\r\n        )\r\n\r\n    def forward(self, input):\r\n        if input.is_cuda and self.ngpu > 1:\r\n            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\r\n        else:\r\n            output = self.main(input)\r\n        return output.view(-1, 1).squeeze(1)","sub_path":"Discriminator.py","file_name":"Discriminator.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"133222019","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom nfft import nfft_adjoint, ndft\nfrom matplotlib.ticker import StrMethodFormatter\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nimport definitions\n\n\nparser = argparse.ArgumentParser(description='Displays the test accuracy of the adversarial curve between two models.')\n\nparser.add_argument('--dir', type=str, default='model_data/adversarial_curves/', metavar='DIR',\n                    help='directory for saved evaluated curves (default: /model_data/adversarial_curves/)')\nparser.add_argument('--dir_training', type=str, default='model_data/training/adversarial_curve_models/', metavar='DIR',\n                    help='directory for saved curve training data '\n                         '(default: model_data/training/adversarial_curve_models/)')\n\nparser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',\n                    help='dataset name (default: CIFAR10)')\nparser.add_argument('--model', nargs='+', type=str, default=['TinyTen', 'ResNet32', 'GoogLeNet'], metavar='MODEL',\n                    help='model name (default: None)')\nparser.add_argument('--seed_a', nargs='+', type=int, default=[1, 3, 5], metavar='S',\n                    help='random seed for model 0 (default: 1)')\nparser.add_argument('--seed_b', nargs='+',\n                    type=int, 
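\n                    # seed_a[i] is paired with seed_b[i]: each pair identifies the two endpoint models of one evaluated curve\n                    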
default=[2, 4, 6], metavar='S', help='random seed for model 1(default: 2)')\nparser.add_argument('--alignment', nargs='+', type=str, default=['corr', 'null'],\n help='specify an alignment if the models are to be aligned before curve finding (default: None)')\nparser.add_argument('--epochs',\n type=int, nargs='+', default=[200, 200, 20], metavar='S',\n help='Number of epochs the curve was trained for')\nargs = parser.parse_args()\n\n\nproject_root = definitions.get_project_root()\nos.chdir(project_root)\n\nnum_models = len(args.model)\nnum_alignments = len(args.alignment)\nnum_curves = len(args.seed_a)\nif num_curves == 1:\n ddof = 0\nelse:\n ddof = 1\n\nif args.alignment is None:\n args.alignment = ['null']\n\nalign_dict = {'null': 'Unaligned', 'corr': 'Aligned', 'pam_': 'PAM Unaligned', 'corr_quad': 'Quadratic Assignment',\n 'pam_corr': 'PAM Aligned'}\nalign_dict2 = {'null': 'Val: Unaligned', 'corr': 'Val: Aligned', 'pam_': 'Val: PAM Unaligned',\n 'corr_quad': 'Val: Quadratic Assignment', 'pam_corr': 'Val: PAM Aligned'}\nepoch_dict = {'0': 'Linear Interpolation', '250': 'Trained Bezier Curve', '200': 'Trained Bezier Curve', '240': 'PAM'}\n\nfig1 = plt.figure(1, figsize=[1.75 * 6.4, 1.0 * 4.8])\nfig2 = plt.figure(2, figsize=[1.75 * 6.4, 1.0 * 4.8])\nfig3 = plt.figure(3, figsize=[1.5 * 6.4, 1.2 * 4.8])\nfig4 = plt.figure(4, figsize=[1.5 * 6.4, 1.2 * 4.8])\nfig5 = plt.figure(5, figsize=[1.5 * 6.4, 1 * 4.8])\nfig6 = plt.figure(6, figsize=[1.75 * 6.4, 1.0 * 4.8])\nfig7 = plt.figure(7, figsize=[1.75 * 6.4, 1.0 * 4.8])\n\nfor model_idx, model in enumerate(args.model):\n epoch = args.epochs[model_idx]\n dir_model = ('%s%s/%s/' % (args.dir, model, args.dataset))\n dir_training = ('%s%s/%s/' % (args.dir_training, model, args.dataset))\n\n curve_len = [None] * num_alignments\n loss = [None] * num_alignments\n acc = [None] * num_alignments\n loss_robust = [None] * num_alignments\n acc_robust = [None] * num_alignments\n loss_t_train = [None] * num_alignments\n acc_t_train = [None] * num_alignments\n loss_t_test = [None] * num_alignments\n acc_t_test = [None] * num_alignments\n\n loss_time = [None] * num_alignments\n acc_time = [None] * num_alignments\n\n loss_line_integral = np.zeros([num_alignments, num_curves])\n acc_line_integral = np.zeros([num_alignments, num_curves])\n loss_r_line_integral = np.zeros([num_alignments, num_curves])\n acc_min = np.zeros([num_alignments, num_curves])\n acc_max = np.zeros([num_alignments, num_curves])\n\n acc_min_r = np.zeros([num_alignments, num_curves])\n acc_max_r = np.zeros([num_alignments, num_curves])\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n for i, (seed_a, seed_b) in enumerate(zip(args.seed_a, args.seed_b)):\n curve_dict = np.load('%scurve_align_%s_seeds_%02d_%02d-%d.npy' %\n (dir_model, alignment, seed_a, seed_b, epoch), allow_pickle=True)\n curve_dict = curve_dict[()]\n if loss[j] is None:\n curve_len[j] = np.zeros([num_curves, 33])\n loss[j] = np.zeros([num_curves, 33])\n acc[j] = np.zeros([num_curves, 33])\n loss_robust[j] = np.zeros([num_curves, 33])\n acc_robust[j] = np.zeros([num_curves, 33])\n if len(curve_dict['curve_len']) == 65:\n curve_len[j][i] = curve_dict['curve_len'][::2]\n loss[j][i] = curve_dict['test_loss'][::2]\n acc[j][i] = curve_dict['test_acc'][::2]\n loss_robust[j][i] = curve_dict['test_loss_robust'][::2]\n acc_robust[j][i] = curve_dict['test_acc_robust'][::2]\n else:\n curve_len[j][i] = curve_dict['curve_len']\n loss[j][i] = curve_dict['test_loss']\n acc[j][i] = 
curve_dict['test_acc']\n loss_robust[j][i] = curve_dict['test_loss_robust']\n acc_robust[j][i] = curve_dict['test_acc_robust']\n\n curve_train_dict = np.load('%scurve_align_%s_seeds_%02d_%02d.npy' %\n (dir_training, alignment, seed_a, seed_b), allow_pickle=True)\n curve_train_dict = curve_train_dict[()]\n\n if loss_t_train[j] is None:\n loss_t_train[j] = np.zeros([num_curves, len(curve_train_dict['loss_train'])])\n acc_t_train[j] = np.zeros([num_curves, len(curve_train_dict['loss_train'])])\n loss_t_test[j] = np.zeros([num_curves, len(curve_train_dict['loss_train'])])\n acc_t_test[j] = np.zeros([num_curves, len(curve_train_dict['loss_train'])])\n\n loss_time[j] = [None] * num_curves\n acc_time[j] = [None] * num_curves\n\n loss_t_train[j][i] = curve_train_dict['loss_train']\n acc_t_train[j][i] = curve_train_dict['acc_train']\n loss_t_test[j][i] = curve_train_dict['loss_test']\n acc_t_test[j][i] = curve_train_dict['acc_test']\n\n loss_line_integral[j, i] = curve_dict['loss_line_integral']\n x = np.pad(curve_len[j][i], (1, 1), 'edge')\n acc_line_integral[j, i] = np.sum(((x[1:-1] - x[:-2])/2 + (x[2:] - x[1:-1])/2) * acc[j][i]) / x[-1]\n loss_r_line_integral[j, i] = np.sum(((x[1:-1] - x[:-2]) / 2 + (x[2:] - x[1:-1]) / 2) * loss_robust[j][i]) \\\n / x[-1]\n\n acc_min_r[j, i] = np.min(acc_robust[j][i])\n acc_max_r[j, i] = np.max(acc_robust[j][i])\n\n acc_min[j, i] = acc[j][i][np.argmin(acc_robust[j][i])]\n acc_max[j, i] = acc[j][i][np.argmax(acc_robust[j][i])]\n\n loss_time = np.asarray(loss_time)\n\n loss_fft = np.fft.rfft(loss_time, axis=-1).real\n\n curve_len = np.stack(curve_len)\n loss = np.stack(loss)\n loss_robust = np.stack(loss_robust)\n loss_fft = np.zeros([loss.shape[0], loss.shape[1], loss.shape[2] - 1])\n for i in range(loss.shape[0]):\n for j in range(loss.shape[1]):\n t_fft = curve_len[i, j] / curve_len[i, j, -1] - 1/2\n loss_fft[i, j] = ndft(t_fft[:-1], loss[i, j, :-1]).real\n loss_fft = loss_fft[:, :, 32:]\n loss_fft = np.real(np.fft.rfft(loss, axis=-1))\n loss_fft = loss_fft[:, :, loss.shape[2] // 2:]\n loss_base = np.zeros_like(loss)\n for i in range(loss.shape[-1]):\n i_t = i / (loss.shape[-1] - 1)\n loss_base[:, :, i] = loss[:, :, i] - ((1 - i_t) * loss[:, :, 0] + i_t * loss[:, :, -1])\n loss_fft = np.fft.rfft(loss, axis=-1)\n loss_fft = loss_fft / np.sum(loss_fft ** 2, axis=-1, keepdims=True) ** 0.5\n loss_fft = np.abs(loss_fft)\n\n acc = np.stack(acc)\n acc_robust = np.stack(acc_robust)\n loss_t_train = np.stack(loss_t_train)\n acc_t_train = np.stack(acc_t_train)\n loss_t_test = np.stack(loss_t_test)\n acc_t_test = np.stack(acc_t_test)\n\n curve_len = np.mean(curve_len, axis=1)\n t = np.linspace(0, 1, len(curve_len[0]))\n\n def compute_stats(signal):\n signal_mu = np.mean(signal, axis=1)\n signal_sigma = np.std(signal, axis=1, ddof=ddof)\n signal_stderr = signal_sigma / signal.shape[1] ** 0.5\n return signal_mu, signal_sigma, signal_stderr\n\n loss_mu, loss_sigma, loss_stderr = compute_stats(loss)\n acc_mu, acc_sigma, acc_stderr = compute_stats(acc)\n\n loss_r_mu, loss_r_sigma, loss_r_stderr = compute_stats(loss_robust)\n acc_r_mu, acc_r_sigma, acc_r_stderr = compute_stats(acc_robust)\n\n loss_fft_mu, loss_fft_sigma, loss_fft_stderr = compute_stats(loss_fft)\n\n loss_t_train_mu, loss_t_train_sigma, loss_t_train_stderr = compute_stats(loss_t_train)\n acc_t_train_mu, acc_t_train_sigma, acc_t_train_stderr = compute_stats(acc_t_train)\n loss_t_test_mu, loss_t_test_sigma, loss_t_test_stderr = compute_stats(loss_t_test)\n acc_t_test_mu, acc_t_test_sigma, acc_t_test_stderr = 
compute_stats(acc_t_test)\n\n loss_line_integral_mu, loss_line_integral_sigma, loss_line_integral_stderr = compute_stats(loss_line_integral)\n loss_r_line_integral_mu, loss_r_line_integral_sigma, loss_r_line_integral_stderr \\\n = compute_stats(loss_r_line_integral)\n acc_line_integral_mu, acc_line_integral_sigma, acc_line_integral_stderr = compute_stats(acc_line_integral)\n\n acc_min_mu, acc_min_sigma, acc_min_stderr = compute_stats(acc_min)\n acc_max_mu, acc_max_sigma, acc_max_stderr = compute_stats(acc_max)\n\n acc_min_r_mu, acc_min_r_sigma, acc_min_r_stderr = compute_stats(acc_min_r)\n acc_max_r_mu, acc_max_r_sigma, acc_max_r_stderr = compute_stats(acc_max_r)\n\n print('Model: %s' % model)\n print('Endpoint')\n print('%0.1f pm %0.1f' % (0.5*(acc_mu[0, 0] + acc_mu[0, -1]), 0.5*(acc_sigma[0, 0]**2 + acc_sigma[0, -1]**2)**0.5 ))\n print('%0.2f' % np.maximum(acc_mu[0, 0],acc_mu[0, -1]))\n print('%0.1f pm %0.1f' % (0.5 * (acc_r_mu[0, 0] + acc_r_mu[0, -1]), 0.5 * (acc_r_sigma[0, 0] ** 2 + acc_r_sigma[0, -1] ** 2) ** 0.5))\n print('%0.2f' % np.maximum(acc_r_mu[0, 0], acc_r_mu[0, -1]))\n for j, alignment in enumerate(args.alignment):\n print('Line Integrated Loss (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], loss_r_line_integral_mu[j], loss_r_line_integral_sigma[j]))\n print('Line Integrated Accuracy (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], acc_line_integral_mu[j], acc_line_integral_sigma[j]))\n print('Worst accuracy along the curve (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], acc_min_mu[j], acc_min_sigma[j]))\n print('Best accuracy along the curve (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], acc_max_mu[j], acc_max_sigma[j]))\n print('Worst robust accuracy along the curve (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], acc_min_r_mu[j], acc_min_r_sigma[j]))\n print('Best robust accuracy along the curve (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], acc_max_r_mu[j], acc_max_r_sigma[j]))\n print('Training Loss (%s): %0.3f +/- %0.3f' %\n (align_dict[alignment], loss_t_train_mu[j][-1], loss_t_train_sigma[j][-1]))\n\n colors = ['blue', 'green', 'red', 'purple']\n markers = ['o', 'v']\n plt.figure(1)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n if num_curves > 1:\n plt.errorbar(t, loss_mu[j], loss_sigma[j], label=align_dict[alignment],\n color=colors[j], marker=markers[j], markevery=8)\n else:\n plt.plot(t, loss_mu[j], label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=8)\n plt.title('%s' % model, fontsize='xx-large')\n plt.xlabel('t', fontsize='xx-large')\n plt.ylabel('Clean Loss', fontsize='xx-large')\n plt.xticks(fontsize='x-large')\n plt.yticks(fontsize='x-large')\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.3f}'))\n\n plt.legend(fontsize='x-large')\n\n mask = np.isfinite(loss_t_test_mu[0])\n mask[0] = False\n plt.figure(5)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n if num_curves > 1:\n plt.errorbar(np.arange(1, 9), loss_fft_mu[j, 1:9], loss_fft_sigma[j, 1:9],\n label=align_dict[alignment],\n color=colors[j])\n else:\n plt.plot(np.arange(1, 9), loss_fft_mu[j, 1:9], label=align_dict[alignment],\n color=colors[j])\n plt.title('%s' % model)\n plt.xlabel('Wavenumber, k')\n plt.ylabel('|F(L)|')\n plt.legend()\n\n plt.figure(6)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n 
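# the saved curve files use 'null' for the unaligned case, so map the empty CLI token to that key\n            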
alignment = 'null'\n if num_curves > 1:\n plt.errorbar(t, loss_r_mu[j], loss_r_sigma[j], label=align_dict[alignment],\n color=colors[j], marker=markers[j], markevery=8)\n else:\n plt.plot(t, loss_r_mu[j], label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=8)\n plt.title('%s' % model, fontsize='xx-large')\n plt.xlabel('t', fontsize='xx-large')\n plt.ylabel('Robust Loss', fontsize='xx-large')\n plt.xticks(fontsize='x-large')\n plt.yticks(fontsize='x-large')\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.3f}'))\n plt.legend(fontsize='large')\n\n plt.figure(2)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n if num_curves > 1:\n plt.errorbar(t, acc_mu[j], acc_sigma[j], label=align_dict[alignment],\n color=colors[j], marker=markers[j], markevery=8)\n else:\n plt.plot(t, acc_mu[j], label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=8)\n plt.title('%s' % model, fontsize='xx-large')\n plt.xlabel('t', fontsize='xx-large')\n plt.ylabel('Clean Accuracy', fontsize='xx-large')\n plt.xticks(fontsize='x-large')\n plt.yticks(fontsize='x-large')\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))\n\n plt.legend(fontsize='large')\n\n plt.figure(7)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n if num_curves > 1:\n plt.errorbar(t, acc_r_mu[j], acc_r_sigma[j], label=align_dict[alignment],\n color=colors[j], marker=markers[j], markevery=8)\n else:\n plt.plot(t, acc_r_mu[j], label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=8)\n plt.title('%s' % model, fontsize='xx-large')\n plt.xlabel('t', fontsize='xx-large')\n plt.ylabel('Robust Accuracy', fontsize='xx-large')\n plt.xticks(fontsize='x-large')\n plt.yticks(fontsize='x-large')\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))\n\n plt.legend(fontsize='large')\n\n x = np.asarray([i+1 for i in range(len(loss_t_train_mu[0]))])\n mask = np.isfinite(loss_t_test_mu[0])\n plt.figure(3)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n if num_curves > 1:\n plt.errorbar(x, loss_t_train_mu[j], loss_t_train_sigma[j],\n label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=20)\n else:\n plt.plot(x, loss_t_train_mu[j], label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=20)\n plt.title('%s' % model, fontsize='xx-large')\n plt.xlabel('Epoch', fontsize='xx-large')\n plt.ylabel('Loss', fontsize='xx-large')\n plt.legend(fontsize='large')\n x1, x2, y1, y2 = plt.axis()\n y_max = np.min(loss_t_train_mu[:, 1])\n plt.axis((x1, x2, y1, y_max))\n\n plt.figure(4)\n plt.subplot(1, num_models, model_idx + 1)\n for j, alignment in enumerate(args.alignment):\n if alignment == '':\n alignment = 'null'\n if num_curves > 1:\n plt.errorbar(x, acc_t_train_mu[j], acc_t_train_sigma[j],\n label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=20)\n else:\n plt.plot(x[mask], acc_t_train_mu[j, mask], label=align_dict[alignment], color=colors[j], marker=markers[j], markevery=20)\n plt.title('%s' % model, fontsize='xx-large')\n plt.xlabel('Epoch', fontsize='xx-large')\n plt.ylabel('Accuracy', fontsize='xx-large')\n plt.legend(fontsize='large')\n x1, x2, y1, y2 = plt.axis()\n y_min = np.max(acc_t_train_mu[:, 5])\n plt.axis((x1, x2, y_min, 
y2))\n\nplt.figure(1)\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.savefig('figures/fin_curve_loss.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n\nplt.figure(2)\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.savefig('figures/fin_curve_acc.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n\nplt.figure(3)\nplt.tight_layout(rect=[0, 0.03, 1, 0.9])\n# plt.savefig('figures/fin_train_loss_stl10.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\nplt.figure(4)\nplt.tight_layout(rect=[0, 0.03, 1, 0.9])\n# plt.savefig('figures/fin_train_acc_stl10.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n\nplt.figure(5)\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.savefig('figures/fin_fft.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n\nplt.figure(6)\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.savefig('figures/fin_curve_loss_robust.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n\nplt.figure(7)\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n# plt.savefig('figures/fin_curve_acc_robust.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n\nplt.show()\n","sub_path":"visualization/viz_adversarial_curve.py","file_name":"viz_adversarial_curve.py","file_ext":"py","file_size_in_byte":18309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"285703567","text":"from __future__ import print_function\nfrom keras.preprocessing.image import ImageDataGenerator\nimport numpy as np \nimport os\nimport glob\nimport cv2\nimport six\nimport random\n\nBackground = [0,0,0]\nIntraoralArea = [255,255,255]\nIntraoralArea_1 = [255,0,0]\n\nclass_colors = np.array([Background, IntraoralArea])\n#class_colors = [(random.randint(0, 255), random.randint(\n# 0, 255), random.randint(0, 255)) for _ in range(5000)]\n\ndef adjustData(img,mask,flag_multi_class,num_class):\n if(flag_multi_class):\n img = img / 255\n mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]\n new_mask = np.zeros(mask.shape + (num_class,))\n for i in range(num_class):\n #for one pixel in the image, find the class in mask and convert it into one-hot vector\n #index = np.where(mask == i)\n #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)\n #new_mask[index_mask] = 1\n new_mask[mask == i,i] = 1\n new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))\n mask = new_mask\n elif(np.max(img) > 1):\n img = img / 255\n mask = mask /255\n mask[mask > 0.5] = 1\n mask[mask <= 0.5] = 0\n return (img,mask)\n\n\n\ndef trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,n_classes=100,image_color_mode = \"grayscale\",\n mask_color_mode = \"grayscale\",image_save_prefix = \"image\",mask_save_prefix = \"mask\",\n flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (256,256), mask_target_size = (256,256), seed = 1):\n '''\n can generate image and mask at the same time\n use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same\n if you want to visualize the results of generator, set save_to_dir = \"your path\"\n '''\n image_datagen = ImageDataGenerator(**aug_dict)\n mask_datagen = ImageDataGenerator(**aug_dict)\n image_generator = image_datagen.flow_from_directory(\n train_path,\n classes = [image_folder],\n 
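# class_mode=None makes the generator yield raw image batches with no labels; the mask\n        # generator below is constructed with the same seed so image/mask augmentations stay paired\n        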
class_mode = None,\n        color_mode = \"rgb\",\n        target_size = target_size,\n        batch_size = batch_size,\n        save_to_dir = save_to_dir,\n        save_prefix = image_save_prefix,\n        seed = seed)\n    mask_generator = mask_datagen.flow_from_directory(\n        train_path,\n        classes = [mask_folder],\n        class_mode = None,\n        color_mode = \"rgb\",\n        target_size = mask_target_size,\n        batch_size = batch_size,\n        save_to_dir = save_to_dir,\n        save_prefix = mask_save_prefix,\n        seed = seed)\n    train_generator = zip(image_generator, mask_generator)\n    for (img,mask) in train_generator:\n        img,mask = adjustData(img,mask,flag_multi_class,num_class)\n        Y = []\n        for i in range(mask.shape[0]):\n            Y.append(get_segmentation_array(mask[i], n_classes, mask_target_size[0], mask_target_size[1]))\n        yield (img, np.array(Y))\n\ndef get_segmentation_array(image_input, nClasses, width, height, no_reshape=False):\n    \"\"\" Load segmentation array from input \"\"\"\n\n    seg_labels = np.zeros((height, width, nClasses))\n\n    if type(image_input) is np.ndarray:\n        # It is already an array, use it as it is\n        img = image_input\n    elif isinstance(image_input, six.string_types):\n        if not os.path.isfile(image_input):\n            raise DataLoaderError(\"get_segmentation_array: path {0} doesn't exist\".format(image_input))\n        img = cv2.imread(image_input, 1)\n    else:\n        raise DataLoaderError(\"get_segmentation_array: Can't process input type {0}\".format(str(type(image_input))))\n\n    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)\n    img = img[:, :, 0]\n\n    for c in range(nClasses):\n        seg_labels[:, :, c] = (img == c).astype(int)\n\n    if not no_reshape:\n        seg_labels = np.reshape(seg_labels, (width*height, nClasses))\n\n    return seg_labels\n\nclass DataLoaderError(Exception):\n    pass\n\ndef testGenerator(test_path,num_image = 30,target_size = (256,256),flag_multi_class = False,as_gray = True):\n    for i in range(num_image):\n        img = cv2.imread(os.path.join(test_path,\"%d.png\"%i), cv2.IMREAD_COLOR)\n        #img = io.imread(os.path.join(test_path,\"%d.png\"%i),as_gray = as_gray)\n        img = img / 255\n        img = cv2.resize(img, target_size)\n        #img = trans.resize(img,target_size)\n        #img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img\n        img = np.reshape(img,(1,)+img.shape)\n        yield img\n\ndef testGeneratorForImg(img,target_size = (256,256),flag_multi_class = False,as_gray = True):\n    if as_gray:\n        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # when as_gray is False the image is used unchanged: cv2.IMREAD_COLOR is an imread\n    # flag, not a colour-conversion code, so passing it to cvtColor here was a bug\n\n    img = img / 255\n    img = cv2.resize(img, target_size)\n    #img = trans.resize(img,target_size)\n    #img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img\n    img = np.reshape(img,(1,)+img.shape)\n    yield img\n\ndef saveResultForOtherModel(save_path,npyfile, output_width = 264, output_height = 264, n_classes = 100, flag_multi_class = False,num_class = 2):\n    for i,item in enumerate(npyfile):\n        seg_img = getResultForSingleImg(item, output_width = output_width, output_height = output_height, n_classes = n_classes)\n        cv2.imwrite(os.path.join(save_path,\"%d_predict.png\"%i),seg_img)\n\ndef getResultForSingleImg(img, output_width = 264, output_height = 264, n_classes = 100):\n    img = img.reshape((output_height, output_width, n_classes)).argmax(axis=2)\n\n    seg_img = np.zeros((output_height, output_width, 3))\n    colors = class_colors\n\n    for c in range(n_classes):\n        seg_img[:, :, 0] += ((img[:, :] == c) * (colors[c][0])).astype('uint8')\n        seg_img[:, :, 1] += ((img[:, :] == c) * (colors[c][1])).astype('uint8')\n        seg_img[:, :, 2] += ((img[:, :] == c) * 
(colors[c][2])).astype('uint8')\n\n # seg_img = cv2.resize(seg_img, (256, 256))\n seg_img = cv2.resize(seg_img, (output_width,output_height))\n return seg_img","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"464458167","text":"from dimagi.utils.dates import force_to_datetime\n\nfrom corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand\n\n\nclass Command(PopulateSQLCommand):\n @classmethod\n def couch_db_slug(cls):\n return \"users\"\n\n @classmethod\n def couch_doc_type(self):\n return 'Invitation'\n\n @classmethod\n def sql_class(self):\n from corehq.apps.users.models import Invitation\n return Invitation\n\n @classmethod\n def commit_adding_migration(cls):\n return \"3c6e3ea5b42834ac78b266b4e340af3d9a10481e\"\n\n def update_or_create_sql_object(self, doc):\n model, created = self.sql_class().objects.update_or_create(\n couch_id=doc['_id'],\n defaults={\n \"email\": doc.get(\"email\"),\n \"invited_by\": doc.get(\"invited_by\"),\n \"invited_on\": force_to_datetime(doc.get(\"invited_on\")),\n \"is_accepted\": doc.get(\"is_accepted\", False),\n \"domain\": doc.get(\"domain\"),\n \"role\": doc.get(\"role\"),\n \"program\": doc.get(\"program\"),\n \"supply_point\": doc.get(\"supply_point\"),\n })\n return (model, created)\n","sub_path":"corehq/apps/users/management/commands/populate_usersinvitation.py","file_name":"populate_usersinvitation.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"108616734","text":"import errno\nimport glob\n\nfiles = glob.glob('*.fasta.eg')\nx = 1\n\nfor file in files:\n\twith open(file, 'r') as myfile:\n\t\tmylines = []\n\t\tfor a_line in myfile:\n\t\t\tif a_line == '>2 chr1:1-X\\n':\n\t\t\t\tbreak\n\t\t\tmylines.append(a_line.rstrip('\\n')) \n\n\twith open(\"chrMchr1New-\"+str(x)+\".fasta.eg\", 'w') as writer:\n \t\twriter.writelines(\"%s\\n\" % i for i in mylines)\n \t\tx += 1\n","sub_path":"data/gen/nguyen_nc_2018/20190613-fastas/mitolin_split.py","file_name":"mitolin_split.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"326791523","text":"\"\"\"This file is a BostonMarket spider created on top of the ATSSpider\nscrapy crawl bostonmarket -a url=\"http://bostonmarketjobs.com\" -a extract=1\n\nsample url:\n http://bostonmarketjobs.com\n\"\"\"\nimport json\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom scrapy.exceptions import CloseSpider\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.lib.utils import add_get_params\nfrom brightcorp.processors import Prefix\nfrom math import ceil\n\n\nclass BostonMarket(ATSSpider):\n\n \"\"\" Boston Market Crawler. \"\"\"\n\n name = \"bostonmarket\"\n\n url_fragmentanchor = \"/php/searchJob.php?outputjson=true\"\n\n def parse(self, response):\n \"\"\" Parse ResponseJobSearch (json) and extract total pages. 
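For example, a response reporting TotalCount=23 at five jobs per page yields int(ceil(23 / 5.0)) = 5 pages. 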
\"\"\"\n self.set_meta_language(response)\n\n total_pages = 0\n\n json_output = json.loads(response.body)\n\n if 'ResponseJobSearch' in json_output:\n content = json_output['ResponseJobSearch']\n try:\n # Divide total job count by total jobs per page to get the real total page number\n total_pages = int(ceil(int(content['TotalCount']) / 5.0))\n except IndexError:\n total_pages = 0\n\n if total_pages > 0:\n for page in xrange(1, total_pages):\n url = add_get_params(response.url, {'PageNumber': page})\n yield Request(url, callback=self.parse_page)\n\n if total_pages == 0 :\n raise CloseSpider(\"No jobs available.\")\n\n def parse_page(self, response):\n \"\"\" Parse job items (json) per page. \"\"\"\n json_output = json.loads(response.body)\n\n if 'ResponseJobSearch' in json_output:\n content = json_output['ResponseJobSearch']\n try:\n jobs = content['Results']['JobSearchResult']\n except IndexError:\n jobs = None\n\n if jobs:\n for job in jobs:\n request = Request(\n job['JobServiceURL'].strip(),\n callback=self.parse_job_callback(), dont_filter=True)\n request.meta['apply_url'] = job['JobDetailsURL']\n\n yield request\n\n def parse_job(self, response):\n \"\"\" Parse job item. \"\"\"\n sel = Selector(response)\n loader = BrightcorpItemLoader(selector=sel)\n\n # Get attribute data\n job_id = self.get_item_data(sel, 'DID')\n description = self.get_item_data(sel, 'JobDescription')\n requirements = self.get_item_data(sel, 'JobRequirements')\n location = self.get_item_data(sel, 'LocationFormatted')\n company = self.get_item_data(sel, 'Company')\n title = self.get_item_data(sel, 'JobTitle')\n industry = self.get_item_data(sel, 'Categories')\n jobtype = self.get_item_data(sel, 'EmploymentType')\n\n # Load the data into an item\n loader.add_value(\"referencenumber\", job_id, Prefix(\"%s-\" % self.name))\n loader.add_value(\"url\", response.meta['apply_url'])\n loader.add_value(\"description\", description)\n loader.add_value(\"requirements\", requirements)\n loader.add_value(\"location\", location)\n loader.add_value(\"company\", company)\n loader.add_value(\"title\", title)\n loader.add_value(\"industry\", industry)\n loader.add_value(\"jobtype\", jobtype)\n\n return loader.load_item()\n\n def get_item_data(self, sel, attribute_name):\n \"\"\"\n Get data for a particular attribute from the xml feed\n Return an empty string if nothing is found\n \"\"\"\n xpath = '//ResponseJob/Job/'+attribute_name+'/text()'\n item = sel.xpath(xpath).extract()\n if item:\n return item[0]\n return ''\n\n def set_custom_item(self, response):\n \"\"\"Raw Mode: set field value.\"\"\"\n\n job_dict = response.meta['job_dict']\n\n self.loader.add_value(\"referencenumber\", job_dict['referencenumber'],\n Prefix(\"%s-\" % self.name))","sub_path":"brightcorp/brightcorp/spiders/bostonmarket.py","file_name":"bostonmarket.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"627835634","text":"import csv\nimport requests\nimport PIL\nimport tkinter as tk\nfrom PIL import Image\nfrom io import BytesIO\nfrom tkinter import filedialog\n\nclass ImageHandler:\n\tcache = []\n\n\tdef selectNames(names, nums):\n\t\tcheat = [[0,2,4,12,14],[1,3,5,13,15],[6,9,10,17,18],[7,8,11,16,19]]\n\t\toutList = []\n\t\ttotal = len(names)\n\t\tfor x in cheat[nums]:\n\t\t\tif total > x:\n\t\t\t\toutList.append(names[x])\n\t\treturn outList\n\n\tdef loadImage(name):\n\t\ttry: \n\t\t\timg = Image.open(\"Images/\"+ name 
+\".png\")\n\t\t\tImageHandler.cache.append(name)\n\t\texcept:\n\t\t\turl = \"http://ddragon.leagueoflegends.com/cdn/9.13.1/img/champion/\"+ name +\".png\"\n\t\t\tresponse = requests.get(url)\n\t\t\timg = Image.open(BytesIO(response.content))\n\t\t\timg.save(\"Images/\"+ name +\".png\",\"PNG\")\n\t\t\tImageHandler.cache.append(name)\n\n\tdef formatNames(names):\n\t\toutNames = []\n\t\tfor name in names:\n\t\t\tif name == \"\":\n\t\t\t\toutNames.append(\"Empty\")\n\t\t\telse:\n\t\t\t\ttempName = name.replace(\"\\'\", \" \").title()\n\t\t\t\ttempName = tempName.replace(\" \",\"\")\n\t\t\t\toutNames.append(tempName)\n\t\treturn outNames\n\t\t\n\tdef makeNewDraft(names, id, num):\n\t\torder = [[[0,2,4,12,14],[1,3,5,13,15]],[[6,9,10,17,18],[7,8,11,16,19]]]\n\t\tnames = ImageHandler.formatNames(names)\n\t\tfor name in names:\n\t\t\tImageHandler.loadImage(name)\n\t\tpicks = len(names)\n\t\tfor x in range(picks, 21):\n\t\t\tnames.append(\"Empty\")\n\t\tfileNames = []\n\t\tfor name in names:\n\t\t\tfileNames.append(\"Images/\"+ name + \".png\")\n\t\timages = map(Image.open, fileNames)\n\t\tnew_im = Image.new('RGB', (660, 150))\n\t\tyOffset = 0\n\t\tfor group in order:\n\t\t\txOffset = 0\t\n\t\t\tfor row in group:\n\t\t\t\tfor img in row:\n\t\t\t\t\ttempImg = Image.open(fileNames[img])\n\t\t\t\t\tnew_im.paste(tempImg.resize((60,60), PIL.Image.ANTIALIAS), (xOffset, yOffset))\n\t\t\t\t\txOffset += 60\n\t\t\t\txOffset += 60\n\t\t\tyOffset += 90\n\t\tlocation = \"Images/\"+str(id)+\".png\"\n\t\tnew_im.save(location)\n\t\treturn location\n\n\nclass Pick:\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.children = []\n\n\tdef isChild(self, name):\n\t\tfor child in self.children:\n\t\t\tif child.name == name:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef addChild(self, name):\n\t\tself.children.append(Pick(name))\n\n\tdef getChild(self, name):\n\t\tfor child in self.children:\n\t\t\tif child.name == name:\n\t\t\t\treturn child\n\t\treturn \"NULL\"\n\n\tdef getChildren(self):\n\t\treturn self.children\n\n\tdef familySize(self):\n\t\treturn len(self.children)\n\n\tdef emptyNest(self):\n\t\treturn not self.children\n\n\tdef recurPrint(self):\n\t\tprint(self.name)\n\t\tfor child in self.children:\n\t\t\tchild.recurPrint()\n\n\nclass Tree:\n\tdef __init__(self):\n\t\tself.nodes = []\n\t\tself.root = Pick(\"HEAD\")\n\n\tdef __init__(self, name):\n\t\tself.nodes = []\n\t\tself.root = Pick(name)\n\n\tdef addDraft(self, instructions):\n\t\tcurNode = self.root\n\t\tfor champ in instructions:\n\t\t\tif champ == \"\":\n\t\t\t\treturn\n\t\t\tif curNode.isChild(champ):\n\t\t\t\tcurNode = curNode.getChild(champ)\n\t\t\telse:\n\t\t\t\tcurNode.addChild(champ)\n\t\t\t\tcurNode = curNode.getChild(champ)\n\n\tdef printTree(self):\n\t\tself.root.recurPrint()\n\n\tdef getRoot(self):\n\t\treturn self.root\n\n\nclass MindMap:\n\n\tblueBans = [1,3,5,13,15]\n\tredBans = [2,4,6,14,16]\n\tbluePicks = [7,10,11,18,19]\n\tredPicks = [8,9,12,17,20]\n\n\tdef __init__(self):\n\t\tself.name = \"NULL\"\n\t\tself.outFile = \"test.mm\"\n\t\tself.id = 1\n\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.outFile = name + \".mm\"\n\t\tself.id = 1\n\n\tdef recurWrite(self, node, file, depth, path):\n\t\tnewPath = path\n\t\tnewPath.append(node.name)\n\t\tcolor = depth % 2\n\t\tif (depth >= 9 and depth <= 10) or (depth >= 17 and depth <= 18):\n\t\t\tcolor = (color + 1) % 2 \n\t\tif color:\n\t\t\tbackColor = \"#0033ff\"\n\t\telse:\n\t\t\tbackColor = \"#ff0033\"\n\t\tif depth == 0:\n\t\t\tbackColor = \"#000000\"\n\n\t\tif 
depth == 0:\n\t\t\tfile.write(\"\\n\")\n\t\t\tself.id = self.id + 1\n\t\t\tfor child in node.getChildren():\n\t\t\t\tself.recurWrite(child, file, depth + 1, [])\n\t\t\tfile.write(\"\\n\")\n\t\telse:\n\t\t\tlink = \"Images/Empty.png\"\n\t\t\tif depth in MindMap.blueBans or depth in MindMap.redBans:\n\t\t\t\tlink = ImageHandler.makeNewDraft(path, self.id, self.getType(depth))\n\t\t\tif depth in MindMap.bluePicks or depth in MindMap.redPicks:\n\t\t\t\tlink = ImageHandler.makeNewDraft(path, self.id, self.getType(depth))\n\n\t\t\tif not node.emptyNest():\n\t\t\t\tfile.write(\"\\n\")\n\t\t\t\tfile.write(\"\\n\")\n\t\t\t\tself.id = self.id + 1\n\t\t\t\ti = 0\n\t\t\t\tfor child in node.getChildren():\n\t\t\t\t\ti += 1\n\t\t\t\t\tself.recurWrite(child, file, depth + 1, newPath[:])\n\t\t\t\tfile.write(\"\\n\")\n\t\t\telse:\n\t\t\t\tfile.write(\"\\n\")\n\t\t\t\tfile.write(\"\\n\")\n\t\t\t\tself.id = self.id + 1\n\t\t\t\treturn\n\n\n\tdef generateMindMap(self, tree):\n\t\tfile = open(self.outFile, \"w\")\n\t\tfile.write(\"\\n\")\n\t\tself.recurWrite(tree.getRoot(), file, 0, [])\n\t\tfile.write(\"\\n\")\n\t\tfile.close()\n\n\tdef getType(self, depth):\n\t\t# 0 - Blue Ban\n\t\t# 1 - Red Ban\n\t\t# 2 - Blue Draft\n\t\t# 3 - Red Draft\n\t\tif depth == 9 or depth == 17:\n\t\t\treturn 3\n\t\tif depth == 10 or depth == 18:\n\t\t\treturn 2\n\t\tif depth % 2 == 1:\n\t\t\tif depth <= 6 or (depth >= 13 and depth <= 16):\n\t\t\t\treturn 0\n\t\t\treturn 2\n\t\tif depth <= 6 or (depth >= 13 and depth <= 16):\n\t\t\treturn 1\n\t\treturn 3\n\n\nclass SimpleCSV:\n\tdef __init__(self, name):\n\t\tself.topLim = 2\n\t\tself.botLim = 14 \n\t\tself.leftPoint = 3\n\t\tself.rightPoint = 24\n\t\tself.fileName = name\n\n\tdef getDrafts(self):\n\t\tdrafts = []\n\t\twith open(self.fileName, newline='') as csvFile:\n\t\t\tspamreader = csv.reader(csvFile, delimiter=',', quotechar='|')\n\t\t\trowCount = 0\n\t\t\tfor row in spamreader:\n\t\t\t\ttempDraft = []\n\t\t\t\tif rowCount >= self.topLim and rowCount <= self.botLim:\n\t\t\t\t\tcolCount = 0\n\t\t\t\t\tfor entry in row:\n\t\t\t\t\t\tif colCount >= self.leftPoint and colCount <= self.rightPoint:\n\t\t\t\t\t\t\ttempDraft.append(entry)\n\t\t\t\t\t\tcolCount = colCount+1\n\t\t\t\t\tdrafts.append(tempDraft)\n\t\t\t\trowCount = rowCount+1\n\t\t# iterate over a copy: removing from the list being iterated would skip entries\n\t\tfor draft in drafts[:]:\n\t\t\tif draft[0] == \"\":\n\t\t\t\tdrafts.remove(draft)\n\n\t\treturn drafts\n\n\nclass Application(tk.Frame):\n\tdef __init__(self, master=None):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.pack()\n\t\tself.create_widgets()\n\n\tdef create_widgets(self):\n\t\tself.hi_there = tk.Button(self)\n\t\tself.hi_there[\"text\"] = \"Load File\\n(click me)\"\n\t\tself.hi_there[\"command\"] = self.say_hi\n\t\tself.hi_there.pack(side=\"top\")\n\t\tself.create_doc = tk.Button(self)\n\t\tself.create_doc[\"text\"] = \"Load File First\"\n\t\tself.create_doc[\"command\"] = self.errorPrint\n\t\tself.create_doc.pack(side=\"top\")\n\t\tself.quit = tk.Button(self, text=\"QUIT\", fg=\"red\",\n\t\t\t\t\t\t\t\tcommand=self.master.destroy)\n\t\tself.quit.pack(side=\"bottom\")\n\n\tdef say_hi(self):\n\t\tself.filename = filedialog.askopenfilename(initialdir = \"/\",\n\t\t\t\t\t\t\t\t\t\t\t\t\ttitle = \"Select draft file\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tfiletypes = ((\"CSV Draft\",\"*.csv\"),(\"all files\",\"*.*\"))\n\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\tself.hi_there[\"text\"] = \"File Found\"\n\t\tself.create_doc[\"text\"] = \"File Loaded\\n(click me)\"\n\t\tself.create_doc[\"command\"] = 
self.parse\n\t\tprint(self.filename)\n\n\tdef errorPrint(self):\n\t\tprint(\"No file loaded\")\n\n\tdef parse(self):\n\t\tmyCSV = SimpleCSV(self.filename)\n\t\tdraftList = myCSV.getDrafts()\n\n\t\tmainTree = Tree(\"HEAD\")\n\t\tfor draft in draftList:\n\t\t\tmainTree.addDraft(draft)\n\n\t\tmainMap = MindMap(\"test1\")\n\t\tmainMap.generateMindMap(mainTree)\n\t\tself.create_doc[\"text\"] = \"File Created!\"\n\n\nroot = tk.Tk()\napp = Application(master=root)\napp.mainloop()","sub_path":"Prototype.py","file_name":"Prototype.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"411338762","text":"from pathlib import Path\r\nfrom functools import partial\r\n\r\nimport joblib\r\nimport numpy as np\r\nfrom joblib import Parallel, delayed\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import GridSearchCV, cross_val_predict\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn.feature_selection import SelectKBest, VarianceThreshold\r\n\r\ntry:\r\n    from . import utils as ul\r\n    from . import metrics as mt\r\nexcept ImportError:\r\n    import utils as ul\r\n    import metrics as mt\r\n    \r\n\r\nnp.seterr(all='ignore')\r\n\r\n\r\ndef grid_search(x, y, param_grid, n_jobs):\r\n    grid = GridSearchCV(SVC(random_state=1), cv=5, n_jobs=n_jobs, param_grid=param_grid)\r\n    clf = grid.fit(x, y)\r\n    C, gamma = clf.best_params_['C'], clf.best_params_['gamma']\r\n    kernel = clf.best_params_['kernel']\r\n    return C, gamma, kernel\r\n\r\n\r\ndef train(x, y, clf, out):\r\n    model = clf.fit(x, y)\r\n    joblib.dump(model, out)\r\n\r\n\r\n# it's discarded\r\ndef batch_train(in_dir, out_dir, C, gamma, n_job):\r\n\r\n    def process_func(file, C, gamma):\r\n        _, (x, y) = ul.load_data(file, normal=True)\r\n        model = train(x, y, C, gamma)\r\n        return model\r\n    train_func = partial(process_func, C=C, gamma=gamma)\r\n    dirs = Path(in_dir)\r\n    out_dir = Path(out_dir)\r\n    with Parallel(n_jobs=n_job) as train_pa, Parallel(n_jobs=int(n_job/2),\r\n                                                      backend='threading') as save_pa:\r\n        for type_ in dirs.iterdir():\r\n            models = train_pa(delayed(train_func)(file) for file in type_.iterdir())\r\n            type_name = type_.name\r\n            type_dir = out_dir / type_name\r\n            type_dir.mkdir(exist_ok=True)\r\n            save_pa(delayed(save_model)(m, type_dir/ f.stem) for m,f in zip(models, type_.iterdir()))\r\n\r\n\r\ndef predict(x, model):\r\n    y_pred = model.predict(x)\r\n    try:\r\n        y_prob = model.predict_proba(x)  # only available when the model was trained with probability=True\r\n    except:\r\n        y_prob = None\r\n    return y_pred, y_prob\r\n\r\n\r\ndef cv_predict(clf, x, y, cver, method=\"predict\"):\r\n    y_pred = cross_val_predict(clf, x, y, cv=cver, method=method)\r\n    return y_pred\r\n\r\n\r\ndef evaluate(x, y, cv, clf):\r\n    k = int(cv)\r\n    if k in (-1, 1):\r\n        metric_dic = mt.loo_metrics(clf, x, y)\r\n    elif int(k) > 1:\r\n        metric_dic = mt.cv_metrics(clf, x, y, cv)\r\n    else: ## hold out\r\n        pass\r\n    return metric_dic \r\n\r\n\r\ndef batch_evaluate(in_dir, out_dir, cv, clf, n_job):\r\n    \r\n    def eval_(file, cv, clf):\r\n        _, (x, y) = ul.load_data(file)\r\n        metric_dic = evaluate(x, y, cv, clf)\r\n        return metric_dic\r\n    \r\n    eval_func = partial(eval_, cv=cv, clf=clf)\r\n    all_metric_dic = {}\r\n    with Parallel(n_jobs=n_job) as eval_pa:\r\n        for type_ in in_dir.iterdir():\r\n            metrics_iter = eval_pa(delayed(eval_func)(file) for file in type_.iterdir())\r\n            all_metric_dic[type_] = [i for i in metrics_iter]\r\n    return 
all_metric_dic\r\n\r\n\r\ndef feature_select(x, y, step, cv, clf, n_jobs):\r\n scaler = Normalizer()\r\n scaler_ft = partial(scaler.fit_transform, y=y)\r\n selector = VarianceThreshold()\r\n new_x = selector.fit_transform(x)\r\n score_idx = selector.get_support(indices=True)\r\n sb = SelectKBest(k='all')\r\n new_data = sb.fit_transform(new_x, y)\r\n f_value = sb.scores_\r\n idx_score = [(i, v) for i, v in zip(score_idx, f_value)]\r\n rank_score = sorted(idx_score, key=lambda x: x[1], reverse=True)\r\n feature_idx = [i[0] for i in rank_score]\r\n evla_func = partial(evaluate, y=y, cv=cv, clf=clf)\r\n feature_num = len(feature_idx)\r\n step_num = feature_num // step\r\n if step_num * step == feature_num:\r\n step_num = step_num + 1\r\n else:\r\n step_num = step_num + 2\r\n result_ls = Parallel(n_jobs=n_jobs)(\r\n delayed(evla_func)(\r\n scaler_ft(x[:, feature_idx[:i*step]])) for i in range(1, step_num))\r\n return result_ls, feature_idx\r\n","sub_path":"raatk/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"56711812","text":"#!/usr/bin/env python\nimport argparse, os\nimport ray\nfrom utils import *\n\n# Define the parser\nparser = argparse.ArgumentParser(description='Take user-defined run-time \\\n arguments')\n\n# Declare an argument, and using the default value if the argument isn't given\nparser.add_argument('pdbdir', help='Directory of candidate scaffold PDBs')\nparser.add_argument('outputdir', help='Output directory for rosetta files, \\\n scorefile, etc.')\nparser.add_argument('targetdir', help='Directory containing target catalytic \\\n pocket conformations')\nparser.add_argument('-a', '--alpha', dest='alpha', default=0.80,\n metavar='A', help='Percentage cutoff for minimum number of \\\n alpha spheres in fpocket, default behavior \\\n is 0.80 of target structure')\nparser.add_argument('-c', '--cutoff', dest='cutoff', default=1.50,\n metavar='C', help='Percentage cutoff for maximum number of \\\n alpha spheres in fpocket,default behavior \\\n is 1.50 of target structure')\nparser.add_argument('-f', '--filter', dest='filt', default=0.7,\n metavar='F', help='Minimum shared %% identity')\nparser.add_argument('-ht','--hits', dest='hilt', default=1,\n metavar='H', help='Minimum number of %% identity hits')\nparser.add_argument('-s','--screen', dest='screen', default=0.5,\n metavar='S', help='Short screen filter to determine whether \\\n translational sampling occurs')\nparser.add_argument('-cp','--checkpoint', dest='checkpoint', default=False,\n metavar='P', help='The name of checkpoint file to read in order \\\n to restart a run')\nparser.add_argument('-mp', '--multiprocessing', dest='mp', default=1,\n metavar='M', help='Proportion of cpu threads to use in \\\n multiprocessing. If set to 0, multi\\\n processing is turned off. 
Defaults to all \\\n                    available threads for use on hpc resources.')\n\n# Now, parse the command line arguments and \n# store the values in the arg variable\nargs = parser.parse_args()\n\n# initialize all options to corresponding variables\npdbdir = args.pdbdir\noutputdir = args.outputdir\ntargetdir = args.targetdir\nalpha = float(args.alpha)\ncutoff = float(args.cutoff)\nmin_intersect = float(args.filt)\nmin_hits = int(args.hilt)\nscreen = float(args.screen)\ncheckpoint = args.checkpoint\nmultiprocessing = int(args.mp)\n\n# make sure directories have correct formatting\npdbdir = check_format(pdbdir)\noutputdir = check_format(outputdir)\ntargetdir = check_format(targetdir)\n\n# run preprocessing\npreprocessed = preprocess(checkpoint, pdbdir, targetdir, alpha, cutoff, outputdir)\ntracker, t, s, short_sample, vol = preprocessed\n\n# initialize scorefile\ngen_scorefile(outputdir)\n\n# run pocketSearch\nif multiprocessing:\n    # wrap with ray to enable multiprocessing\n    pocket_search = ray.remote(pocket_search)\n    \n    # initialize ray\n    ray.init()\n    \n    # obtain parameters for ray\n    shared = [outputdir, pdbdir, targetdir, t, s, short_sample, \n              min_intersect, vol, screen, min_hits]\n    params = [[i, structure] + shared for i, structure in enumerate(tracker)]\n\n    # setup futures\n    futures = [pocket_search.remote(*par) for par in params]\n    \n    # run ray\n    _ = ray.get(futures)\n\nelse:\n    # run in serial on one thread\n    for i, structure in enumerate(tracker):\n        pocket_search(i, structure, outputdir, pdbdir, targetdir, t, s, \n                      short_sample, min_intersect, vol, screen, min_hits) \n\npostprocessing(outputdir)\n\nif os.path.exists('checkpoint.chk'):\n    os.remove('checkpoint.chk')\n","sub_path":"pocketSearch.py","file_name":"pocketSearch.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"293512538","text":"# -*- coding:utf-8 -*-\n__author__ = 'rudolf'\n\n# database connection configuration\n\ndb_connects={\"wallet\": \"mysql://root:1qaz@WSX@99.48.66.40:3306/wallet?charset=utf8\", # wallet data\n             \"credit\": \"mysql://creditlimit@mime:SIT_mime2016@99.48.58.196:3306/creditlimit?charset=utf8\", # credit limit system\n             \"coupon\": \"mysql://couponuser:couponuser@99.48.58.196:3306/coupon?charset=utf8\", # coupon system\n             \"user\": \"mssql+pymssql://xingwang.han:MiMe@2015@99.48.66.112:1433/memedaidb?charset=utf8\", # user database\n             }\n\n\ntable_colums={\"CRM.MEMBER\":(\"MEMBER_ID\",\"CREATE_TIME\",\"MEMBER_TYPE\",\"MEMBER_NAME\",\"MOBILE_NO\", \"source\"), # member info table, with an extra source column\n              \"apply_info\":(\"apply_type\",\"member_id\",\"cellphone\",\"identification_name\",\"identification_id\",\"apply_status\",\"created_datetime\",\"apply_no\",\"zx_result\"), # wallet application table\n              \"money_box\":(\"member_id\",\"apply_no\",\"actived_datetime\",\"expired_date\"), # money box table\n              \"money_box_order\":(\"order_no\",\"member_id\",\"order_type\",\"status\",\"amount\",\"repayment_periods\",\"repayment_type\",\n                                 \"merchant_id\",\"store_id\",\"product_id\",\"product_name\",\"mobile\",\"allies_code\",\"merchant_industry_type\",\"merchant_cl_type\"), # order table\n              \"credit_cl_credit_limit\":(\"member_id\",\"category_id\",\"credit_limit\",\"expire_date\"), # credit limit system\n              \"coupon_CP_COUPON_CODE\":(\"COUPON_ID\",\"COUPON_CODE\",\"PICK_UP_TIME\",\"PICK_UP_CHANNEL\",\"COUPON_NAME\",\"STATUS\",\"MEMBER_NAME\",\n                                       \"MEMBER_PHONE\",\"MEMBER_ID\",\"COUPON_TYPE\",\"EFFECTIVE_TIME\",\"EXPIRY_TIME\",\"DISCOUNT\",\"VALUE\",\"PERIODS\",\"RANGE_TYPE\"), # coupon code table\n              
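# (each column tuple presumably lists the fields the data generator fills for that table)\n              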
\"CRM.ID_CARD\":(\"INPUT_NAME\",\"INPUT_ID_NO\",\"MEMBER_ID\",\"NAME\",\"ID_NO\",\"OCR_ID_NO\",\"KEYIN_STATUS\"), # ID card info table\n              \"APPL.A_APPL\":(\"MEMBER_ID\",\"APPL_AMT\",\"APPL_NO\",\"PRODUCT\",\"APPL_TERM\",\"APPL_REPAY_METHOD\",\"ALLIES_CODE\",\"PROGRAM_CODE\",\"APPL_TIME\",\n                             \"CLUSTER_NO\",\"ROLE\",\"EXISTING_FLAG\"),\n              \"CRM.BANK_CARD\":(\"ID\",\"MEMBER_ID\",\"CARD_NO\", \"NAME\",\"CARD_NO_SNAP\",\"ISSUE_BANK_NAME\",\"ISSUE_BANK\",\"ISSUE_BANK_BRANCH\", \"CHANNEL\",\n                               \"IS_BIND_FASTPAYMENT\", \"CREATE_TIME\",\"BANK_PHONE\",\"DEFAULT_USE\",\"CARD_TYPE\",\"IS_VALID\",\"CARD_BIN\",\"CARD_LEVEL\",\"ID_NO\",\"PURPOSE\"),\n              \"FSS.ACCOUNT_CASH\":(\"ACCOUNT_NO\",\"MEMBER_ID\",\"MERCHANT_NO\",\"BALANCE\"),\n              \"CRM.MEMBER_WECHAT\":(\"MEMBER_ID\",\"SUBSCRIBE\",\"OPENID\",\"NICKNAME\",\"SEX\",\"LANGUAGE\",\"HEADIMGURL\",\"SUBSCRIBE_TIME\",\"UNIONID\",\"FIRST_SUBSCRIBE_TIME\",\"LAST_UPDATE_time\")\n              }\n\n\n\nsql_phone = \"select count(1) from CRM.MEMBER where MOBILE_NO =\"\n\nsql_idcard = \"select count(1) from CRM.ID_CARD WHERE ID_NO=\"\n\nsql_sequence = \"select NEXT_VALUE from CRM.SEQUENCE where SEQ_NAME='MEMBER_ID'\"\n\nmerchent_sql = \"select ALLIES_NAME ,BANK_NAME ,ACCOUNT_NO,UNIONPAY_BANK_NUMBER from CRM.ALLIES where ALLIES_CODE = \"\n\n\nProductInfo_sql = 'select PROGRAM_NAME,ENUM_AMT,BEGIN_AMT,END_AMT,AVAILABLE_PERIOD,AVAILABLE_REPAY_METHOD, \\\n                   pa.APR,pf.RATE_VALUE \\\n                   from MKT.PROGRAM pr inner join CRM.ALLIES al on pr.MERCHANT=al.ALLIES_CODE LEFT join \\\n                   FSS.PRICING_APR pa on pr.PROGRAM_CODE=pa.PROGRAM_CODE LEFT join FSS.PRICING_FEE pf on pr.PROGRAM_CODE=pf.PROGRAM_CODE \\\n                   WHERE pr.EXPIRE_TIME>GETDATE() AND pr.PROGRAM_CODE= '\n\nREPAY_METHOD = {\"554\": \"INSTALLMENT\", \"331\": \"AVERAGE_CAPITAL_PLUS_INTEREST\"}\n\n########## dubbo configuration for loan creation ############\n\nloans_interface_name = \"cn.memedai.loan.facade.business.DubboLoanCreateBusiness\"\nloans_method_name = \"createLoan\"\nparameter_types = [\"cn.memedai.loan.facade.request.LoanCreateForm\"]\nzookerpath = \"zookeeper://99.48.66.13:2181\"\nversion = '1.0.0'\n\n\n","sub_path":"Public/publiclibary/data_generator/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"326084385","text":"#!/usr/bin/env python3\n\"\"\"API module\"\"\"\nimport requests\nfrom datetime import datetime\n\n\nif __name__ == '__main__':\n    base_url = \"https://api.spacexdata.com/v3\"\n\n    response = requests.get(base_url + \"/rockets\")\n    content = response.json()\n\n    rockets = []\n\n    for rocket in content:\n        rockets.append(rocket['rocket_name'])\n\n    launches = dict()\n    for rocket in rockets:\n        payload = {\"rocket_name\": rocket}\n        response = requests.get(base_url + \"/launches\", params=payload)\n        content = response.json()\n        launches[rocket] = len(content)  # key by the rocket variable; the literal 'rocket' would overwrite one entry\n\n        print(\"{}: {}\".format(rocket, len(content)))\n","sub_path":"pipeline/0x01-apis/4-rocket_frequency.py","file_name":"4-rocket_frequency.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"581204695","text":"from egnyte.tests.config import IntegrationCase\n\nclass TestEvents(IntegrationCase):\n    def setUp(self):\n        super(TestEvents, self).setUp()\n        self.root_folder.create()\n        self.filepath = self.root_folder.path + '/search/test1.txt'\n\n    def test_filter_poll(self):\n        events = self.client.events\n        events = events.filter(events.oldest_event_id)\n        results = events.poll(count=1)\n        
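# a cursor anchored at oldest_event_id should see at least one stored event,\n        # and a non-empty poll is expected to advance the cursor\n        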
self.assertNotEqual(0, len(results), \"Poll results should not be empty\")\n        self.assertNotEqual(events.start_id, events.oldest_event_id, \"start_id should have been bumped after a non-empty poll\")\n","sub_path":"egnyte/tests/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"305243762","text":"# storage functions\nimport time\nfrom constant import ip_headers,raw_ip_list\nfrom pprint import pprint\nimport requests\nfrom random import choice\nfrom lxml import etree\nimport csv\n\n# randomly pick one element from a list\ndef random_ip(L):\n    return choice(L)\n\n# filter out IPs that cannot crawl Douban\ndef sift_ip(ip_list,url):\n    iptr_list=[]\n    for ip in ip_list:\n        proxies = {\"http\": ip}\n        r=requests.get(url, proxies=proxies,timeout=10)\n        print(ip,url,r.status_code)\n        with open('ip_status_data.csv','a+',newline='',encoding='utf8') as csvfile:\n            writer = csv.writer(csvfile)\n            L=[ip,url,r.status_code]\n            writer.writerow(L)\n            csvfile.close()\n        time.sleep(3)\n        # try:\n        #     r=requests.get(\"https://movie.douban.com/subject/26393561/?from=showing\", proxies=proxies)\n        #     # print(r.text)\n        #     print(ip,r.status_code)\n        #     if(r.status_code==200):\n        #         iptr_list.append(ip)\n        # except Exception as e:\n        #     print(e,ip,'fail')\n\n# given a file name and a list, append the list as one CSV row\ndef write_csv(file,L):\n    with open(file,'a+',newline='') as csvfile:\n        writer = csv.writer(csvfile)\n        writer.writerow(L)\n        csvfile.close()\n\n# harvest proxy IPs from the website\ndef generate_ip():\n    ip=random_ip(raw_ip_list)\n    ip='http://'+ip\n    proxies = {\"http\": ip}\n\n    # initialise once, before the loop, so IPs from every page survive for the final write below\n    iptr_list=[]\n    for i in range(1,100):\n        ip_url='https://www.kuaidaili.com/free/inha/%d/' %i\n\n        ip_req=requests.get(ip_url,timeout=10,proxies=proxies)\n        selector = etree.HTML(ip_req.text)\n        ip_list=selector.xpath('//td[@data-title=\"IP\"]/text()')\n        port_list=selector.xpath('//td[@data-title=\"PORT\"]/text()')\n        iptr_list.extend(list(map(lambda x, y: 'http://'+ x + ':' + y, ip_list, port_list)))\n        print('page %d, %d entries crawled' %(i,len(iptr_list)))\n        time.sleep(3)\n\n    for ip in iptr_list:\n        with open('ip_data.csv','a+',newline='',encoding='utf8') as csvfile:\n            writer = csv.writer(csvfile)\n            L=[ip]\n            writer.writerow(L)\n            csvfile.close()\n\n# given a url and a proxy ip, record the scrape result\ndef parse_web(url,ip):\n    proxies = {\"http\": ip}\n    data={}\n    r=requests.get(url, proxies=proxies,timeout=10)\n    if(r.status_code==404):\n        print(url,'404 page not found')\n        L=[url,r.status_code]\n        write_csv('douban_url_404.csv',L)\n    elif(r.status_code==403):\n        print(url,'403 access denied')\n        L=[url,r.status_code]\n        write_csv('douban_url_403.csv',L)\n    elif(r.status_code==200):\n        selector = etree.HTML(r.text)\n        # print(r.text)\n        data['name']=selector.xpath(\"//div[@id='content']/h1/span[@property='v:itemreviewed']/text()\")[0]\n        data['year']=selector.xpath(\"//div[@id='content']/h1/span[@class='year']/text()\")[0]\n        data['director']=selector.xpath(\"//div[@id='info']/span[1]/span[@class='attrs']/a/text()\")\n        data['editor']=selector.xpath(\"//div[@id='info']/span[2]/span[@class='attrs']/a/text()\")\n        data['actor']=selector.xpath(\"//div[@id='info']/span[@class='actor']/span[@class='attrs']/a/text()\")\n        data['classification']=selector.xpath(\"//div[@id='info']/span[@property='v:genre']/text()\")\n        data['making_nation']=[element.strip() for element in selector.xpath(\"//div[@id='info']/text()\") if len(element.strip())>1][0]\n\n        try:\n            data['score']=selector.xpath(\"//div[@class='rating_self clearfix']/strong/text()\")[0]\n            data['evaluator_count']=selector.xpath(\"//a[@class='rating_people']/span/text()\")[0]\n            
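# star-rating histogram: percent_5 .. percent_1 hold the page's shares of 5-star down to 1-star votes\n            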
data['percent_5']=selector.xpath(\"//div[@class='ratings-on-weight']/div[1]/span[@class='rating_per']/text()\")[0]\n            data['percent_4']=selector.xpath(\"//div[@class='ratings-on-weight']/div[2]/span[@class='rating_per']/text()\")[0]\n            data['percent_3']=selector.xpath(\"//div[@class='ratings-on-weight']/div[3]/span[@class='rating_per']/text()\")[0]\n            data['percent_2']=selector.xpath(\"//div[@class='ratings-on-weight']/div[4]/span[@class='rating_per']/text()\")[0]\n            data['percent_1']=selector.xpath(\"//div[@class='ratings-on-weight']/div[5]/span[@class='rating_per']/text()\")[0]\n\n            data['watchmen']=selector.xpath(\"//div[@class='subject-others-interests-ft']/a[1]/text()\")[0]\n            data['latent_watchmen']=selector.xpath(\"//div[@class='subject-others-interests-ft']/a[2]/text()\")[0]\n\n            data['brief_comment_count']=selector.xpath(\"//div[@id='comments-section']/div[1]/h2/span/a/text()\")[0]\n            data['voting_for_brief_comment']=selector.xpath(\"//div[@id='hot-comments']/div[@class='comment-item']/div[@class='comment']/h3/span[@class='comment-vote']/span[@class='votes']/text()\")\n\n            data['film_comment_count']=selector.xpath(\"//section[@class='reviews mod movie-content']/header/h2/span/a[@href='reviews']/text()\")[0]\n\n            data['tags']=selector.xpath(\"//div[@class='tags-body']/a/text()\")\n\n        except IndexError:\n            data['score']=''\n            data['evaluator_count']=''\n            data['percent_5']=''\n            data['percent_4']=''\n            data['percent_3']=''\n            data['percent_2']=''\n            data['percent_1']=''\n            data['watchmen']=''\n            data['latent_watchmen']=''\n            data['brief_comment_count']=''\n            data['voting_for_brief_comment']=''\n            data['film_comment_count']=''\n            data['tags']=''\n\n        try:\n            data['language']=[element.strip() for element in selector.xpath(\"//div[@id='info']/text()\") if len(element.strip())>1][1]\n        except IndexError:\n            data['language']=''\n        data['on_date']=selector.xpath(\"//div[@id='info']/span[@property='v:initialReleaseDate']/text()\")\n        try:\n            data['film_length']=selector.xpath(\"//div[@id='info']/span[@property='v:runtime']/text()\")[0]\n        except IndexError:\n            data['film_length']=''\n\n        # pprint(data)\n\n        L=[data['name'],data['year'],data['score'],data['evaluator_count'],\n           data['percent_5'],data['percent_4'],data['percent_3'],data['percent_2'],\n           data['percent_1'],data['director'],data['editor'],data['actor'],\n           data['classification'],data['making_nation'],data['language'],\n           data['on_date'],data['film_length'],data['tags'],data['watchmen'],data['latent_watchmen'],\n           data['brief_comment_count'],data['voting_for_brief_comment'],data['film_comment_count']]\n        write_csv('douban_movie_data.csv',L)\n        print(url,'scraped')\n    else:\n        L=[url,r.status_code]\n        write_csv('douban_url_unknow.csv',L)\n        print(url,'unknown status')\n\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"437389652","text":"import sys, UI\nimport requests\nimport re, os\nfrom PyQt4 import QtCore, QtGui, QtDeclarative\nfrom PyQt4.QtCore import QTimer\nfrom PyQt4.QtNetwork import *\nimport threading\nimport time\n\napp = QtGui.QApplication(sys.argv)\nwindow = QtGui.QWidget()\nUI = UI.Ui_PARSER()\nUI.setupUi(window)\n\n\npath = ''\nurl = ''\nexcept_url = []\nimage_size = 500\n\nQPixmap_images = ['None','None','None', 'None', 'None']\n\n\n\n\ndef Qimages():\n    \n\n    path = os.path.abspath(UI.lineEdit_PATH.text())\n\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    if os.listdir(path):\n        global QPixmap_images\n        files 
=[os.path.join(path,i) for i in os.listdir(path)]\n image = os.path.join(path, max(files, key=os.path.getctime))\n QPixmap_images.append(image)\n\n UI.label_image01.setPixmap(QtGui.QPixmap(r'{}'.format(QPixmap_images[-1])))\n UI.label_image02.setPixmap(QtGui.QPixmap(r'{}'.format(QPixmap_images[-2])))\n UI.label_image03.setPixmap(QtGui.QPixmap(r'{}'.format(QPixmap_images[-3])))\n UI.label_image04.setPixmap(QtGui.QPixmap(r'{}'.format(QPixmap_images[-4])))\n UI.label_image05.setPixmap(QtGui.QPixmap(r'{}'.format(QPixmap_images[-5])))\n\n\nQimages_Timer = QTimer()\nQimages_Timer.timeout.connect(Qimages)\nQimages_Timer.start(1000)\n\n# DATA PREPARATION IN LIST BY URL\ndef data(url):\n data = []\n deep_data = []\n\n if not re.findall(r'(htt.{1,3}://.+?)', url):\n url = \"http://\" + url\n\n ROOT_URL = re.findall(r'(^http[s]*://.+\\.(ua|com|ru|me|net|io|to)).*', url)[0]\n page = requests.get(url='{}'.format(url))\n print(\"url :\",url)\n print(\"ROOT_URL :\", ROOT_URL)\n #################################################################################\n def images(pattern: str, text: str):\n pattern = pattern\n URLs = re.findall(pattern, text)\n for i in URLs:\n if not re.findall(\"(^htt.*)\", i) and re.findall(\"(jpg|png|gif|ico)\", i):\n print(\"1 =\", i)\n data.append(str(ROOT_URL[0]) + \"/\" + i)\n\n elif re.findall(\"(attachment)\", i):\n print(\"2 =\", i)\n data.append(str(ROOT_URL[0]) + \"/\" + i)\n\n elif re.findall(\"(^htt.*jpg|png|gif|ico.*)\", i):\n print(\"3 =\", i)\n data.append(i)\n\n else:\n deep_data.append(i)\n print(\"deep_data: \",i)\n\n #################################################################################\n\n\n RSS_URLs = re.findall(r'.*\"(htt.*rss)\".*', page.text)\n if RSS_URLs:\n print(RSS_URLs)\n RSS = requests.get(url='{}'.format(RSS_URLs[0]))\n images(r'.*img src\\=\"(.*?)\\\"', RSS.text)\n images(r'\"(htt.{1,3}://.+?)\"',RSS.text)\n\n images(r'.*img src\\=\"(.*?)\\\"', page.text)\n images(r'\"(htt.{1,3}://.+?)\"', page.text)\n images(r'src=\"(/attachment[\\w?\\.\\=]*[0-9]*)\"', page.text)\n\n if UI.checkBox.checkState():\n for i in set(deep_data):\n try:\n deep_URLs = requests.get(url='{}'.format(i))\n images(r'.*img src\\=\"(.*?)\\\"', deep_URLs.text)\n images(r'\"(htt.{1,3}://.+?)\"', deep_URLs.text)\n images(r'src=\"(/attachment[\\w?\\.\\=]*[0-9]*)\"', deep_URLs.text)\n except:\n print(\"404 -\", i)\n\n return set(data)\n\n\n# SAVE IMAGE BY URL\ndef save(url: str):\n path = dir_path_exists() + '\\\\'\n try:\n name = naming(url)\n image = requests.get(url)\n if image.content.__sizeof__() > int(float(UI.lineEdit_SIZE_value.text())*1e+3):\n with open('{}{}'.format(path, name), \"wb\") as imgfile:\n imgfile.write(image.content)\n except:\n print(\"can`t save: \", url)\n\n# NAME IMAGE BY URL\ndef naming(url: str):\n url_separator = re.split('\\/|\\?', url)\n type_of_image = re.findall(\".*(jpg|png|gif|ico).*\", url)\n name_of_image = re.findall('([=a-zA-Z0-9_-]*).*', url_separator[-1])\n if type_of_image:\n return str(name_of_image[0]) + '.' + str(type_of_image[0])\n else:\n return str(name_of_image[0]) + '.' 
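Parser.py's naming() above reconstructs file names with two regexes; the standard library's urllib.parse handles query strings and percent-encoding directly. A hedged alternative sketch follows (the function name is illustrative and not part of the record); it falls back to .png when the extension is missing, as the original does.

```python
import os
from urllib.parse import unquote, urlparse


def name_from_url(url, default_ext=".png"):
    """Derive a file name from a URL, defaulting to .png like the original."""
    path = unquote(urlparse(url).path)        # drops the query string, decodes %xx
    base = os.path.basename(path) or "image"  # last path segment, if any
    root, ext = os.path.splitext(base)
    if ext.lower() not in (".jpg", ".jpeg", ".png", ".gif", ".ico"):
        ext = default_ext
    return root + ext


print(name_from_url("http://example.com/a/b/photo.jpg?x=1"))  # -> photo.jpg
```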
+ str(\"png\")\n\n# DIR PATH EXISTS\ndef dir_path_exists():\n if not os.path.exists(os.path.abspath(UI.lineEdit_PATH.text())):\n if UI.lineEdit_PATH.text() == None:\n UI.lineEdit_PATH.setText('temp')\n os.mkdir(os.path.abspath(UI.lineEdit_PATH.text()))\n\n else:\n pass\n return str(os.path.abspath(UI.lineEdit_PATH.text()))\n# OPEN DIR\ndef open_dir():\n dir_path_exists()\n os.startfile(os.path.abspath(UI.lineEdit_PATH.text()))\n\n\nclass programThreadsFor():\n def __init__(self, save, data):\n self._running = None\n self.save = save\n self.data = data\n self.progressBar_value = 0\n self.timer = QTimer()\n\n def progressBar(self):\n UI.progressBar.setProperty(\"value\", self.progressBar_value)\n\n def BODY(self):\n self.url = UI.lineEdit_URL.text()\n self.summ = len(self.data(self.url))\n\n for i in enumerate(self.data(self.url)):\n if self._running == False: break\n self.save(i[1])\n self.progressBar_value = (i[0]/self.summ)*100\n self.progressBar_value = 100\n\n def START(self):\n self.progressBar_value = 0\n self.timer.timeout.connect(self.progressBar)\n self.timer.start(1000)\n\n self.url = UI.lineEdit_URL.text()\n if not re.findall(r'(htt.{1,3}://.+?)', self.url):\n self.url = \"http://\" + self.url\n\n UI.webView.setUrl(QtCore.QUrl(self.url))\n self._running = True\n self.Thread = threading.Thread(target=lambda: self.BODY())\n self.Thread.start()\n\n def STOP(self):\n self.timer.stop()\n self.Thread.daemon\n self._running = False\n self.Thread.join()\n print('=== END ===')\n\nclass programThreads():\n def __init__(self, func):\n self.func = func\n\n def START(self):\n self.Thread = threading.Thread(target=lambda: self.func())\n self.Thread.start()\n\n\ntread_parse = programThreadsFor(save, data)\ntread_open_dir = programThreads(lambda: open_dir())\n\ndef horizontalSlider_func():\n UI.lineEdit_SIZE_value.setText(\"{}\".format(UI.horizontalSlider.value()))\n\n\n\nUI.horizontalSlider.valueChanged.connect(horizontalSlider_func)\n\nQtCore.QObject.connect(UI.pushButton_START,QtCore.SIGNAL(\"clicked()\"), lambda: tread_parse.START())\nQtCore.QObject.connect(UI.pushButton_STOP, QtCore.SIGNAL(\"clicked()\"), lambda: tread_parse.STOP())\nQtCore.QObject.connect(UI.pushButton_OPEN, QtCore.SIGNAL(\"clicked()\"), lambda: tread_open_dir.START())\n\n\n\nif __name__ == '__main__':\n window.show()\n sys.exit(app.exec_())\n","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"53165092","text":"# -*- coding:utf-8 -*-\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# Error Unicode equal comparison failed to convert both arguments to Unicode - interpreting\n# import sys\n#\n# reload(sys)\n# sys.setdefaultencoding('utf8')\n\n# Session 1\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headless')\noptions.add_argument('--disable-gpu')\noptions.add_argument('--no-sandbox')\noptions.add_experimental_option(\"prefs\", {\n \"download.default_directory\": r\"./\",\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"safebrowsing.enabled\": True\n})\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.get('https://XXXXX.slack.com/services/export')\n\ntry:\n WebDriverWait(driver, 15).until(\n 
EC.presence_of_element_located((By.ID, 'email')))\nexcept:\n print(\"Error : Can not loading page.\")\nprint(\"Loading success #1\")\n\nemail = driver.find_element_by_id('email')\nemail.clear()\nemail.send_keys('your email address')\n\npassword = driver.find_element_by_id('password')\npassword.clear()\npassword.send_keys('your password')\n\ndriver.find_element_by_id('signin_btn').click()\n\n# Session 2\ntry:\n WebDriverWait(driver, 15).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'c-input_select__wrapper')))\nexcept:\n print(\"Error : Can not loading page.\")\n\nprint(\"Loading success #2\")\n\nrange_input = driver.find_element_by_class_name(\n 'c-input_select__wrapper')\nrange_input.click()\nrange_list = range_input.find_elements_by_tag_name('span')\nfor item in range_list:\n if item.text == '過去 30日間':\n item.click()\n break\n\ndriver.find_element_by_xpath(\n '/html/body/div[1]/div[1]/div[2]/div[4]/div[2]/button').click()\n\nprint(\"Data exporting...\")\n\n# Session # 3\nwhile 1:\n try:\n sleep(5)\n table = driver.find_element_by_id('export_history')\n tbody = table.find_element_by_tag_name('tbody')\n tr = tbody.find_elements_by_tag_name('tr')\n tr[0].find_elements_by_tag_name('a')[1].click()\n print(\"Export success\")\n break\n except:\n driver.refresh()\n\nprint(\"File download finish\")\n# browser.close()\n","sub_path":"slack_export.py","file_name":"slack_export.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"145580549","text":"#!/usr/bin/env python3\nimport os, unittest, pickle, types, json\nimport quiz\n\nTEST_DIRECTORY = os.path.dirname(__file__)\n\n\n#############\n# Problem 1 #\n#############\n\nclass TestProblem1(unittest.TestCase):\n def _validate_trees(self, k, n=None, limit=None):\n def get_size(tree):\n if tree is None:\n return 0\n elif isinstance(tree, tuple) and len(tree) == 2:\n return 1 + get_size(tree[0]) + get_size(tree[1])\n return None\n\n def check_tree(tree, seen):\n size = get_size(tree)\n if size is None:\n self.fail('%s is not a binary tree' % (tree,))\n if size != k:\n self.fail('%s is not of size %d' % (tree, k))\n if tree in seen:\n self.fail('repeated tree: %s' % (tree,))\n seen.add(tree)\n\n # Check size and uniqueness.\n trees = set()\n if limit is None:\n for tree in quiz.binary_trees(k):\n check_tree(tree, trees)\n else:\n gen = iter(quiz.binary_trees(k))\n for i in range(limit):\n try:\n tree = next(gen)\n except StopIteration:\n self.fail('only %d trees returned, expected more' % (i,))\n check_tree(tree, trees)\n\n # Check count.\n if n is not None and len(trees) != n:\n self.fail('expected %d trees, got %d' % (n, len(trees)))\n\n def test_01(self):\n self._validate_trees(0, n=1)\n\n def test_02(self):\n self._validate_trees(1, n=1)\n\n def test_03(self):\n self._validate_trees(2, n=2)\n\n def test_04(self):\n self._validate_trees(3, n=5)\n\n def test_05(self):\n self._validate_trees(12, n=208012)\n\n def test_06(self):\n self._validate_trees(13, limit=10000)\n\n def test_07(self):\n self._validate_trees(100, limit=10000)\n\n def test_08(self):\n self._validate_trees(900, limit=10000)\n\n\n#############\n# Problem 2 #\n#############\n\nclass TestProblem2(unittest.TestCase):\n\n def validate_possible(self, n, init_bishop_locs, target):\n result = quiz.n_bishops(n, init_bishop_locs, target)\n self.assertTrue(result is not None)\n self.assertTrue(isinstance(result, set))\n self.assertTrue(len(result) >= target)\n 
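TestProblem1 above exercises a quiz.binary_trees(k) generator whose trees are None or (left, right) tuples and whose counts follow the Catalan numbers (5 shapes for k=3, 208012 for k=12). One plausible implementation satisfying those properties is sketched below; it is a standard enumeration, not necessarily the quiz author's solution.

```python
def binary_trees(k):
    """Lazily yield every binary tree shape with k nodes, each exactly once."""
    if k == 0:
        yield None  # the empty tree
        return
    for left_size in range(k):                  # nodes assigned to the left subtree
        for left in binary_trees(left_size):
            for right in binary_trees(k - 1 - left_size):
                yield (left, right)


# Catalan counts: 1, 1, 2, 5, 14, ...
assert sum(1 for _ in binary_trees(3)) == 5
```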
self.assertTrue(init_bishop_locs.issubset(result))\n self.assertTrue(\n all(((0 <= r < n) and (0 <= c < n) and isinstance(r, int) and isinstance(c, int) for r, c in result)))\n self.assertTrue(self._checker(result))\n\n def validate_impossible(self, n, init_bishop_locs, target):\n self.assertEqual(quiz.n_bishops(n, init_bishop_locs, target), None)\n\n def _checker(self, lb):\n sd = lambda z1, z2: (z1[0] - min(z1), z1[1] - min(z1)) == (z2[0] - min(z2), z2[1] - min(z2)) or sum(z1) == sum(z2)\n for z1 in lb:\n for z2 in lb:\n if z1 != z2:\n if sd(z1, z2): return False\n return True\n\n def test_01(self):\n # n=3 initially empty board -> possible\n self.validate_possible(3, set(), 4)\n\n def test_02(self):\n # n=4 initially partially filled board -> possible\n self.validate_possible(4, {(3, 0), (3, 2), (0, 2)}, 6)\n\n def test_03(self):\n # n=4 initially partially filled board -> impossible\n self.validate_impossible(4, {(3, 0), (3, 2), (0, 2)}, 7)\n\n def test_04(self):\n # n=13 initially partially filled board -> impossible\n self.validate_impossible(13,\n {(12, 2), (11, 7), (4, 9), (8, 2), (11, 6), (5, 11), (3, 3), (8, 11), (5, 7), (11, 12),\n (0, 11), (4, 3), (12, 9), (12, 3), (5, 3)}, 20)\n\n def test_05(self):\n # n=18 initially partially filled board -> possible\n self.validate_possible(18, \n {(17, 4), (0, 1), (9, 13), (6, 8), (17, 9), (3, 1), \n (0, 13), (7, 13), (17, 12), (0, 9)}, 30)\n\n def test_06(self):\n # n=25 initially partially filled board -> possible\n self.validate_possible(25, \n {(23, 4), (23, 19), (5, 10), (22, 24), (22, 15), (0, 6),\n (22, 4), (10, 24), (14, 15), (4, 3), (6, 15), (20, 15), (15, 5), (13, 10)}, 39)\n\n\n#############\n# Problem 3 #\n#############\n\nclass TestProblem3(unittest.TestCase):\n def _test_insert_with_file(self, file):\n with open(file, 'r') as f:\n data = json.load(f)\n points = set(zip(data['x'], data['y']))\n width = data['width']\n height = data['height']\n quadtree = quiz.QuadTree(0, 0, width, height)\n self._test_insert(quadtree, points)\n\n def _test_insert(self, quadtree, points):\n for point in points:\n quadtree.insert(point)\n\n # validate quadtree structure\n is_valid, message = self._is_valid(quadtree)\n self.assertTrue(is_valid, message) #\n if isinstance(points, list):\n points = set(points)\n # check to make sure all points were added\n self.assertEqual(sorted(points), sorted(self._get_all(quadtree)), \"All points different from inserted points\")\n for point in points:\n self.assertTrue(self._find_point(quadtree, point), \"Cannot find point {} in quadtree\".format(point))\n\n # Retrieves all points in this quadtree\n def _get_all(self, quadtree):\n points = []\n if quadtree.children is not None:\n for child in quadtree.children:\n points.extend(self._get_all(child))\n else:\n points = list(quadtree.points)\n return points\n\n # Checks if this quadtree is valid (follows the invariants listed at the top)\n def _is_valid(self, quadtree):\n # Checks if the ranges are valid\n if quadtree.x_start > quadtree.x_end or quadtree.y_start > quadtree.y_end:\n return False, \"Node has invalid range\"\n\n # If the quadtree has children, it should have four\n if quadtree.children is not None and len(quadtree.children) == 4:\n # Check that the children do not overlap\n for c1 in quadtree.children:\n for c2 in quadtree.children:\n if c1 is not c2 and not (c1.x_start >= c2.x_end or c2.x_start >= c1.x_end \\\n or c1.y_start >= c2.y_end or c2.y_start >= c1.y_end):\n return False, \"Children ranges overlap\"\n\n # Check that its children are also 
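The _checker lambda above compares every pair of bishops; the rule it encodes is that two bishops clash exactly when they share an r - c diagonal or an r + c anti-diagonal, so a candidate placement can be validated in a single pass over the pieces. A hedged sketch:

```python
def placement_ok(bishops):
    """True when no two bishops share a diagonal or an anti-diagonal."""
    diagonals, anti_diagonals = set(), set()
    for r, c in bishops:
        if (r - c) in diagonals or (r + c) in anti_diagonals:
            return False
        diagonals.add(r - c)
        anti_diagonals.add(r + c)
    return True


assert placement_ok({(0, 0), (0, 2)})      # different diagonals
assert not placement_ok({(0, 0), (1, 1)})  # same r - c diagonal
```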
valid recursively\n for child in quadtree.children:\n if not self._is_valid(child):\n return False, \"Child node is not valid\"\n\n # Check that its points is None\n if quadtree.points is not None:\n return False, \"Non-leaf node's points should be None\" #\n\n elif quadtree.children is None:\n # Should have four or less points\n if len(quadtree.points) > 4:\n return False, \"Leaf node should have 4 or less points\"\n\n # Points should be in range\n for (x, y) in quadtree.points:\n if x < quadtree.x_start or x >= quadtree.x_end or y < quadtree.y_start or y >= quadtree.y_end:\n return False, \"Point should be in range of the node's range\"\n else:\n return False, \"Non-leaf node does not have 4 children\"\n\n return True, \"Success!\"\n\n def _find_point_in_quadtree(self, q, point):\n \"\"\"\n Returns True if point exists in the quadtree\n :param point: point (x, y) to find\n \"\"\"\n x, y = point\n if q.children is not None:\n for c in q.children:\n # If a child's range includes the point, recurse into this child\n if x >= c.x_start and x < c.x_end and y >= c.y_start and y < c.y_end:\n return self._find_point_in_quadtree(c, point)\n else:\n # If the quadtree doesn't have children, we can check for the point\n if point in q.points:\n return True\n else:\n return False\n\n def _find_point(self, quadtree, point):\n x, y = point\n if quadtree.children is not None:\n for c in quadtree.children:\n # If a child's range includes the point, recurse into this child\n if x >= c.x_start and x < c.x_end and y >= c.y_start and y < c.y_end:\n return self._find_point_in_quadtree(c, point)\n else:\n # If the quadtree doesn't have children, we can check for the point\n if point in quadtree.points:\n return True\n else:\n return False\n\n def test_01(self):\n width = 5\n height = 5\n quadtree = quiz.QuadTree(0, 0, width, height)\n points = {(0, 0), (2, 4)}\n self._test_insert(quadtree, points)\n\n def test_02(self):\n width = 10\n height = 20\n quadtree = quiz.QuadTree(0, 0, width, height)\n points = {(7, 6), (6, 8), (8, 9), (9, 6), (5, 2)}\n self._test_insert(quadtree, points)\n\n def test_03(self):\n width = 5\n height = 10\n quadtree = quiz.QuadTree(0, 0, width, height)\n points = [(0, 1), (0, 1), (2, 3), (1, 8), (2, 3)]\n self._test_insert(quadtree, points)\n\n def test_04(self):\n self._test_insert_with_file('resources/insert_quadtree_many_points_small_rectangle.json')\n\n def test_05(self):\n self._test_insert_with_file('resources/insert_quadtree_small.json')\n\n def test_06(self):\n self._test_insert_with_file('resources/insert_quadtree_medium.json')\n\n def test_07(self):\n self._test_insert_with_file('resources/insert_quadtree_large.json')\n\n\nif __name__ == '__main__':\n res = unittest.main(verbosity=3, exit=False)\n","sub_path":"quiz2/test_b.py","file_name":"test_b.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"203982900","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport 
freezegun\nimport pretend\nimport pytest\nimport redis\n\nfrom zope.interface.verify import verifyClass\n\nfrom warehouse.packaging.interfaces import IDownloadStatService\nfrom warehouse.packaging.services import RedisDownloadStatService\n\n\n@freezegun.freeze_time(\"2012-01-14\")\nclass TestRedisDownloadStatService:\n\n    def test_verify_service(self):\n        assert verifyClass(IDownloadStatService, RedisDownloadStatService)\n\n    def test_creates_redis(self, monkeypatch):\n        redis_obj = pretend.stub()\n        redis_cls = pretend.stub(\n            from_url=pretend.call_recorder(lambda u: redis_obj),\n        )\n        monkeypatch.setattr(redis, \"StrictRedis\", redis_cls)\n\n        url = pretend.stub()\n        svc = RedisDownloadStatService(url)\n\n        assert svc.redis is redis_obj\n        assert redis_cls.from_url.calls == [pretend.call(url)]\n\n    @pytest.mark.parametrize(\n        (\"keys\", \"result\"),\n        [\n            ([], 0),\n            ([5, 7, 8], 20),\n        ]\n    )\n    def test_get_daily_stats(self, keys, result):\n        svc = RedisDownloadStatService(\"\")\n        svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))\n\n        call_keys = (\n            [\"downloads:hour:12-01-14-00:foo\"] +\n            [\n                \"downloads:hour:12-01-13-{:02d}:foo\".format(i)\n                for i in reversed(range(24))\n            ] +\n            [\"downloads:hour:12-01-12-23:foo\"]\n        )\n\n        assert svc.get_daily_stats(\"foo\") == result\n        assert svc.redis.mget.calls == [pretend.call(*call_keys)]\n\n    @pytest.mark.parametrize(\n        (\"keys\", \"result\"),\n        [\n            ([], 0),\n            ([5, 7, 8], 20),\n        ]\n    )\n    def test_get_weekly_stats(self, keys, result):\n        svc = RedisDownloadStatService(\"\")\n        svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))\n\n        call_keys = [\n            \"downloads:daily:12-01-{:02d}:foo\".format(i + 7)\n            for i in reversed(range(8))\n        ]\n\n        assert svc.get_weekly_stats(\"foo\") == result\n        assert svc.redis.mget.calls == [pretend.call(*call_keys)]\n\n    @pytest.mark.parametrize(\n        (\"keys\", \"result\"),\n        [\n            ([], 0),\n            ([5, 7, 8], 20),\n        ]\n    )\n    def test_get_monthly_stats(self, keys, result):\n        svc = RedisDownloadStatService(\"\")\n        svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))\n\n        call_keys = [\n            \"downloads:daily:12-01-{:02d}:foo\".format(i)\n            for i in reversed(range(1, 15))\n        ] + [\n            \"downloads:daily:11-12-{:02d}:foo\".format(i + 15)\n            for i in reversed(range(17))\n        ]\n\n        assert svc.get_monthly_stats(\"foo\") == result\n        assert svc.redis.mget.calls == [pretend.call(*call_keys)]\n","sub_path":"tests/unit/packaging/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"4802250","text":"import pygame\nimport random\nimport PygamePrint as pp\nimport UserClass\nimport os\n\"\"\"\nstack (eopgi: carry your own piece)\nspace\nDo 1\nGae 2\nGeol 3\nYut 4\nMo 5\n(2,0) (1,4) (1,3) (1,2) (1,1) (1,0)\n\n(2,1)  (-2,1)              (-1,1)  (0,4)\n\n(2,2)    (-2,2)          (-1,2)    (0,3)\n\n              (-1,3)\n              (-2,3)\n              \n(2,3)    (-1,4)          (-2,4)    (0,2)\n\n(2,4)  (-1,5)              (-2,5)  (0,1)\n\n(3,0) (3,1) (3,2) (3,3) (3,4) (0,0) (4,0)\n\"\"\"\npygame.init()\nos.environ['SDL_VIDEO_CENTERED'] = '0'\n\npygame.display.set_caption(\"YUT\")\n\ndef play(mode):\n    if mode == 0:\n        user1 = UserClass.Computer(0)\n        user2 = UserClass.Computer(1)\n    if mode == 1:\n        user1 = UserClass.Player(0)\n        user2 = UserClass.Computer(1)\n    if mode == 2:\n        user1 = UserClass.Player(0)\n        user2 = UserClass.Player(1)\n\n    pp.print_all(user2, user1)\n    pygame.display.update()\n\n    order = ['first','second']\n    random.shuffle(order)\n\n    if order[0] == 'second':\n        pp.situation(user1, user2, 'first')\n        pp.situation(user1, user2, 'turn')\n        user1.act(user2)\n\n    else:\n        pp.situation(user2, user1, 'first')\n\n    while True:\n\n        if user1.fineggno == 4:\n            pp.situation(user1, user2, 'win')\n            break\n\n        pp.situation(user2, user1, 'turn')\n        user2.act(user1)\n\n        if user2.fineggno == 4:\n            pp.situation(user2, user1, 'win')\n            break\n\n        pp.situation(user1, user2, 'turn')\n        user1.act(user2)\n\n        if user1.fineggno == 4:\n            pp.situation(user1, user2, 'win')\n            break\n\n\n","sub_path":"YHS.py","file_name":"YHS.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"532416564","text":"import xml.etree.ElementTree as ET\nimport csv\nimport sys\nimport os\nimport json\n\nclass Trial():\n\tdef __init__(self, number, responses ,prescreened_out=False, used=True, reason=\"NA\"):\n\t\tself.number=int(number)\n\t\tself.prescreened_out=prescreened_out\n\t\tself.responses=responses\n\t\tself.used=used\n\t\tself.reason=reason if prescreened_out else \"NA\"\n\t\tif prescreened_out:\n\t\t\tself.used=False\n\n\n\tdef __eq__(self,other):\n\t\treturn(self.number==other.number)\n\n\tdef __str__(self):\n\t\tand_or_but='and' if ((self.used) and (not self.prescreened_out)) or ((not self.used) and (self.prescreened_out)) else 'but'\n\t\treturn(\"Trial number %i, %s %s %s.\" %(self.number, 'prescreened out' if self.prescreened_out else \"not prescreened out\",and_or_but, 'used' if self.used else 'unused'))\n\t\t\nclass Response():\n\tdef __init__(self, ID, hour,minute,second,frame, trial, trial_status, Type, duration=\"NA\"):\n\t\tself.ID = int(ID) if Type!='coding' else str(ID) # Response ID, starts from 1. (int, or str for 'coding' rows)\n\t\tself.hour=int(hour) # Hours component of the response time (int).\n\t\tself.second=int(second) # Seconds component of the response time (int).\n\t\tself.minute=int(minute) # Minutes component of the response time (int).\n\t\tself.frame=int(frame) # Frames component of the response time (int).\n\t\tself.trial=int(trial) # Trial Number. (int)\n\t\tself.trial_status=bool(trial_status) # Trial Status. (boolean)\n\t\tself.Type=str(Type) # Response type: right, left, away, or off. 
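The Response class above stores an HH:MM:SS:FF timecode at 30 frames per second; its calculate_time method, later in the same record, folds that into milliseconds at 100/3 ms per frame. A standalone restatement of the arithmetic with a couple of sanity checks:

```python
def timecode_to_ms(hour, minute, second, frame, fps=30):
    """Convert an HH:MM:SS:FF timecode to milliseconds since the origin."""
    total_frames = ((hour * 60 + minute) * 60 + second) * fps + frame
    return total_frames * 1000 / fps  # 1000/30 == 100/3 ms per frame


assert timecode_to_ms(0, 0, 1, 0) == 1000.0    # one second
assert timecode_to_ms(0, 1, 0, 15) == 60500.0  # one minute plus half a second
```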
(str)\n\t\tself.time=self.calculate_time()\n\t\tself.duration=duration\n\n\tdef calculate_time(self):\n\t\t\"\"\" Calculates the time of the response in milliseconds.\n\t\t\tReturns time in milliseconds (int).\n\t\t\t\"\"\"\n\t\tHours=int(self.hour)\n\t\tMinutes=int(self.minute)+60*Hours\n\t\tSeconds=int(self.second)+60*Minutes\n\t\tFrames=int(self.frame)+30*Seconds\n\t\tmilliseconds=Frames*100/3\n\t\treturn(milliseconds)\n\n\tdef __str__(self):\n\t\tif self.Type=='coding':\n\t\t\treturn('Response %s at hour: %i, minute: %i, second: %i, frame: %i, at trial %i, which is %s is at %s' %(self.ID,self.hour,self.minute,self.second,self.frame,self.trial,\"active\" if self.trial_status else \"inactive\",self.Type))\n\t\telse:\n\t\t\treturn('Response %i at hour: %i, minute: %i, second: %i, frame: %i, at trial %i, which is %s is at %s' %(self.ID,self.hour,self.minute,self.second,self.frame,self.trial,\"active\" if self.trial_status else \"inactive\",self.Type))\n\n\tdef __eq__(self,other):\n\t\treturn(self.ID == other.ID and self.time==other.time and self.trial==other.trial and self.trial_status == other.trial_status and self.Type==other.Type)\n\ndef XMLDict_to_Pythondict(XML_Dict):\n\t\"\"\" Takes an XML dictionary (an XML object with the tag \"dict\")\n\t\tReturns a python dictionary mapping each key to its value in a python-readable form\n\t\t\"\"\"\n\tdef array_to_list(array_item):\n\t\tl=[]\n\t\tfor item in array_item:\n\t\t\tif item.tag=='dict':\n\t\t\t\tl.append(XMLDict_to_Pythondict(item))\n\t\t\telif item.tag=='true' or item.tag=='false':\n\t\t\t\tl.append(item.tag)\n\t\t\telse:\n\t\t\t\tl.append(item.text)\n\t\treturn(l)\n\tkeys=[]\n\tvalues=[]\n\tfor item in XML_Dict:\n\t\tif item.tag=='key':\n\t\t\tkeys.append(item.text)\n\t\telif item.tag=='dict':\n\t\t\tvalues.append(XMLDict_to_Pythondict(item))\n\t\telif item.tag=='true' or item.tag=='false':\n\t\t\tvalues.append(item.tag)\n\t\telif item.tag=='array':\n\t\t\tvalues.append(array_to_list(item))\n\t\telse:\n\t\t\tvalues.append(item.text)\n\tif len(keys)!=len(values):\n\t\traise ValueError(\"Lengths are not equal\")\n\treturn({keys[i]:values[i] for i in range(len(keys))})\n\ndef extract_responses(tree):\n\t\"\"\" Takes a parsed XML (or vcx) file and extracts the important data\n\t\tInputs: XML tree\n\t\tReturns: a dictionary on the form:\n\t\t\t{'Subject_info':dict mapping each attribute to its value, 'Responses': list of Response objects}\n\t\t\"\"\"\n\tmain_dict=XMLDict_to_Pythondict(tree.getroot().find('dict'))\n\tSubject_data=main_dict['Subject']\n\tResponses=Subject_data['Responses']\n\tdel Subject_data['Responses']\n\torganized_responses=[]\n\tfor resp in Responses:\n\t\ttrial_stat=True if Responses[resp]['Trial Status']=='true' else False\n\t\tcurr_Resp=Response(int(resp.split(\" \")[1]),\n\t\t\t\tResponses[resp]['Timecode']['Hour'],\n\t\t\t\tResponses[resp]['Timecode']['Minute'],\n\t\t\t\tResponses[resp]['Timecode']['Second'],\n\t\t\t\tResponses[resp]['Timecode']['Frame'],\n\t\t\t\tint(Responses[resp]['Trial']),\n\t\t\t\ttrial_stat, Responses[resp]['Type'])\n\t\torganized_responses.append(curr_Resp)\n\treturn({'Subject_info':Subject_data, 'Responses':sorted(organized_responses, key=lambda x: int(x.calculate_time()))})\n\ndef get_trials(Data, unused_trials=False):\n\t\"\"\" Takes a list of Respone objects and a boolean to indicate whether to include non-coded trials or not.\n\t\tReturns a dictionary mapping each trial number to a list fo its responses.\n\t\t\"\"\"\n\tdef 
get_unused_trials(Data):\n\t\tprescreened_out_dict=Data['Subject_info']['Pre-Screen Information']['Pre-Screen Array 0']\n\t\tprescreened_out_list=[]\n\t\tfor entry in prescreened_out_dict:\n\t\t\tprescreened_out_list.append((int(prescreened_out_dict[entry]['Trial']),str(prescreened_out_dict[entry]['Reason'])))\n\t\treturn([Trial(int(t), [], used=False) for t in Data['Subject_info']['Unused Trials']]+[Trial(int(t[0]), [], prescreened_out=True, reason=t[1]) for t in prescreened_out_list])\n\ttotal_trials={}\n\tResponses=Data['Responses']\n\tfor res in Responses:\n\t\tif res.trial in total_trials:\n\t\t\ttotal_trials[res.trial].responses.append(res)\n\t\telse:\n\t\t\ttotal_trials[res.trial]=Trial(res.trial, [res])\n\tlist_of_trials=list(total_trials.values())\n\tif unused_trials:\n\t\tfor t in get_unused_trials(Data):\n\t\t\tlist_of_trials.append(t)\n\treturn(sorted(list_of_trials, key=lambda x:x.number))\n\ndef Response_duration(Responses, Response):\n\t\"\"\" Takes a list of Response objects and a specific Response object.\n\t\tReturns the duration of the specific Response object (i.e. the time difference between the response and the next response/next off).\n\t\tRaises an error if the response is not in the responses list, if the response type is \"off\", or if the trial_status of the response is Fasle.\n\t\t\"\"\"\n\tif Response not in Responses:\n\t\traise ValueError(\"Response not in responses!\")\n\tif Response.Type==\"off\":\n\t\traise ValueError('Responses of type \"off\" do not have a duration.')\n\tif Response.trial_status==False:\n\t\traise ValueError('Responses with inactive trial do not have a duration.')\n\tnext_resp=Responses[Responses.index(Response)+1]\n\treturn(abs(next_resp.time-Response.time))\n\ndef get_coding_duration(Data, Response):\n\t\"\"\" Measures the duration of the trial that begins with the given 'coding' response. This function is different from Response_duration() by that it measures\n\t\tthe duration of the entire trial, instead of the duration of the time difference between the two subsequent responses.\n\t\tTakes a list of Response objects and a specific Response object of type \"coding\".\n\t\tReturns the duration of the trial that begins with the given response of type \"coding\".\n\t\tNOTE: Response must be of type coding, otherwise a ValueError apeears.\n\t\t\"\"\"\n\n\tfor trial in get_trials(Data, False):\n\t\tcoding_event=trial.responses[0]\n\t\tif Response==coding_event:\n\t\t\tlast_Event=trial.responses[-1]\n\t\t\treturn(round(abs(last_Event.time-coding_event.time),2))\n\n\n\ndef get_total_time(Responses, types=[], trials=None, milliseconds=False):\n\t\"\"\" Takes a list of Response objects, a list of response types (e.g. ['left','right', 'away']), and a list of trial numbers.\n\t\tIf given a list of trials, returns the amount of time of all the responses of the types specified in the trials specified.\n\t\tIf not given a list of trials, returns the total amount of time of all the responses of the types specified in all trials.\n\t\tIf milliseconds is True, returns total time in 30*seconds. 
Otherwise returns in seconds.\n\t\t\"\"\"\n\tif \"off\" in types:\n\t\traise ValueError(\"Responses of type 'off' do not have a duration.\")\n\tif trials==None:\n\t\tval=sum([Response_duration(Responses, Resp) for Resp in Responses if Resp.Type in types and Resp.trial_status])\n\t\treturn val if milliseconds else round(val/30, 2)\n\telif type(trials)==list:\n\t\tval=sum([Response_duration(Responses, Resp) for Resp in Responses if Resp.Type in types and Resp.trial in trials and Resp.trial_status])\n\t\treturn val if milliseconds else round(val/30,2)\n\telse:\n\t\traise ValueError(\"Trials must be entered as a list, even if only one trial\")\n\ndef clean(Data):\n\t\"\"\" This functions takes some data and cleans it so that it is easily accessible by Python. Currently, the function does:\n\t\t- Given a Data dictionary in the form {'Subject_info': {...} , 'Responses': [...]}, it modifies the responses so that trials\n\t\t\tstart and end with a 'coding' event according to their situation.\n\t\tReturns a Data dictionary in the form {'Subject_info': {...} , 'Responses': [...]}.\n\t\t\"\"\"\n\tcopyData=Data.copy()\n\tResponses=copyData['Responses']\n\tif len(Responses)==0:\n\t\treturn(copyData)\n\tnew_Responses=[]\n\tTrials=get_trials(copyData, False)\n\tfor trial in Trials:\n\t\tcur_res=trial.responses[0]\n\t\tnew_Responses.append(Response(\"\", trial.responses[0].hour, trial.responses[0].minute, trial.responses[0].second, trial.responses[0].frame, trial.responses[0].trial, False, 'coding'))\n\tResponses=new_Responses+Responses\n\treturn({'Subject_info': copyData['Subject_info'] , 'Responses': sorted(Responses, key=lambda x:x.time)})\n\ntree=ET.parse('../raw_data/source_data/vcx/%s.vcx' %('trial_file'))\nData=clean(extract_responses(tree))\n# for r in Data['Responses']:\n# \tprint(r)\n# for trial in (get_trials(Data, True)):\n# \tprint(trial)\n# \tfor r in trial.responses:\n# \t\tprint(r)\n\n# vcx_dir='/Users/lookit/Desktop/Khaled-UROP/VM_to_PsychDS/V.M.-to-Psy-DS' # For Lab's mac\nvcx_dir='/Users/shehada/Desktop/UROP/Psych-DS Project/vm to psychds/raw_data/source_data/vcx' # For my personal device.\nwith open('../raw_data/marchman_participants_data.tsv', 'w') as tsv_participants_file:\n\t# The marchman_participants_data.tsv file is opened this early so that we do not have to iterate over the sessions twice.\n\tfor File in os.listdir(vcx_dir):\n\t\tif File[-4:]=='.vcx':\n\t\t\t## Exporting data from the vcx file.\n\t\t\tFilename=File[:-4]\n\t\t\ttree=ET.parse('../raw_data/source_data/vcx/%s.vcx' %(Filename))\n\t\t\tData=clean(extract_responses(tree))\n\n\t\t\t## Dividing data into three subcategories: Responses_data, Session_level_data, and Trial_level_data. 
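The processing loop that follows walks the vcx directory with os.listdir and a [-4:] slice on each file name; pathlib.Path.glob expresses the same walk more robustly. A hedged sketch, where the directory argument mirrors the record's vcx_dir and the helper name is illustrative:

```python
import pathlib
import xml.etree.ElementTree as ET


def iter_vcx(directory):
    """Yield (file stem, parsed XML tree) for every .vcx file in `directory`."""
    for path in sorted(pathlib.Path(directory).glob('*.vcx')):
        yield path.stem, ET.parse(str(path))
```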
\n\t\t\tResponses_data=Data['Responses'][:]\n\t\t\tSession_level_data=Data['Subject_info'].copy()\n\t\t\tTrial_level_data={'Pre-Screen Information':Session_level_data['Pre-Screen Information'], 'Unused Trials':Session_level_data[\"Unused Trials\"]}\n\t\t\tdel Session_level_data['Pre-Screen Information']\n\t\t\tdel Session_level_data['Unused Trials']\n\n\t\t\t## Building the _timecourse_data.tsv file.\n\t\t\twith open('../raw_data/%s_timecourse_data.tsv' %(Filename), 'w') as tsv_timecourse_file:\n\t\t\t    tsv_writer = csv.writer(tsv_timecourse_file, delimiter='\\t')\n\t\t\t    first_row=['Response', 'Hour', 'Minute', 'Second', 'Frame', 'Trial', 'Trial Status', 'Type', 'Duration']\n\t\t\t    tsv_writer.writerow(first_row) # First Row\n\t\t\t    for resp in Responses_data:\n\t\t\t    \tif resp.Type=='coding':\n\t\t\t    \t\ttsv_writer.writerow([str(resp.ID), str(resp.hour), str(resp.minute), str(resp.second), str(resp.frame), str(resp.trial), str(resp.trial_status), str(resp.Type),get_coding_duration(Data, resp)])\n\t\t\t    \telse:\t\n\t\t\t    \t\ttsv_writer.writerow([str(resp.ID), str(resp.hour), str(resp.minute), str(resp.second), str(resp.frame), str(resp.trial), str(resp.trial_status), str(resp.Type),\"NA\"])\n\n\t\t\t# Building the marchman_participants_data.tsv file.\n\t\t\ttsv_writer = csv.writer(tsv_participants_file, delimiter='\\t')\n\t\t\ttsv_writer.writerow(['Number','Birthday','Sex','Months','Date of Test', 'Primary PS Complete', 'Primary Pre-Screener', 'Secondary PS Complete','Secondary Pre-Screener','Coded From','Coder', 'Checked By', 'Order'])\n\t\t\ttsv_writer.writerow([Session_level_data['Number'], Session_level_data['Birthday'], Session_level_data['Sex'], Session_level_data['Months'], Session_level_data['Date of Test'], Session_level_data['Primary PS Complete'], Session_level_data['Primary Pre-Screener'], Session_level_data['Secondary PS Complete'], Session_level_data['Secondary Pre-Screener'], Session_level_data['Coded From'], Session_level_data['Coder'], Session_level_data['Checked By'], Session_level_data['Order']])\n\n\t\t\t# Building the _trial_data.tsv\n\t\t\twith open('../raw_data/%s_trial_data.tsv' %(Filename), 'w') as tsv_trial_file:\n\t\t\t\ttsv_writer=csv.writer(tsv_trial_file, delimiter=\"\\t\")\n\t\t\t\tfirst_row=[\"Trial\", \"prescreened_out\", \"used\", \"reason\"]\n\t\t\t\ttsv_writer.writerow(first_row) # First Row\n\t\t\t\tfor trial in get_trials(Data,True):\n\t\t\t\t\tif trial.prescreened_out:\n\t\t\t\t\t\ttsv_writer.writerow([str(trial.number), str(trial.prescreened_out), str(trial.used), str(trial.reason)])\n\t\t\t\t\telse:\n\t\t\t\t\t\ttsv_writer.writerow([str(trial.number), str(trial.prescreened_out), str(trial.used), \"NA\"])\n\n\t\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/python_to_xml.py","file_name":"python_to_xml.py","file_ext":"py","file_size_in_byte":12657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"30841210","text":"import socket\ns = socket.socket()\nhost = socket.gethostname()\nport = 9090\n\ns.connect((host, port))\n\nwhile True:\n    msg = s.recv(1024)\n    print(msg.decode(\"utf-8\"))\n    Input = input()\n    if Input=='quit':\n        s.close()\n        break\n    s.send(str.encode(Input))\n","sub_path":"task1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"199094923","text":"# first stage CeVICA_analysis: input fastq for paired end sequencing reads, and output merged reads 
separated by index\r\n\r\n# required packages\r\nimport xlrd \r\nimport sqlite3\r\nimport multiprocessing\r\nimport time\r\n\r\n\r\ndef read_paired_end(r1,r2):\r\n lines_4x2 = []\r\n lines_4x2 += [r1.readline().strip()] + [r1.readline().strip()] + [r1.readline().strip()] + [r1.readline().strip()]\r\n lines_4x2 += [r2.readline().strip()] + [r2.readline().strip()] + [r2.readline().strip()] + [r2.readline().strip()]\r\n if len(lines_4x2[0]) > 2:\r\n return lines_4x2\r\n else:\r\n return 'end of file'\r\n\r\n\r\ndef merge_2X250(lines_4X2): # merges long reads and combine quality scores for overlap region, first 4 lines read1, last 4 lines read2.\r\n str_r2c = '' # reverse complement read2 sequence\r\n for n in range(0,len(lines_4X2[5])):\r\n str_r2c = str_r2c + comp_encoding[lines_4X2[5][n]]\r\n str_r2rc = str_r2c[::-1]\r\n lines_4X2[5] = str_r2rc\r\n lines_4X2[7] = lines_4X2[7][::-1] # reverse read2 quality score\r\n\r\n # sequence alignment for merging\r\n status = ''\r\n score = 0\r\n slice_location = 0\r\n for n in range(50,100): # adjust this search window depending on how much the two reads overlap, changed from 50,150 8-9-2019\r\n for seq_location in range(0,16):\r\n if lines_4X2[1][len(lines_4X2[1])-16+seq_location] == lines_4X2[5][n + seq_location]:\r\n score +=1\r\n if score > match_score_cutoff:\r\n status = 'complete'\r\n slice_location = n\r\n break\r\n score = 0\r\n if status == 'complete':\r\n r1_only = lines_4X2[1][:len(lines_4X2[1])-slice_location-16]\r\n r2_only = lines_4X2[5][slice_location+16:]\r\n r1_overlap = lines_4X2[1][len(lines_4X2[1])-slice_location-16:]\r\n r2_overlap = lines_4X2[5][:slice_location+16]\r\n r1_onlyQS = lines_4X2[3][:len(lines_4X2[1])-slice_location-16]\r\n r2_onlyQS = lines_4X2[7][slice_location+16:]\r\n r1_overlapQS = lines_4X2[3][len(lines_4X2[1])-slice_location-16:]\r\n r2_overlapQS = lines_4X2[7][:slice_location+16]\r\n overlap_comb = ''\r\n overlap_combQS = ''\r\n for n in range(0,slice_location+16):\r\n if r1_overlap[n] == r2_overlap[n]:\r\n overlap_comb += r1_overlap[n]\r\n try:\r\n overlap_combQS += list(qs_encoding.keys())[list(qs_encoding.values()).index(qs_encoding[r1_overlapQS[n]] + qs_encoding[r2_overlapQS[n]])]\r\n except:\r\n overlap_combQS += 'I'\r\n if r1_overlap[n] != r2_overlap[n]:\r\n if qs_encoding[r1_overlapQS[n]] >= qs_encoding[r2_overlapQS[n]]:\r\n overlap_comb += r1_overlap[n]\r\n overlap_combQS += list(qs_encoding.keys())[list(qs_encoding.values()).index(qs_encoding[r1_overlapQS[n]])]\r\n if qs_encoding[r1_overlapQS[n]] < qs_encoding[r2_overlapQS[n]]:\r\n overlap_comb += r2_overlap[n]\r\n overlap_combQS += list(qs_encoding.keys())[list(qs_encoding.values()).index(qs_encoding[r2_overlapQS[n]])]\r\n output = [lines_4X2[0]]\r\n output += [r1_only + overlap_comb + r2_only]\r\n output += [r1_onlyQS + overlap_combQS + r2_onlyQS]\r\n output += ['None']\r\n output += ['None']\r\n # identify index\r\n index_neighboring_seq = 'TCAGAAG' # for RDS5 index\r\n if index_neighboring_seq in output[1][-25:]:\r\n output[3] = output[1][-25:][output[1][-25:].index(index_neighboring_seq)+7:]\r\n \r\n # identify UMI\r\n if 'CTTTAAG' in output[1][:25]:\r\n output[4] = output[1][:output[1][:25].index('CTTTAAG')]\r\n return output\r\n \r\n\r\ndef DNA_match_score(seq1,seq2):\r\n score = 0\r\n if len(seq1) == len(seq2):\r\n for n in range(len(seq1)):\r\n if seq1[n] == seq2[n]:\r\n score += 1\r\n else:\r\n score = 0\r\n return score\r\n\r\n\r\ndef split_by_index8(db_path):\r\n # RDS5 index set\r\n sampA1 = ['C']\r\n sampA2 = ['CCAAG']\r\n sampB1 = 
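merge_2X250 above builds the reverse complement of read 2 one base at a time through a dict; str.translate performs the same mapping in a single pass. A minimal sketch using the record's A/T/G/C/N alphabet:

```python
COMPLEMENT = str.maketrans("ATGCN", "TACGN")


def reverse_complement(seq):
    """Complement each base, then reverse the whole read."""
    return seq.translate(COMPLEMENT)[::-1]


assert reverse_complement("ATGCN") == "NGCAT"
```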
['TT']\r\n sampB2 = ['CGTTGGT']\r\n sampC1 = ['CGA']\r\n sampC2 = ['CTAGATCC']\r\n sampD1 = ['TACG']\r\n sampD2 = ['GAGCAGCTA']\r\n\r\n db = sqlite3.connect(db_path)\r\n cursor = db.cursor()\r\n with open('sample_A1.fastq', 'w') as a:\r\n with open('sample_A2.fastq', 'w') as b:\r\n with open('sample_B1.fastq', 'w') as c:\r\n with open('sample_B2.fastq', 'w') as d:\r\n with open('sample_C1.fastq', 'w') as e:\r\n with open('sample_C2.fastq', 'w') as f:\r\n with open('sample_D1.fastq', 'w') as g:\r\n with open('sample_D2.fastq', 'w') as h:\r\n cursor.execute('SELECT * FROM fastq_processed')\r\n for row in cursor:\r\n if row[5] in sampA1:\r\n a.write(row[3] + '#' + row[4] + '\\n')\r\n a.write(row[1] + '\\n')\r\n a.write('+' + '\\n')\r\n a.write(row[2] + '\\n')\r\n if row[5] in sampA2:\r\n b.write(row[3] + '#' + row[4] + '\\n')\r\n b.write(row[1] + '\\n')\r\n b.write('+' + '\\n')\r\n b.write(row[2] + '\\n')\r\n if row[5] in sampB1:\r\n c.write(row[3] + '#' + row[4] + '\\n')\r\n c.write(row[1] + '\\n')\r\n c.write('+' + '\\n')\r\n c.write(row[2] + '\\n')\r\n if row[5] in sampB2:\r\n d.write(row[3] + '#' + row[4] + '\\n')\r\n d.write(row[1] + '\\n')\r\n d.write('+' + '\\n')\r\n d.write(row[2] + '\\n')\r\n if row[5] in sampC1:\r\n e.write(row[3] + '#' + row[4] + '\\n')\r\n e.write(row[1] + '\\n')\r\n e.write('+' + '\\n')\r\n e.write(row[2] + '\\n')\r\n if row[5] in sampC2:\r\n f.write(row[3] + '#' + row[4] + '\\n')\r\n f.write(row[1] + '\\n')\r\n f.write('+' + '\\n')\r\n f.write(row[2] + '\\n')\r\n if row[5] in sampD1:\r\n g.write(row[3] + '#' + row[4] + '\\n')\r\n g.write(row[1] + '\\n')\r\n g.write('+' + '\\n')\r\n g.write(row[2] + '\\n')\r\n if row[5] in sampD2:\r\n h.write(row[3] + '#' + row[4] + '\\n')\r\n h.write(row[1] + '\\n')\r\n h.write('+' + '\\n')\r\n h.write(row[2] + '\\n')\r\n\r\n\r\nworkbook_qs = xlrd.open_workbook(\"quality_score.xlsx\")\r\nworksheet_qs = workbook_qs.sheet_by_index(0)\r\ncomp_encoding = {'A':'T', 'T':'A','G':'C','C':'G','N':'N'}\r\nqs_encoding = {}\r\nfor nrows in range(1,42):\r\n try:\r\n qs_code = int(worksheet_qs.cell_value(nrows,0))\r\n except:\r\n qs_code = worksheet_qs.cell_value(nrows,0)\r\n qs_encoding[str(qs_code)] = worksheet_qs.cell_value(nrows,2)\r\nmatch_score_cutoff = 15 \r\nlen_cutoff = 40 \r\n\r\n\r\nif __name__ == '__main__':\r\n start = time.time()\r\n# Input definition, change accordingly on the line below #\r\n list_sample = ['SR3', 'GP3', 'STR'] # sample identifier for generating filepath for analysis\r\n for s in list_sample:\r\n status = ''\r\n print('merging reads...')\r\n total_count = 0\r\n merged_count = 0\r\n # create a database file to store merged reads before demultiplexing, not required if demultiplexing is already done\r\n db_fastq = sqlite3.connect(f'{s}_fastq.db')\r\n cursor_fastq = db_fastq.cursor()\r\n try:\r\n cursor_fastq.execute(\r\n 'CREATE TABLE fastq_processed(id INTEGER PRIMARY KEY, sequences TEXT, Q_score TEXT, read_id TEXT, UMI TEXT, barcode TEXT)')\r\n except:\r\n pass\r\n with open(f'{s}_read1.fastq', 'r') as r1: # read1 fastq file \r\n with open(f'{s}_read2.fastq', 'r') as r2: # read2 fastq file\r\n while True:\r\n if status == 'complete':\r\n break\r\n list_of_blocks = []\r\n for n in range(0,100000):\r\n lines_4x2 = read_paired_end(r1,r2)\r\n if len(lines_4x2[1])>225 and len(lines_4x2[5])>225:\r\n list_of_blocks += [lines_4x2]\r\n total_count += 1\r\n elif lines_4x2 == 'end of file': \r\n print('end of file')\r\n status = 'complete'\r\n break\r\n pool = multiprocessing.Pool()\r\n result = pool.map(merge_2X250, 
list_of_blocks)\r\n\r\n # save merged reads to database\r\n for block in result:\r\n try:\r\n cursor_fastq.execute('INSERT INTO fastq_processed(sequences, Q_score, read_id, UMI, barcode) VALUES(?,?,?,?,?)', (block[1], block[2], block[0], block[4], block[3]))\r\n merged_count += 1\r\n except:\r\n pass\r\n db_fastq.commit()\r\n\r\n # save merged reads to fastq\r\n with open(f'{s}.fastq', 'a') as save_fastq:\r\n for block in result:\r\n if block != None:\r\n save_fastq.write(block[0] + '\\n')\r\n save_fastq.write(block[1] + '\\n')\r\n save_fastq.write('+' + '\\n')\r\n save_fastq.write(block[2] + '\\n')\r\n\r\n print('merging reads completed')\r\n print(f'{total_count} sequences total for {s}')\r\n print(f'{merged_count} sequences merged for {s}')\r\n end = time.time()\r\n print(f'total processing time: {int((end - start)//60)}mins {round((end - start)%60,2)}s')\r\n\r\n # optional demultiplexing\r\n # print('sorting reads by index...')\r\n # split_by_index8('_fastq.db')\r\n","sub_path":"CeVICA_analysis1.py","file_name":"CeVICA_analysis1.py","file_ext":"py","file_size_in_byte":11152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"187222178","text":"import os\nfrom igf_data.igfdb.collectionadaptor import CollectionAdaptor\nfrom ehive.runnable.IGFBaseProcess import IGFBaseProcess\n\nclass FetchFastqForRun(IGFBaseProcess):\n '''\n A IGF process runnable for fetching all fastq files for an experiment\n '''\n def param_defaults(self):\n params_dict=super(FetchFastqForRun,self).param_defaults()\n params_dict.update({\n 'fastq_collection_type':'demultiplexed_fastq',\n 'fastq_collection_table':'run'\n })\n return params_dict\n\n def run(self):\n try:\n project_igf_id=self.param_required('project_igf_id')\n experiment_igf_id=self.param_required('experiment_igf_id')\n sample_igf_id=self.param_required('sample_igf_id')\n run_igf_id=self.param_required('run_igf_id')\n igf_session_class=self.param_required('igf_session_class')\n fastq_collection_type=self.param('fastq_collection_type')\n fastq_collection_table=self.param('fastq_collection_table')\n ca=CollectionAdaptor(**{'session_class':igf_session_class})\n ca.start_session()\n fastq_files=ca.get_collection_files(collection_name=run_igf_id,\n collection_type=fastq_collection_type,\n collection_table=fastq_collection_table,\n output_mode='dataframe')\n ca.close_session()\n fastq_counts=len(fastq_files.index)\n fastq_list=list(fastq_files['file_path'].values) # converting fastq filepaths to a list\n if not isinstance(fastq_list, list) or \\\n len(fastq_list)==0:\n raise ValueError('No fastq file found for run {0}'.format(run_igf_id))\n\n for file in fastq_list:\n if not os.path.exists(file):\n raise IOError('Fastq file path {0} not found for run {1}'.\\\n format(file,run_igf_id))\n\n self.param('dataflow_params',{'fastq_files_list':fastq_list}) # add fastq filepaths to dataflow\n except Exception as e:\n message='project: {2}, sample:{3}, Error in {0}: {1}'.\\\n format(self.__class__.__name__,\n e,\n project_igf_id,\n sample_igf_id)\n self.warning(message)\n self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs\n self.post_message_to_ms_team(\n message=message,\n reaction='fail')\n raise","sub_path":"ehive/runnable/process/alignment/FetchFastqForRun.py","file_name":"FetchFastqForRun.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"551481771","text":"# -*- coding: utf-8 
-*-\n\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.template import Context, RequestContext\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.contrib import messages\nfrom models import Todo, Tag\nfrom forms import TodoForm, RegistrationForm\nimport datetime\n\n\ndef index(request):\n\n    variables = RequestContext(request, {\n        'user': request.user\n    })\n    return render_to_response('index.html', variables)\n\n\nITEMS_PER_PAGE = 3\n\n\n@login_required\ndef user(request, username):\n\n    form = TodoForm()\n    author = request.user\n    todos = Todo.objects.filter(author=author).order_by('finished', '-pubtime')\n    # todos = Todo.objects.all().order_by('finished', '-pubtime')\n\n    paginator = Paginator(todos, ITEMS_PER_PAGE)\n\n    try:\n        page_number = int(request.GET['page'])\n    except (KeyError, ValueError):\n        page_number = 1\n\n    try:\n        current_page = paginator.page(page_number)\n    except InvalidPage:\n        raise Http404\n\n    current_todos = current_page.object_list\n\n    variables = RequestContext(request, {\n        'form': form,\n        'todos': current_todos,\n        'show_paginator': paginator.num_pages > 1,\n        'has_prev': current_page.has_previous,\n        'has_next': current_page.has_next,\n        'curr_page': page_number,\n        'pages': paginator.num_pages,\n        'prev_page': current_page.previous_page_number(),\n        'next_page': current_page.next_page_number(),\n        'dbprev_page': current_page.previous_page_number() - 1,\n        'dbnext_page': current_page.next_page_number() + 1\n    })\n    return render_to_response('user.html', variables)\n\n\n@login_required\ndef add(request):\n\n    if request.method == 'POST':\n        form = TodoForm(request.POST)\n        if form.is_valid():\n            title = form.cleaned_data['title']\n\n            # Create the todo item.\n            todo = Todo.objects.create(\n                title=title, finished=0, author=request.user)\n\n            # Create new tag list.\n            tag_names = form.cleaned_data['tags'].split()\n            for tag_name in tag_names:\n                tag, dummy = Tag.objects.get_or_create(name=tag_name)\n                todo.tag_set.add(tag)\n            todo.save()\n            messages.info(request, u'Added successfully!')\n            return HttpResponseRedirect(reverse('todo_user', args=[request.user]))\n    else:\n        form = TodoForm()\n\n    variables = RequestContext(request, {\n        'form': form\n    })\n    return render_to_response('add.html', variables)\n\n\n@login_required\ndef finish(request, id):\n\n    todo = get_object_or_404(Todo, id=id)\n    status = request.GET.get('status', '')\n\n    if status == 'yes':\n        finished = 1\n    elif status == 'no':\n        finished = 0\n    else:\n        messages.info(request, u'Invalid request!')\n        return HttpResponseRedirect(reverse('todo_user', args=[request.user]))\n    todo.finished = finished\n    todo.pubtime = datetime.datetime.now()\n    todo.save()\n    messages.info(request, u'Updated successfully!')\n    return HttpResponseRedirect(reverse('todo_user', args=[request.user]))\n\n\n@login_required\ndef delete(request, id):\n    todo = get_object_or_404(Todo, id=id)\n    todo.delete()\n    messages.info(request, u'Deleted successfully!')\n    return HttpResponseRedirect(reverse('todo_user', args=[request.user]))\n\n\n@login_required\ndef modify(request, id):\n\n    if request.method == 'POST':\n        form = TodoForm(request.POST)\n        if form.is_valid():\n            todo = get_object_or_404(Todo, id=id)\n            todo.title = form.cleaned_data['title']\n            todo.author = request.user\n            todo.pubtime = datetime.datetime.now()\n\n            # Create the new tag list.\n            todo.tag_set.clear()\n            tag_names = form.cleaned_data['tags'].split()\n            for tag_name in tag_names:\n                tag, dummy = Tag.objects.get_or_create(name=tag_name)\n                todo.tag_set.add(tag)\n\n            todo.save()\n            messages.info(request, u'Edited successfully')\n            return HttpResponseRedirect(reverse('todo_user', args=[request.user]))\n    else:\n        form = TodoForm()\n        todo = Todo.objects.get(id=id)\n\n    variables = RequestContext(request, {\n        'form': form,\n        'todo': todo\n    })\n    return render_to_response('add.html', variables)\n\n\ndef logoff(request):\n    logout(request)\n    return HttpResponseRedirect('/')\n\n\ndef register(request):\n\n    if request.method == 'POST':\n        form = RegistrationForm(request.POST)\n        if form.is_valid():\n            user = User.objects.create_user(\n                username=form.cleaned_data['username'],\n                password=form.cleaned_data['password1'],\n                email=form.cleaned_data['email']\n            )\n            return HttpResponseRedirect('accounts/success/')\n    else:\n        form = RegistrationForm()\n    variables = RequestContext(request, {\n        'form': form\n\n    })\n    return render_to_response('registration/register.html', variables)\n\n\n@login_required\ndef tags(request):\n\n    author = request.user\n    todos = author.todo_set.all()\n    variables = RequestContext(request, {\n        'todos': todos\n\n    })\n    return render_to_response('tags.html', variables)\n\n\n@login_required\ndef tags_user(request, tagname):\n    tag = Tag.objects.get(name=tagname)\n    todos = tag.todos.filter(\n        author=request.user).order_by('finished', '-pubtime')\n    paginator = Paginator(todos, ITEMS_PER_PAGE)\n\n    try:\n        page_number = int(request.GET['page'])\n    except (KeyError, ValueError):\n        page_number = 1\n\n    try:\n        current_page = paginator.page(page_number)\n    except InvalidPage:\n        raise Http404\n\n    current_todos = current_page.object_list\n    variables = RequestContext(request, {\n        'todos': current_todos,\n        'show_paginator': paginator.num_pages > 1,\n        'has_prev': current_page.has_previous,\n        'has_next': current_page.has_next,\n        'curr_page': page_number,\n        'pages': paginator.num_pages,\n        'prev_page': current_page.previous_page_number(),\n        'next_page': current_page.next_page_number(),\n        'dbprev_page': current_page.previous_page_number() - 1,\n        'dbnext_page': current_page.next_page_number() + 1\n\n    })\n    return render_to_response('tags_user.html', variables)\n","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"355105912","text":"'''\n18. Sort the lines in descending order of the numeric value in the third column\nSort the lines in reverse order of the value in the third column (note: reorder the lines without changing the content of each line).\nUse the sort command to check the result (for this problem the output does not have to match the command-line result exactly).\n\n# Pitfall: the input file is already in descending order\n'''\nfrom operator import itemgetter as get\n\nwith open('hightemp.txt', 'r') as f:\n    lines = [line.split() for line in f]\n    # [x] lines.sort(key=get(2), reverse=True) # <- should convert to a number first\n    # [o]\n    lines.sort(key=lambda x: -float(x[2]))\n\nwith open('out18', 'w') as f:\n    for line in lines:\n        print('\\t'.join(line), file=f)\n","sub_path":"kiyuna/chapter02/knock18.py","file_name":"knock18.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"415245946","text":"from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField\nfrom wtforms import StringField, SubmitField, HiddenField\nfrom wtforms.validators import DataRequired\n\nclass CreateBucketForm(FlaskForm):\n    new_bucket_name = StringField(\n        'Bucket Name',\n        validators = [\n            DataRequired()\n        ]\n    )\n\n    create = SubmitField('Create')\n\nclass DeleteBucketForm(FlaskForm):\n    bucket_name = HiddenField()\n\n    delete = SubmitField('Delete')\n\nclass FileForm(FlaskForm):\n    bucket_name = HiddenField()\n\n    file_name = HiddenField()\n\n    delete = SubmitField('Delete')\n\nclass FileUploadForm(FlaskForm):\n    file = FileField(\n        'Select File',\n        validators=[\n            DataRequired()\n        ]\n    )\n\n    upload = SubmitField('Upload')","sub_path":"s3/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"559301928","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/2/23 19:22\n# @Author : ShaHeTop-Almighty-ares\n# @Email : yang6333yyx@126.com\n# @File : CmsHook.py\n# @Software: PyCharm\n\nfrom flask import request, g\n\nfrom app.controllers.cms.cms_bp import route_admin\nfrom common.libs.customException import ab_code\nfrom common.libs.tokens import get_user\n\n\n@route_admin.before_request\ndef before_request_cms():\n    print('cms before_request')\n    path = request.path\n    print(path)\n    if '/test_exc' in path:\n        print('testing exception handling')\n        return\n\n    if request.method == 'OPTIONS':\n        return\n\n    if path == '/cms/login':\n        print('visiting: /cms/login')\n        return\n\n    if '/cms' in path:\n        print('visiting: /cms')\n        is_token = 'Token' in dict(request.headers)  # does a Token header exist\n        print('header contains key token ->', is_token)\n\n        if not is_token:\n            ab_code(666)\n        elif is_token:\n            token = request.headers.get('token', '')  # extract the token\n            # print(token)\n            # look up the user by the token\n            # and store the user on the global g object\n            get_user(token)\n        else:\n            g.app_user = None\n","sub_path":"common/interceptors/CmsHook.py","file_name":"CmsHook.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"492948539","text":"from glob import glob\nimport numpy as np\nimport pickle as pkl\nfrom sklearn.gaussian_process.kernels import Matern\nfrom optml.bayesian_optimizer.gp_categorical import GaussianProcessRegressorWithCategorical\nfrom optml.bayesian_optimizer import BayesianOptimizer\nfrom optml import Parameter\nfrom optml.models import Model\nfrom sklearn.metrics import log_loss\nimport matplotlib.pyplot as plt\n\nclass DummyModel(Model):\n    __module__ = 'xgboost'\n    def __init__(self):\n        pass\n\nparams = [\n    Parameter(name='eta', param_type='continuous', lower=0.001, upper=1),\n    Parameter(name='max_depth', param_type='integer', lower=2, upper=20),\n    Parameter(name='subsample', param_type='continuous', lower=0.5, upper=1),\n    
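The before_request_cms hook above interleaves logging, path checks, and header checks; pulling the access decision into a pure function makes it unit-testable. A hedged sketch follows, where the route names mirror the record but the helper itself is illustrative and not part of the app:

```python
EXEMPT_PATHS = {'/cms/login'}


def cms_request_allowed(path, method, headers):
    """Return True when the request may proceed without failing the token check."""
    if method == 'OPTIONS' or path in EXEMPT_PATHS:
        return True
    if path.startswith('/cms'):
        return 'Token' in headers  # the protected area requires a Token header
    return True


assert cms_request_allowed('/cms/login', 'GET', {})
assert not cms_request_allowed('/cms/users', 'GET', {})
assert cms_request_allowed('/cms/users', 'GET', {'Token': 'abc'})
```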
Parameter(name='colsample_bytree', param_type='continuous', lower=0.5, upper=1),\n Parameter(name='colsample_bylevel', param_type='continuous', lower=0.5, upper=1),\n Parameter(name='min_child_weight', param_type='continuous', lower=0.001, upper=1),\n Parameter(name='alpha', param_type='continuous', lower=0.001, upper=1),\n Parameter(name='lambda', param_type='continuous', lower=0.001, upper=1),\n Parameter(name='gamma', param_type='continuous', lower=0.0, upper=1)\n ]\n\n\ndef eval_func(x,y): \n return -log_loss(x,y>0.5)\n\nif __name__ == '__main__':\n f = 'quality_benchmarks/prepare_kick/xgb_results_tuned_prepare_kick_20180319-165432.pkl'\n results = pkl.load(open(f, 'r'))\n bayesOpt = BayesianOptimizer(model=DummyModel(), \n hyperparams=params,\n eval_func=eval_func)\n\n xs = np.array([bayesOpt._param_dict_to_arr(x[1]) for x in results['trials']])\n # normalize xs\n #xs -= np.mean(xs,axis=0)\n #stds = np.std(xs,axis=0)\n #xs[:,stds>0] = xs[:,stds>0]/stds[np.newaxis, stds>0]\n ys = [x[0] for x in results['trials']]\n\n\n optimizer = GaussianProcessRegressorWithCategorical(kernel=Matern(),\n alpha=1e-4,\n n_restarts_optimizer=5,\n normalize_y=True)\n\n optimizer.fit(xs,ys)\n\n for param in params:\n print(\"{} {}\".format(param.name, np.std([bayesOpt._param_arr_to_dict(x)[param.name] for x in xs])))\n\n param_name = 'colsample_bytree'\n\n\n plt.scatter([bayesOpt._param_arr_to_dict(x)[param_name] for x in xs], ys)\n\n preds, stds = optimizer.predict(xs, return_std=True)\n plt.plot([bayesOpt._param_arr_to_dict(x)[param_name] for x in xs], preds)\n\n def apply_optimizer(optimizer, X, Y):\n Z = np.zeros(X.shape)\n for i in range(Z.shape[0]):\n for j in range(Z.shape[1]):\n Z[i,j] = optimizer.predict([X[i][j], Y[i][j]])[0]\n return Z\n\n # plot surface\n for i in range(len(xs)):\n bayesOpt.hyperparam_history.append((ys[i], bayesOpt._param_arr_to_dict(xs[i])))\n\n print(bayesOpt.get_random_values_arr()); \n bayesOpt.get_next_hyperparameters(optimizer)\n print(bayesOpt.optimize_continuous_problem(optimizer, bayesOpt.get_random_values_arr())['x'])\n\n x = np.linspace(params[0].lower, params[0].upper, 30)\n y = np.linspace(params[1].lower, params[1].upper, 30)\n X, Y = np.meshgrid(x, y)\n Z = apply_optimizer(optimizer, X, Y)\n contours = plt.contour(X, Y, Z)\n plt.clabel(contours, inline=True, fontsize=8)\n plt.xlabel(params[0].name)\n plt.ylabel(params[1].name)\n plt.scatter(xs[:,0], xs[:,1], c=range(len(xs)), cmap='Blues')\n plt.show()","sub_path":"plot_history.py","file_name":"plot_history.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"317592554","text":"import tkinter as tk\nfrom tkinter import ttk\n\n\nclass ScrollableFrame(ttk.Frame):\n def __init__(self, container, width, height, bg='white', *args, **kwargs):\n super().__init__(container, *args, **kwargs)\n self.canvas = tk.Canvas(self, height=height, width=width, bg=bg)\n scrollbar = ttk.Scrollbar(self, orient=\"vertical\", command=self.canvas.yview)\n self.scrollable_frame = ttk.Frame(self.canvas)\n\n self.scrollable_frame.bind(\n \"\",\n lambda e: self.canvas.configure(\n scrollregion=self.canvas.bbox(\"all\")\n )\n )\n\n self.canvas.create_window((0, 0), window=self.scrollable_frame, anchor=\"nw\")\n\n self.canvas.configure(yscrollcommand=scrollbar.set)\n self.scrollable_frame.bind(\"\", self._on_mousewheel)\n\n self.canvas.grid()\n scrollbar.grid(column=1, row=0, sticky='ns') \n \n self.scrollable_frame.bind('', 
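apply_optimizer above scores the grid one cell at a time inside a double loop. Assuming the categorical GP regressor follows the usual scikit-learn convention of predicting on an (n_samples, n_features) array, which its fit/predict usage elsewhere in the record suggests, the whole grid can be evaluated in one call. A hedged sketch:

```python
import numpy as np


def apply_optimizer_vectorized(optimizer, X, Y):
    """Evaluate the regressor over a meshgrid with a single predict call."""
    grid = np.column_stack([X.ravel(), Y.ravel()])  # shape (n*m, 2)
    return optimizer.predict(grid).reshape(X.shape)
```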
self._bound_to_mousewheel)\n        self.scrollable_frame.bind('<Leave>', self._unbound_to_mousewheel)\n\n    def _bound_to_mousewheel(self, event):\n        self.canvas.bind_all(\"<MouseWheel>\", self._on_mousewheel)\n\n    def _unbound_to_mousewheel(self, event):\n        self.canvas.unbind_all(\"<MouseWheel>\")\n\n    def _on_mousewheel(self, event):\n        self.canvas.yview_scroll(int(-1*(event.delta/120)), \"units\")","sub_path":"scrollable_frame.py","file_name":"scrollable_frame.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}{"seq_id":"250812965","text":"#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n#\n# File / Package Import\n#\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n\nfrom collections import Counter\nfrom matplotlib import pyplot\nfrom matplotlib import image\nfrom TextNorm import TextNorm\nimport pandas\nimport numpy\n\n# suppress warnings\nimport warnings\nwarnings.simplefilter('ignore')\n\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n#\n# Methods\n#\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n\ndef return_plot_data(m_groupby_index):\n    '''\n    this method filters through the groupby index and returns dataframes\n    to be plotted\n    \n    Requirements:\n    package pandas\n    \n    Inputs:\n    m_groupby_index\n    Type: pandas dataframe groupby index\n    Desc: the dataframe grouped by the string_callkey\n        dataframe['string_callkey'] -> type: string; the call id\n        dataframe['int_segment_num'] -> type: int; the record's sequence number\n        dataframe['date_record'] -> type: datetime; the date of the record\n        dataframe['string_time'] -> type: string; the text value of the time\n        dataframe['int_time'] -> type: int; the time in milliseconds since the\n            origin time\n        dataframe['float_geoxcoord'] -> type: float; the longitude of the record\n        dataframe['float_geoycoord'] -> type: float; the latitude of the record\n        dataframe['string_callpriority'] -> type: string; the priority of the call\n        dataframe['string_calltype'] -> type: string; the type of the call\n    \n    Important Info:\n    None\n    \n    Return:\n    object\n    Type: dict\n    Desc: dataframes of data held in a dictionary; each dataframe has\n        dataframe['string_key'] -> type: string; the callkey for the record\n        dataframe['bool_multiple'] -> type: boolean; if the primary metric has\n            multiple values; if true then both init_metric\n            and end_metric will be filled\n        dataframe['init_metric'] -> type: string; the initial value of the metric;\n            e.g. if call priority '1' or '2', etc...\n        dataframe['end_metric'] -> type: string; the ending value of the metric if\n            it changed\n        dataframe['metric_02'] -> type: string; a secondary metric to aid in the\n            plotting of data; e.g. 
call type\n dataframe['start_geox'] -> type: float; the starting geo x coordinate\n dataframe['start_geoy'] -> type: float; the starting geo y coordinate\n dataframe['stop_geox'] -> type: float; the end geo x coordinate if the \n point changed\n dataframe['stop_geoy'] -> type: float; the end geo y coordinate if the \n point changed\n \n dict_return['cp'] -> type: dataframe; primary metric is the call priority \n which did not change\n dict_return['cp_change'] -> type: dataframe; primary metric is the call \n priority which has changed\n dict_return['ct'] -> type: dataframe; primary metric is the call type \n which has not changed\n dict_return['ct_change'] -> type: dataframe; primary metric is the call \n type which has changed\n dict_return['geo'] -> type: dataframe; primary metric is the geo coords\n which has not changed\n dict_return['geo_change'] -> type: dataframe: primary metric is the geo \n coords which has changed\n ''' \n\n #-------------------------------------------------------------------------#\n # objects declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # time declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # iteration declarations (list, set, tuple, counter, dictionary)\n #-------------------------------------------------------------------------#\n \n dict_return = dict()\n list_data_cp, list_data_cp_change = list(), list()\n list_data_ct, list_data_ct_change = list(), list()\n list_data_geo, list_data_geo_change = list(), list()\n list_columns = ['string_key', 'bool_multiple', 'init_metric', 'end_metric', \n 'metric_02', 'start_geox', 'start_geoy', 'stop_geox', \n 'stop_geoy']\n \n #-------------------------------------------------------------------------#\n # variables declarations\n #-------------------------------------------------------------------------#\n\n int_len = len(m_groupby_index)\n int_count = 1\n\n #-------------------------------------------------------------------------#\n # db connections\n #-------------------------------------------------------------------------#\n\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # Start\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$# \n\n #-------------------------------------------------------------------------#\n # start loop through groupby index\n #-------------------------------------------------------------------------#\n \n for key, dataframe in m_groupby_index:\n if int_count == 1 or int_count % 1000 == 0:\n print('starting %i of %i' %(int_count, int_len))\n int_count += 1\n dataframe.sort_values(by = 'int_time', inplace = True)\n series_cp = dataframe['string_callpriority'].dropna()\n series_ct = dataframe['string_calltype'].dropna()\n df_geo = dataframe[['float_geoxcoord', 'float_geoycoord']].dropna()\n \n #-------------------------------------------------------------------------#\n # get metrics\n #-------------------------------------------------------------------------# \n \n # call priority\n if len(series_cp.unique()) > 1:\n string_cp_00 = series_cp.unique()[0]\n string_cp_01 = series_cp.unique()[-1]\n bool_cp_len = True\n elif 
len(series_cp.unique()) == 1:\n string_cp_00 = series_cp.unique()[0]\n string_cp_01 = None\n bool_cp_len = False\n else:\n string_cp_00 = None\n string_cp_01 = None\n bool_cp_len = False\n \n # call type\n if len(series_ct.unique()) > 1:\n string_ct_00 = series_ct.unique()[0]\n string_ct_01 = series_ct.unique()[-1]\n bool_ct_len = True\n elif len(series_ct.unique()) == 1:\n string_ct_00 = series_ct.unique()[0]\n string_ct_01 = None\n bool_ct_len = False\n else:\n string_ct_00 = None\n string_ct_01 = None\n bool_ct_len = False\n \n # geo coords\n if len(df_geo) > 1:\n series_ll_00 = df_geo.iloc[0]\n series_ll_01 = df_geo.iloc[-1]\n bool_geo_len = True\n elif len(df_geo) == 1:\n series_ll_00 = df_geo.iloc[0]\n series_ll_01 = None\n bool_geo_len = False\n else:\n series_ll_00 = None\n series_ll_01 = None\n bool_geo_len = False\n \n #-------------------------------------------------------------------------#\n # look at the call priority\n #-------------------------------------------------------------------------# \n\n if bool_cp_len and bool_geo_len:\n list_data_cp_change.append([key, True, string_cp_00, string_cp_01,\n string_ct_00,\n series_ll_00['float_geoxcoord'],\n series_ll_00['float_geoycoord'],\n series_ll_01['float_geoxcoord'],\n series_ll_01['float_geoycoord']])\n elif bool_cp_len == False and len(series_cp.unique()) and len(df_geo):\n list_data_cp.append([key, False, string_cp_00, None, string_ct_00,\n series_ll_00['float_geoxcoord'],\n series_ll_00['float_geoycoord'],\n None, None])\n else:\n pass\n \n #-------------------------------------------------------------------------#\n # look at the call type\n #-------------------------------------------------------------------------# \n \n if bool_ct_len and bool_geo_len:\n list_data_ct_change.append([key, True, string_ct_00, \n string_ct_01, string_cp_00,\n series_ll_00['float_geoxcoord'],\n series_ll_00['float_geoycoord'],\n series_ll_01['float_geoxcoord'],\n series_ll_01['float_geoycoord']])\n elif bool_ct_len == False and len(series_ct.unique()) and len(df_geo):\n list_data_ct.append([key, False, string_ct_00, None, string_cp_00,\n series_ll_00['float_geoxcoord'],\n series_ll_00['float_geoycoord'],\n None, None])\n else:\n pass\n \n #-------------------------------------------------------------------------#\n # look at the geo_coords\n #-------------------------------------------------------------------------#\n \n if bool_geo_len: \n list_data_geo_change.append([key, True, string_cp_00, None, \n string_ct_00,\n series_ll_00['float_geoxcoord'],\n series_ll_00['float_geoycoord'],\n series_ll_01['float_geoxcoord'],\n series_ll_01['float_geoycoord']])\n elif bool_geo_len == False and len(df_geo):\n list_data_geo.append([key, False, string_cp_00, None, string_ct_00,\n series_ll_00['float_geoxcoord'],\n series_ll_00['float_geoycoord'],\n None, None])\n else:\n pass\n\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # set-up return dictionary\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$# \n\n dict_return['cp'] = pandas.DataFrame(data = list_data_cp, \n columns = list_columns)\n dict_return['cp_change'] = pandas.DataFrame(data = list_data_cp_change, \n columns = list_columns)\n dict_return['ct'] = pandas.DataFrame(data = list_data_ct, \n columns = list_columns)\n dict_return['ct_change'] = pandas.DataFrame(data = 
list_data_ct_change, \n columns = list_columns)\n dict_return['geo'] = pandas.DataFrame(data = list_data_geo, \n columns = list_columns)\n dict_return['geo_change'] = pandas.DataFrame(data = list_data_geo_change, \n columns = list_columns)\n\n #-------------------------------------------------------------------------#\n # variable / object cleanup\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # return value\n #-------------------------------------------------------------------------#\n\n return dict_return\n\ndef plot_one(m_df, m_string_metric, m_string_file_name):\n '''\n this method plots one metric on a picture from google maps\n \n Requirements:\n package pandas\n package matplotlib.pyplot\n package matplotlib.image\n class TextNorm\n \n Inputs:\n m_df\n Type: pandas dataframe\n Desc: dataframes of data held in a dictionary; each dataframe has a \n m_df['string_key'] -> type: string; the callkey for th record\n m_df[bool_multiple'] -> type: boolean; if the primary metric has\n multiple values; if true then both init_metric\n and end_metric will be filled\n m_df['init_metric'] -> type: string; the initial value of the metric; \n e.g. if call priority '1' or '2', etc...\n m_df['end_metric'] -> type: string; the ending value of the metric if\n it changed\n m_df['metric_02'] -> type: string; a secondary metric to aid in the \n plotting of data; e.g. call type\n m_df['start_geox'] -> type: float; the starting geo x coordinate\n m_df['start_geoy'] -> type: float; the starting geo y coordinate\n m_df['stop_geox'] -> type: float; the end geo x coordinate if the \n point changed\n m_df['stop_geoy'] -> type: float; the end geo y coordinate if the \n point changed\n \n m_string_metric\n type: string\n desc: the string to index the dataframe for the legend metric\n \n m_string_file_name\n type: string\n desc: name of the file to save; including extension\n \n Important Info:\n image will be saved in the working directory\n \n Return:\n n/a\n Type: n/a\n Desc: n/a\n '''\n \n #-------------------------------------------------------------------------#\n # objects declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # time declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # iteration declarations (list, set, tuple, counter, dictionary)\n #-------------------------------------------------------------------------#\n \n # [latitude, longitude] ranges of map\n image_coord_lower_left = [32.845953, -96.952832]\n image_coord_upper_right = [32.896463, -96.898045]\n \n # for the color maps\n cp_cm = TextNorm(m_df[m_string_metric])\n \n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # Start\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$# \n \n #-------------------------------------------------------------------------#\n # add axis for image; works; need to ensure you change the scale to the \n # scale of the image loaded; the frame and the x-ticks and y-ticks are \n # taken out\n 
#-------------------------------------------------------------------------#\n\n fig = pyplot.figure(figsize = (7.90, 8.68), frameon = False)\n \n # add data in scatter plot\n ax = fig.add_subplot(111, frameon = False, xticks = [], yticks = [], \n xlim = (image_coord_lower_left[1], image_coord_upper_right[1]),\n ylim = (image_coord_lower_left[0], image_coord_upper_right[0]))\n \n for color_key in cp_cm.dict_textmap:\n array_bool = m_df[m_string_metric] == color_key\n series_x = m_df['start_geox'][array_bool]\n series_y = m_df['start_geoy'][array_bool]\n # series_c = df_cp_1y['init_metric'][array_bool]\n ax.scatter(series_x, series_y, label = color_key, s = 20)\n \n #-------------------------------------------------------------------------#\n # seperate colors for each collection of points \n #-------------------------------------------------------------------------#\n \n #colormap = pyplot.get_cmap('hsv')\n colormap = pyplot.get_cmap('brg')\n #colormap = pyplot.get_cmap('rainbow')\n list_color = [colormap(x) for x in numpy.linspace(start = 0, stop = 1, \n num = len(ax.collections))]\n for t, j1 in enumerate(ax.collections):\n j1.set_color(list_color[t])\n \n #ax.scatter(df_cp_1y['start_geox'], df_cp_1y['start_geoy'],\n # c = cp_1y_cm(df_cp_1y['init_metric']),\n # cmap = 'hsv',\n # s = 20)\n \n #-------------------------------------------------------------------------#\n # add image\n #-------------------------------------------------------------------------#\n \n image_search_area = image.imread('Search_Area.PNG')\n image_ax = fig.add_axes(ax.get_position(), label = 'image', zorder = -1,\n xticks = [], yticks = [], frameon = False)\n image_mpl = image_ax.imshow(image_search_area, aspect = 'auto', \n extent = (image_coord_lower_left[1], \n image_coord_upper_right[1],\n image_coord_lower_left[0],\n image_coord_upper_right[0]))\n \n #-------------------------------------------------------------------------#\n # add legend\n #-------------------------------------------------------------------------#\n \n ax.legend(loc = 'upper right', scatterpoints = 1, ncol = 2, fontsize = 8)\n \n #image_search_area = image.imread('Search_Area.PNG')\n #image_ax = fig.add_axes(ax.get_position(), label = 'image', zorder = -1,\n # xticks = [], yticks = [])\n #fig.set_figwidth(len(image_search_area[1]) / 100)\n #fig.set_figheight(len(image_search_area[0]) / 100)\n #fig.frameon = False\n #image_mpl = image_ax.imshow(image_search_area, aspect = 'auto',\n # extent = (image_coord_lower_left[1], \n # image_coord_upper_right[1],\n # image_coord_lower_left[0],\n # image_coord_upper_right[0]),\n # zorder = -1)\n #image_mpl = image_ax.imshow(image_search_area, aspect = 'auto',\n # zorder = -1) \n \n # don't need the code below, already set\n #image_ax.set_xlim((image_coord_lower_left[1], image_coord_upper_right[1]))\n #image_ax.set_ylim((image_coord_lower_left[0], image_coord_upper_right[0]))\n \n #-------------------------------------------------------------------------#\n # save image and close figure\n #-------------------------------------------------------------------------#\n \n fig.savefig(m_string_file_name, bbox_inches = 'tight', pad_inches = 0)\n pyplot.close(fig)\n \ndef plot_bar(m_series, m_top_n, m_string_file_name, m_bool_perc = True,\n m_string_color = 'blue', m_bool_rot_ticks = False):\n '''\n this method plots one metric on a picture from google maps\n \n Requirements:\n package pandas\n package matplotlib.pyplot\n package collections.Counter\n \n Inputs:\n m_series\n Type: pandas series\n Desc: data to 
plot on the bar chart\n \n m_top_n\n Type: int\n Desc: top n to plot on bar graph; if None is passed plot all of the counter \n \n m_string_file_name\n type: string\n desc: name of the file to save; including extension\n \n m_bool_perc\n type: boolean\n desc: flag to plot the percentage or the counts; if True plot percentage\n if False plot counts \n \n m_string_color\n type: string\n desc: color for the bar chart \n \n m_bool_rot_ticks\n type: boolean\n desc: flag to rotate ticks \n \n Important Info:\n image will be saved in the working directory\n \n Return:\n n/a\n Type: n/a\n Desc: n/a\n '''\n \n #-------------------------------------------------------------------------#\n # objects declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # time declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # iteration declarations (list, set, tuple, counter, dictionary)\n #-------------------------------------------------------------------------#\n \n counter_data = Counter(m_series)\n \n #-------------------------------------------------------------------------#\n # variables declarations\n #-------------------------------------------------------------------------#\n \n if m_top_n == None or m_top_n == 0:\n m_top_n = len(counter_data)\n \n #-------------------------------------------------------------------------#\n # db connections\n #-------------------------------------------------------------------------#\n\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # Start\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$# \n\n #-------------------------------------------------------------------------#\n # dataframe of data\n #-------------------------------------------------------------------------#\n \n df_plot = pandas.DataFrame(data = counter_data.most_common(m_top_n),\n columns = ['keys', 'values'])\n \n #-------------------------------------------------------------------------#\n # test for count or percentage\n #-------------------------------------------------------------------------#\n \n if m_bool_perc:\n df_plot['values'] = df_plot['values'] * 100 / sum(df_plot['values'])\n \n #-------------------------------------------------------------------------#\n # plot bar\n #-------------------------------------------------------------------------#\n \n fig, ax = pyplot.subplots(nrows = 1, ncols = 1)\n ax.bar(left = df_plot.index, \n height = df_plot['values'], \n tick_label = df_plot['keys'], \n color = m_string_color)\n \n # rotate ticks 15 degrees\n if m_bool_rot_ticks:\n for tick in ax.get_xticklabels():\n tick.set_rotation(15)\n del tick\n \n #-------------------------------------------------------------------------#\n # save plot\n #-------------------------------------------------------------------------#\n \n fig.savefig(m_string_file_name)\n pyplot.close()\n \n #-------------------------------------------------------------------------#\n # clean-up\n #-------------------------------------------------------------------------#\n \n del df_plot, counter_data, fig, ax\n\ndef plot_traffic(m_df, m_string_metric, m_string_file_name, \n 
m_string_load_file = None):\n '''\n this method plots one metric on a picture from google maps\n \n Requirements:\n package pandas\n package matplotlib.pyplot\n package matplotlib.image\n class TextNorm\n \n Inputs:\n m_df\n Type: pandas dataframe\n Desc: dataframes of data held in a dictionary; each dataframe has a \n m_df['string_key'] -> type: string; the key of the grouping which is\n route_id int_leg_num string_day int_hour\n m_df['string_route_id'] -> type: string; the id of the route e.g.\n 'route_01', 'route_02', etc...\n m_df['int_leg_num'] -> type: int; leg of the route\n m_df['string_day'] -> type: string; day of the week\n m_df['int_hour'] -> type: int; hour of the day 0 - 23\n m_df['float_mean'] -> type: float; mean of the traffic delay\n m_df['float_median'] -> type: float; median of the traffic delay\n m_df['float_std'] -> type: float; standard deviation of the traffic delay\n m_df['float_leg_lat_start'] -> type: float; geo y coordinate of the \n starting point of the leg\n m_df['float_leg_lng_start'] -> type: float; geo x coordinate of the \n starting point of the leg\n m_df['float_leg_lat_stop'] -> type: float; geo y coordinate of the \n end point of the leg\n m_df['float_leg_lng_stop'] -> type: float; geo x coordinate of the \n end point of the leg\n \n m_string_metric\n type: string\n desc: the string to index the dataframe for the legend metric\n \n m_string_file_name\n type: string\n desc: name of the file to save; including extension\n \n m_string_load_file\n type: string\n desc: file to load for background; if none will load default file from \n working directory; Search_Area.png\n \n \n Important Info:\n image will be saved in the working directory\n \n Return:\n n/a\n Type: n/a\n Desc: n/a\n '''\n \n #-------------------------------------------------------------------------#\n # objects declarations\n #-------------------------------------------------------------------------#\n \n counter_leg = Counter(m_df['int_leg_num'])\n df_leg_count = pandas.DataFrame(data = counter_leg.most_common(),\n columns = ['int_leg_num', 'int_count'])\n del counter_leg\n\n #-------------------------------------------------------------------------#\n # time declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # iteration declarations (list, set, tuple, counter, dictionary)\n #-------------------------------------------------------------------------#\n \n # [latitude, longitude] ranges of map\n image_coord_lower_left = [32.845953, -96.952832]\n image_coord_upper_right = [32.896463, -96.898045]\n \n # for the color maps\n cp_cm = TextNorm(df_leg_count['int_leg_num'])\n \n #-------------------------------------------------------------------------#\n # variables\n #-------------------------------------------------------------------------#\n \n int_base = 300\n \n if m_string_load_file == None:\n string_file_map = 'Search_Area.PNG'\n else:\n string_file_map = m_string_load_file\n \n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # Start\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$# \n \n #-------------------------------------------------------------------------#\n # add axis for image; works; need to ensure you change the scale to the \n # scale of 
the image loaded; the frame and the x-ticks and y-ticks are \n # taken out\n #-------------------------------------------------------------------------#\n\n fig = pyplot.figure(figsize = (7.90, 8.68), frameon = False)\n \n # add data in scatter plot\n ax = fig.add_subplot(111, frameon = False, xticks = [], yticks = [], \n xlim = (image_coord_lower_left[1], image_coord_upper_right[1]),\n ylim = (image_coord_lower_left[0], image_coord_upper_right[0]))\n \n for color_key in cp_cm.dict_textmap:\n # get the right coordinates\n array_bool = m_df['int_leg_num'] == color_key\n series_x = m_df['float_leg_lng_stop'][array_bool]\n series_y = m_df['float_leg_lat_stop'][array_bool]\n x_coord = series_x.iloc[0]\n y_coord = series_y.iloc[0]\n \n # calc size\n int_count = df_leg_count[df_leg_count['int_leg_num'] == color_key]\\\n .iloc[0, 1]\n int_size = int_base * int_count\n ax.scatter(x_coord, y_coord, label = color_key, s = int_size, \n alpha = .5)\n \n #-------------------------------------------------------------------------#\n # seperate colors for each collection of points \n #-------------------------------------------------------------------------#\n \n #colormap = pyplot.get_cmap('hsv')\n colormap = pyplot.get_cmap('brg')\n #colormap = pyplot.get_cmap('rainbow')\n list_color = [colormap(x) for x in numpy.linspace(start = 0, stop = 1, \n num = len(ax.collections))]\n for t, j1 in enumerate(ax.collections):\n j1.set_color(list_color[t])\n \n #ax.scatter(df_cp_1y['start_geox'], df_cp_1y['start_geoy'],\n # c = cp_1y_cm(df_cp_1y['init_metric']),\n # cmap = 'hsv',\n # s = 20)\n \n #-------------------------------------------------------------------------#\n # add image\n #-------------------------------------------------------------------------#\n \n image_search_area = image.imread(string_file_map)\n image_ax = fig.add_axes(ax.get_position(), label = 'image', zorder = -1,\n xticks = [], yticks = [], frameon = False)\n image_ax.imshow(image_search_area, aspect = 'auto', \n extent = (image_coord_lower_left[1], \n image_coord_upper_right[1],\n image_coord_lower_left[0],\n image_coord_upper_right[0]))\n \n #-------------------------------------------------------------------------#\n # add legend\n #-------------------------------------------------------------------------#\n\n #ax.legend(loc = 'upper right', scatterpoints = 1, ncol = 2, fontsize = 8)\n \n #image_search_area = image.imread('Search_Area.PNG')\n #image_ax = fig.add_axes(ax.get_position(), label = 'image', zorder = -1,\n # xticks = [], yticks = [])\n #fig.set_figwidth(len(image_search_area[1]) / 100)\n #fig.set_figheight(len(image_search_area[0]) / 100)\n #fig.frameon = False\n #image_mpl = image_ax.imshow(image_search_area, aspect = 'auto',\n # extent = (image_coord_lower_left[1], \n # image_coord_upper_right[1],\n # image_coord_lower_left[0],\n # image_coord_upper_right[0]),\n # zorder = -1)\n #image_mpl = image_ax.imshow(image_search_area, aspect = 'auto',\n # zorder = -1) \n \n # don't need the code below, already set\n #image_ax.set_xlim((image_coord_lower_left[1], image_coord_upper_right[1]))\n #image_ax.set_ylim((image_coord_lower_left[0], image_coord_upper_right[0]))\n \n #-------------------------------------------------------------------------#\n # save image and close figure\n #-------------------------------------------------------------------------#\n \n fig.savefig(m_string_file_name, bbox_inches = 'tight', pad_inches = 0)\n pyplot.close(fig) \n \ndef plot_traffic_bar(m_series, m_string_file_name, m_bool_perc = True,\n 
m_string_color = 'blue', m_bool_rot_ticks = False):\n '''\n this method plots one metric on a picture from google maps\n \n Requirements:\n package pandas\n package matplotlib.pyplot\n package collections.Counter\n \n Inputs:\n m_series\n Type: pandas series\n Desc: data to plot on the bar chart \n \n m_string_file_name\n type: string\n desc: name of the file to save; including extension\n \n m_bool_perc\n type: boolean\n desc: flag to plot the percentage or the counts; if True plot percentage\n if False plot counts \n \n m_string_color\n type: string\n desc: color for the bar chart \n \n m_bool_rot_ticks\n type: boolean\n desc: flag to rotate ticks \n \n Important Info:\n 1. image will be saved in the working directory\n 2. index of series is the tick label\n \n Return:\n n/a\n Type: n/a\n Desc: n/a\n '''\n \n #-------------------------------------------------------------------------#\n # objects declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # time declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # iteration declarations (list, set, tuple, counter, dictionary)\n #-------------------------------------------------------------------------#\n \n \n #-------------------------------------------------------------------------#\n # variables declarations\n #-------------------------------------------------------------------------#\n \n #-------------------------------------------------------------------------#\n # db connections\n #-------------------------------------------------------------------------#\n\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # Start\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$# \n \n #-------------------------------------------------------------------------#\n # test for count or percentage\n #-------------------------------------------------------------------------#\n \n if m_bool_perc:\n series_plot = m_series * 100 / sum(m_series)\n else:\n series_plot = m_series\n series_plot.sort_values(ascending = False, inplace = True)\n \n #-------------------------------------------------------------------------#\n # plot bar\n #-------------------------------------------------------------------------#\n \n fig, ax = pyplot.subplots(nrows = 1, ncols = 1)\n ax.bar(left = numpy.arange(0, len(series_plot), 1), \n height = series_plot, \n tick_label = series_plot.index, \n color = m_string_color)\n \n posit_orig = ax.get_position()\n posit_new = [posit_orig.x0, posit_orig.y0 + .4, posit_orig.width,\n posit_orig.height * 0.6]\n ax.set_position(posit_new)\n \n # rotate ticks 15 degrees\n if m_bool_rot_ticks:\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n del tick\n \n #-------------------------------------------------------------------------#\n # save plot\n #-------------------------------------------------------------------------#\n \n fig.savefig(m_string_file_name)\n pyplot.close()\n \n #-------------------------------------------------------------------------#\n # clean-up\n #-------------------------------------------------------------------------#\n \n del series_plot, fig, ax\n \ndef 
small_area_plots(m_df, m_list_cp = None, m_string_title = '', \n m_string_save_name = ''):\n '''\n this method will plot the call priorities in m_list_cp; if m_list_cp is \n None the method will plot all the call priorites (columns of the dataframe)\n \n Requirements:\n package pandas\n package matplotlib.pyplot\n \n Inputs:\n m_df\n Type: pandas dataframe\n Desc: for each year (index) the counts of the call priorities 1 - 9\n m_df['2'] -> type: int; count of call priority 2 by year\n ...same for all call prioities 1 - 9\n \n m_list_cp\n Type: list\n Desc: the call priorities to plot; if none plot all\n \n m_string_title\n Type: string\n Desc: preface for the title\n \n m_string_save_name\n Type: string\n Desc: first part of the file name to save\n \n Important Info:\n None\n \n Return:\n n/a\n Type: n/a\n Desc: n/a\n ''' \n\n #-------------------------------------------------------------------------#\n # objects declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # time declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # iteration declarations (list, set, tuple, counter, dictionary)\n #-------------------------------------------------------------------------#\n \n if m_list_cp == None or len(m_list_cp) == 0:\n m_list_cp = list(m_df.columns)\n\n #-------------------------------------------------------------------------#\n # variables declarations\n #-------------------------------------------------------------------------#\n\n #-------------------------------------------------------------------------#\n # db connections\n #-------------------------------------------------------------------------#\n\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #\n # Start\n #\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#\n \n for string_series in m_list_cp:\n ax_cp = m_df[string_series].plot(kind = 'bar', grid = True, \n title = m_string_title + ' ' + string_series)\n fig_cp = ax_cp.get_figure()\n fig_cp.savefig(m_string_save_name + '_' + string_series + '.png')\n pyplot.close(fig_cp)\n del ax_cp, fig_cp\n\n #-------------------------------------------------------------------------#\n # return value\n #-------------------------------------------------------------------------#\n\n pass","sub_path":"abb_crime_code/create_graphs/create_graph_methods.py","file_name":"create_graph_methods.py","file_ext":"py","file_size_in_byte":38387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"512358710","text":"import pytest\nfrom django.core import management\n\nfrom ..models import Answer, ErrorPost\n\n\n@pytest.yield_fixture(scope='function')\ndef example_data(db):\n management.call_command(\n 'loaddata',\n 'error_posts/fixtures/example_data.json',\n verbosity=0)\n yield\n Answer.objects.all().delete()\n ErrorPost.objects.all().delete()\n\n\nfrom selenium.webdriver import Firefox\n\n\n@pytest.yield_fixture(scope='session')\ndef webdriver():\n driver = Firefox()\n yield driver\n 
driver.quit()\n","sub_path":"error_posts/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"402798902","text":"def my_enumerate(obj):\n for i in range(len(obj)):\n yield i, obj[i]\n\n\ndef my_reduce(function, sequence, initial=0):\n return sequence[initial] + function(sequence[initial + 1:])\n\n\ndef my_accumulate(sequence):\n n_sequence = list()\n for i in range(1, len(sequence) + 1):\n n_sequence.append(my_reduce(sum, sequence[:i]))\n return n_sequence\n\n\ndef fizzbuzz(n):\n for i in range(n):\n output = \"\"\n if i % 3 == 0:\n output += (\"fizz\")\n if i % 5 == 0:\n output += (\"buzz\")\n print(output or i)\n\n\nlist_test = [\"Car\", \"Boy\", \"Block\"]\niter_list = my_enumerate(list_test)\nprint(list(iter_list))\n\nnum_test = [1, 2, 3, 4, 5]\nprint(my_reduce(sum, num_test))\n\nacc_list = [1, 2, 3, 4, 5]\nprint(my_accumulate(num_test))\n\nfizzbuzz(15)\n","sub_path":"flow_task.py","file_name":"flow_task.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"584313092","text":"#Phoebe Hughes\n#Cold Calling and Random Group Generator\n\nimport random as r\nimport sys\n\ndef coldCall(students):\n\t'''Input: list of students names\n\tOutput: random student'''\n\tshuffled= students.copy()\n\tfor i in range(len(students)):\n\t\tr.shuffle(shuffled)\n\treturn (shuffled)\n\t\n\t\ndef randomGroups(students, type, num):\n\t'''Input: list of students names\n\ttype is either \"GS\" for group size or \"NG\" for number of groups\n\tnum is either the group size or number of groups\n\tOutput: List of lists. Each group is a sub-list'''\n\tgroups=[]\n\t\n\tif type== \"GS\":\n\t\tgroupSize= num\n\t\tremainder= len(students)%groupSize\n\t\tnumGroups= (len(students)-remainder)/groupSize\n\telif type==\"NG\":\n\t\tnumGroups= num\n\t\tremainder= len(students)%numGroups\n\t\tgroupSize= (len(students)-remainder)/ numGroups\n\telse:\n\t\tprint (\"Error! Incorrect type. 
Choose GS for group size or NG for number of groups.\")\n\t\treturn []\n\t\n\tfor i in range(int(numGroups)):\n\t\tgroups.append([]) #adds sub list to list\n\t\n\tshuffled= students.copy()\n\tr.shuffle(shuffled)\n\t\n\tif remainder==0:\n\t\tstu= shuffled\n\telse:\n\t\tstu= shuffled[0:-remainder]\n\t\n\t\n\t#creates even groups of groupSize\n\tfor student in stu:\n\t\tadded= False\n\t\twhile not(added):\n\t\t\trandGroup= r.randint(0, numGroups-1) #chooses a random group and places in group if group is not full\n\t\t\tif len(groups[randGroup])!=groupSize: \n\t\t\t\tgroups[randGroup].append(student)\n\t\t\t\tadded=True\n\t\n\t#takes remaining students and places them into groups\n\t#because students at end are always the remainder they will always be in larger groups unless there is no remainder\n\tif remainder!=0:\n\t\tremainingStudents= shuffled[-remainder:]\n\t\tfor i in range(len(remainingStudents)):\n\t\t\tgroups[i].append(remainingStudents[i]);\n\t\n\treturn groups\n\n\t\ndef printGroups(groups):\n\tfor i in range(len(groups)):\n\t\tnum= str(i+1)\n\t\tprint (\"\\nGroup \" + num + \":\")\n\t\tfor student in groups[i]:\n\t\t\tprint (student.strip())\n\n\ndef main(args):\n\tif len(args)<2:\n\t\tprint (\"Usage: coldCall.py \")\n\t\treturn\n\t\t\n\tstudents=[\"Annabelle\",\n\t\t\t\"Briana\",\n\t\t\t\"Chante\",\n\t\t\t\"Dona\",\n\t\t\t\"Elizabeth\",\n\t\t\t\"Guadalupe\",\n\t\t\t\"Jenni\",\n\t\t\t\"Kiaira\",\n\t\t\t\"Kiley\",\n\t\t\t\"Nadine\",\n\t\t\t\"Natalie\",\n\t\t\t\"Sarah\",\n\t\t\t\"Shar\",\n\t\t\t\"Tabatha\",\n\t\t\t\"Tatiana\",\n\t\t\t\"Xiaja\",\n\t\t\t\"Veronica\",\n\t\t\t\"Devine\",\n\t\t\t\"Zaakirah\",\n\t\t\t\"Zion\"]\n\t\n\ttype= args[1]\n\t\n\tif type==\"CC\":\n\t\tshuffled= coldCall(students)\n\t\tfor stu in shuffled:\n\t\t\tprint (stu.strip())\n\t\n\telif(type==\"NG\" or type==\"GS\"):\n\t\tif (len(args) !=3):\n\t\t\tprint (\"Usage: coldCall.py \")\n\t\t\treturn\n\t\tnum= int(args[2])\n\t\tgroups= randomGroups(students, type, num)\n\t\tprintGroups(groups)\n\t\n\telse:\n\t\tprint (\"Non valid type! 
Choose 'CC' cold calling, 'NG' for number of groups, or 'GS' for group size.\")\n\n\t\t\t\ndef test():\n\tstudents= [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]\n\t\n\tprint (\"Random student: \", coldCall(students))\n\tprint (\"Another random student: \", coldCall(students))\n\t\n\tprint (\"\\n\\nRandom groups of size 3\")\n\tgroups= randomGroups(students, \"GS\", 3)\n\tprintGroups(groups)\n\t\n\tprint (\"\\n\\nFour random groups\")\n\tgroups= randomGroups(students, \"NG\", 4)\n\tprintGroups(groups)\n\nif __name__=='__main__':\n\t#test()\n main(sys.argv)\n\t\t\n\t\t\t\n\t\n","sub_path":"randomGen.py","file_name":"randomGen.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"173421932","text":"import pickle\nimport time\n\nimport nltk\nfrom sklearn import svm\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\n\ndef train_classifier_obsolete(tweets, training_tweets, testing_tweets):\n \"\"\"\n Uses svm and naivebayes from nltk to classify tweets as positive and negative\n\n :param tweets: a list of labelled tweets in the tuple format - [(tweet, sentiment), ...]\n :param training_tweets: the partition of tweets used for training\n :param testing_tweets: the partition of tweets used for testing\n :return: correct result count of svm, nb, and both together\n \"\"\"\n\n def get_words_in_tweets(tweets_get):\n all_words = []\n for (words, sentiment) in tweets_get:\n all_words.extend(words)\n return all_words\n\n def get_word_features(wordlist_get):\n wordlist_get = nltk.FreqDist(wordlist_get)\n word_features = wordlist_get.keys()\n return word_features\n\n word_features = get_word_features(get_words_in_tweets(training_tweets))\n\n def extract_features(document):\n document_words = set(document)\n features = {}\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features\n\n training_set = nltk.classify.apply_features(extract_features, training_tweets)\n nb_classifier = nltk.NaiveBayesClassifier.train(training_set)\n\n svm_classifier = nltk.classify.SklearnClassifier(LinearSVC())\n svm_classifier.train(training_set)\n\n svm_results = []\n nb_results = []\n results = []\n\n for tweet in testing_tweets:\n test_line = tweet[0]\n tweet_words = test_line.split()\n svm_result = svm_classifier.classify(extract_features(tweet_words))\n nb_result = nb_classifier.classify(extract_features(tweet_words))\n result = tweet[1]\n svm_results.append(svm_result)\n nb_results.append(nb_result)\n results.append(result)\n # print(\"%s:%s:%s\" % (svm_result, nb_result, result))\n\n return svm_results, nb_results, results\n\n\ndef train_classifier_sklearn(training_tweets, testing_tweets, training_labels, testing_labels):\n \"\"\"\n Uses svm and naivebayes from sklearn to classify tweets as positive and negative\n\n :param training_tweets:\n :param testing_tweets:\n :param training_labels:\n :param testing_labels:\n :return:\n \"\"\"\n\n # Read the data\n train_data = training_tweets\n train_labels = training_labels\n test_data = testing_tweets\n test_labels = testing_labels\n\n # Create feature vectors\n vectorizer = TfidfVectorizer(min_df=5,\n max_df=0.8,\n sublinear_tf=True,\n use_idf=True)\n train_vectors = vectorizer.fit_transform(train_data)\n 
test_vectors = vectorizer.transform(test_data)\n\n    # \"\"\"\n    # Perform classification with SVM, kernel=rbf\n    classifier_rbf = svm.SVC()\n    t0 = time.time()\n    classifier_rbf.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_rbf = classifier_rbf.predict(test_vectors)\n    t2 = time.time()\n    time_rbf_train = t1 - t0\n    time_rbf_predict = t2 - t1\n    # \"\"\"\n\n    # Perform classification with SVM, kernel=linear\n    classifier_linear = svm.SVC(kernel='linear')\n    t0 = time.time()\n    classifier_linear.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_linear = classifier_linear.predict(test_vectors)\n    t2 = time.time()\n    time_linear_train = t1 - t0\n    time_linear_predict = t2 - t1\n\n    # \"\"\"\n    # Perform classification with LinearSVC (liblinear)\n    classifier_liblinear = svm.LinearSVC()\n    t0 = time.time()\n    classifier_liblinear.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_liblinear = classifier_liblinear.predict(test_vectors)\n    t2 = time.time()\n    time_liblinear_train = t1 - t0\n    time_liblinear_predict = t2 - t1\n    # \"\"\"\n\n    # Print results in a nice table\n    print(\"Results for SVC(kernel=rbf)\")\n    print(\"Training time: %fs; Prediction time: %fs\" % (time_rbf_train, time_rbf_predict))\n    print(classification_report(test_labels, prediction_rbf))\n    print(\"Results for SVC(kernel=linear)\")\n    print(\"Training time: %fs; Prediction time: %fs\" % (time_linear_train, time_linear_predict))\n    print(classification_report(test_labels, prediction_linear))\n    print(\"Results for LinearSVC()\")\n    print(\"Training time: %fs; Prediction time: %fs\" % (time_liblinear_train, time_liblinear_predict))\n    print(classification_report(test_labels, prediction_liblinear))\n\n\n    # Naive Bayes Classifier\n    # Fit a naive bayes model to the training data.\n    # This will train the model using the word counts we computed, and the existing classifications in the training set.\n    # nb = MultinomialNB()\n    nb = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)\n    t0 = time.time()\n    nb.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_nb = nb.predict(test_vectors)\n    t2 = time.time()\n    time_nb_train = t1 - t0\n    time_nb_predict = t2 - t1\n\n    print(\"Results for Naive Bayes (MultinomialNB)\")\n    print(\"Training time: %fs; Prediction time: %fs\" % (time_nb_train, time_nb_predict))\n    print(classification_report(test_labels, prediction_nb))\n    return vectorizer, classifier_linear, nb\n\n\ndef load_dataset_sklearn(filename):\n    dataset = pickle.load(open(filename, 'rb'))\n    # tweets = pickle.load(open('cruz_list_combined.p', 'rb'))\n    data = []\n    target = []\n\n    for (tweet, sentiment) in dataset:\n        data.append(tweet)\n        target.append(sentiment)\n\n    total_tweet_count = len(data)\n\n    # print(\"Dataset count: \\t\\t%s\" % total_tweet_count)\n\n    return data, target\n\n\ndef load_and_partition_dataset_sklearn(filename):\n    dataset = pickle.load(open(filename, \"rb\"))\n    # tweets = pickle.load(open('cruz_list_combined.p', 'rb'))\n    data = []\n    target = []\n    for (tweet, sentiment) in dataset:\n        data.append(tweet)\n        target.append(sentiment)\n    X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=1810)\n    total_tweet_count = len(data)\n    training_tweet_count = len(X_train)\n    testing_tweet_count = len(X_test)\n    print(\"Total count: \\t\\t%s\" % total_tweet_count)\n    print(\"Testing count: \\t\\t%s\" % testing_tweet_count)\n    print(\"Training count: \\t%s\" % training_tweet_count)\n    return X_train, X_test, y_train, y_test\n\n\n\ndef load_and_label_and_dump(filename, 
label, dumpname=\"\"):\n    dataset = pickle.load(open(filename, 'rb'))\n    # tweets = pickle.load(open('cruz_list_combined.p', 'rb'))\n    data = []\n    resultset = []\n    target = []\n\n    for (tweet, sentiment) in dataset:\n        data.append(tweet)\n        target.append(sentiment)\n\n    X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=1810)\n\n    total_tweet_count = len(data)\n    training_tweet_count = len(X_train)\n    testing_tweet_count = len(X_test)\n\n    for i in range(total_tweet_count):\n        resultset.append((data[i], label))\n\n    # print(\"Total count: \\t\\t%s\" % total_tweet_count)\n    # print(\"Testing count: \\t\\t%s\" % testing_tweet_count)\n    # print(\"Training count: \\t%s\" % training_tweet_count)\n\n    pickle.dump(resultset, open(dumpname, \"wb\"))\n\n\ndef load_dataset_nltk(filename):\n    X_train, X_test, y_train, y_test = load_and_partition_dataset_sklearn(filename)\n    training_labelled_tweets = []\n    for x, y in zip(X_train, y_train):\n        training_labelled_tweets.append((x, y))\n    testing_labelled_tweets = []\n    for x, y in zip(X_test, y_test):\n        testing_labelled_tweets.append((x, y))\n    all_labelled_tweets = training_labelled_tweets + testing_labelled_tweets\n    return all_labelled_tweets, training_labelled_tweets, testing_labelled_tweets\n\n\ndef accuracy_three(svm_results, nb_results, results, total_tweet_count):\n    both_correct = 0\n    svm_correct = 0\n    nb_correct = 0\n\n    testing_tweet_count = len(results)\n    training_tweet_count = total_tweet_count - testing_tweet_count\n    print(testing_tweet_count)\n\n    for svm_result_a, nb_result_a, result_a in zip(svm_results, nb_results, results):\n        if svm_result_a == nb_result_a and nb_result_a == result_a:\n            both_correct += 1\n            svm_correct += 1\n            nb_correct += 1\n        elif svm_result_a == result_a:\n            svm_correct += 1\n        elif nb_result_a == result_a:\n            nb_correct += 1\n\n    print(\"SVM:\\t\\t %s :: %s\" % (svm_correct, svm_correct * 1.0 / testing_tweet_count * 100.0))\n    print(\"Naive Bayes: %s :: %s\" % (nb_correct, nb_correct * 1.0 / testing_tweet_count * 100.0))\n    print(\"Both:\\t\\t %s :: %s\" % (both_correct, both_correct * 1.0 / testing_tweet_count * 100.0))\n\n\ndef predict_and_dump_list(training_tweets, testing_tweets, training_labels, testing_labels):\n    \"\"\"\n    Uses svm and naivebayes from sklearn to classify tweets as positive and negative\n\n    :param training_tweets:\n    :param testing_tweets:\n    :param training_labels:\n    :param testing_labels:\n    :return:\n    \"\"\"\n\n    # Read the data\n    train_data = training_tweets\n    train_labels = training_labels\n    test_data = testing_tweets\n    test_labels = testing_labels\n\n    # Create feature vectors\n    vectorizer = TfidfVectorizer(min_df=5,\n                                 max_df=0.8,\n                                 sublinear_tf=True,\n                                 use_idf=True)\n    train_vectors = vectorizer.fit_transform(train_data)\n    test_vectors = vectorizer.transform(test_data)\n\n    # \"\"\"\n    # Perform classification with SVM, kernel=rbf\n    classifier_rbf = svm.SVC()\n    t0 = time.time()\n    classifier_rbf.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_rbf = classifier_rbf.predict(test_vectors)\n    t2 = time.time()\n    time_rbf_train = t1 - t0\n    time_rbf_predict = t2 - t1\n    # \"\"\"\n\n    # Perform classification with SVM, kernel=linear\n    classifier_linear = svm.SVC(kernel='linear')\n    t0 = time.time()\n    classifier_linear.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_linear = classifier_linear.predict(test_vectors)\n    t2 = time.time()\n    time_linear_train = t1 - t0\n    time_linear_predict = t2 - t1\n\n    # \"\"\"\n    # Perform classification with LinearSVC (liblinear)\n    
classifier_liblinear = svm.LinearSVC()\n    t0 = time.time()\n    classifier_liblinear.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_liblinear = classifier_liblinear.predict(test_vectors)\n    t2 = time.time()\n    time_liblinear_train = t1 - t0\n    time_liblinear_predict = t2 - t1\n    # \"\"\"\n\n    # Print results in a nice table\n    # print(\"Results for SVC(kernel=rbf)\")\n    # print(\"Training time: %fs; Prediction time: %fs\" % (time_rbf_train, time_rbf_predict))\n    # print(classification_report(test_labels, prediction_rbf))\n    print(\"Results for SVC(kernel=linear)\")\n    print(\"Training time: %fs; Prediction time: %fs\" % (time_linear_train, time_linear_predict))\n    print(classification_report(test_labels, prediction_linear))\n    # print(\"Results for LinearSVC()\")\n    # print(\"Training time: %fs; Prediction time: %fs\" % (time_liblinear_train, time_liblinear_predict))\n    # print(classification_report(test_labels, prediction_liblinear))\n\n\n    # Naive Bayes Classifier\n    # Fit a naive bayes model to the training data.\n    # This will train the model using the word counts we computed, and the existing classifications in the training set.\n    # nb = MultinomialNB()\n    nb = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)\n    t0 = time.time()\n    nb.fit(train_vectors, train_labels)\n    t1 = time.time()\n    prediction_nb = nb.predict(test_vectors)\n    t2 = time.time()\n    time_nb_train = t1 - t0\n    time_nb_predict = t2 - t1\n\n    print(\"Results for Naive Bayes (MultinomialNB)\")\n    print(\"Training time: %fs; Prediction time: %fs\" % (time_nb_train, time_nb_predict))\n    print(classification_report(test_labels, prediction_nb))\n\n\ndef sklearn_train_and_dump(training_tweets, training_labels):\n    \"\"\"\n    Uses svm and naivebayes from sklearn to classify tweets as positive and negative;\n    fits both models on the training data and pickles them\n\n    :param training_tweets:\n    :param training_labels:\n    :return:\n    \"\"\"\n\n    # Read the data\n    train_data = training_tweets\n    train_labels = training_labels\n\n    # Create feature vectors\n    vectorizer = TfidfVectorizer(min_df=5,\n                                 max_df=0.8,\n                                 sublinear_tf=True,\n                                 use_idf=True)\n    train_vectors = vectorizer.fit_transform(train_data)\n\n    # Perform classification with SVM, kernel=linear; the model must be fit\n    # before it is dumped, otherwise predicting with it later raises NotFittedError\n    classifier_linear = svm.SVC(kernel='linear')\n    classifier_linear.fit(train_vectors, train_labels)\n\n    # Naive Bayes Classifier\n    # Fit a naive bayes model to the training data.\n    # This will train the model using the word counts we computed, and the existing classifications in the training set.\n    nb = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)\n    nb.fit(train_vectors, train_labels)\n\n    # pickle needs binary mode (\"wb\"); the fitted vectorizer is dumped as well so\n    # that sklearn_load_and_test can transform test data with the same vocabulary\n    pickle.dump(vectorizer, open(\"sklearn_vectorizer.p\", \"wb\"))\n    pickle.dump(nb, open(\"sklearn_nb.p\", \"wb\"))\n    pickle.dump(classifier_linear, open(\"sklearn_svm.p\", \"wb\"))\n\n\ndef sklearndata_to_ntlkdata(data, target):\n    length = len(data)\n    result = []\n    for i in range(length):\n        result.append((data[i], target[i]))\n    return result\n\n\ndef sklearn_load_and_test(testing_tweets, testing_labels):\n    # Read the data\n    test_data = testing_tweets\n    test_labels = testing_labels\n\n    # Load the fitted vectorizer dumped by sklearn_train_and_dump; a freshly\n    # constructed TfidfVectorizer cannot transform data before being fit\n    vectorizer = pickle.load(open(\"sklearn_vectorizer.p\", \"rb\"))\n    test_vectors = vectorizer.transform(test_data)\n\n    # Perform classification with SVM, kernel=linear\n    classifier_linear = pickle.load(open(\"sklearn_svm.p\", \"rb\"))\n    prediction_linear = classifier_linear.predict(test_vectors)\n\n    # Naive Bayes Classifier\n    # Load the naive bayes model that was fit on the 
training set.\n    nb = pickle.load(open(\"sklearn_nb.p\", \"rb\"))  # binary read mode; \"w\" would truncate the file\n    prediction_nb = nb.predict(test_vectors)\n\n    print(\"Results for SVC(kernel=linear)\")\n    print(classification_report(test_labels, prediction_linear))\n\n    print(\"Results for Naive Bayes (MultinomialNB)\")\n    print(classification_report(test_labels, prediction_nb))\n\n\ndef generate_word_list(data, label):\n    length = len(data)\n    positive_words = []\n    negative_words = []\n    for i in range(length):\n        if label[i] == \"positive\":\n            print(data[i].upper())\n            positive_words.append(data[i].split())\n        else:\n            print(data[i].lower())\n            negative_words.append(data[i].split())\n\n\ndef load_dataset_multi_cat(filename1, filename2):\n    data1, target1 = load_dataset_sklearn(filename1)\n    data2, target2 = load_dataset_sklearn(filename2)\n    for i in range(len(data1)):\n        target1[i] = \"p1-\" + target1[i]\n    for i in range(len(data2)):\n        target2[i] = \"p2-\" + target2[i]\n    return data1 + data2, target1 + target2\n\n\ndef load_partition_multi_cat(filename1, filename2):\n    # unpack the (data, target) tuple so train_test_split sees two arrays\n    return train_test_split(*load_dataset_multi_cat(filename1, filename2))\n\n\ndef combine(file1, file2, result_file):\n    result = []\n    data1, target1 = load_dataset_sklearn(file1)\n    data2, target2 = load_dataset_sklearn(file2)\n\n    for i, z in zip(data1, target1):\n        result.append((i, z))\n\n    for i, z in zip(data2, target2):\n        result.append((i, z))\n\n    pickle.dump(result, open(result_file, \"wb\"))\n\n\nif __name__ == \"__main__\":\n    # filename = 'cruz_list_combined.p'\n    # filename = 'tedCruz_43k_7th_vader.p'\n    # filename = 'trump_43k_9th_vader.p'\n    # filename = 'trump_list_combined.p'\n\n    # train_filename = input(\"Training file:\")\n    # test_filename = input(\"Testing file:\")\n    #\n    # print(\"******************** Training dataset **********************\")\n    # tweets_train, labels_train = load_dataset_sklearn(train_filename)\n    # print(\"**************** End of Training dataset *******************\\n\\n\")\n    #\n    # print(\"********************* Testing dataset **********************\")\n    # tweets_test, labels_test = load_dataset_sklearn(test_filename)\n    # print(\"***************** End of Testing dataset *******************\\n\\n\")\n    #\n    # nltk_training = sklearndata_to_ntlkdata(tweets_train, labels_train)\n    # nltk_testing = sklearndata_to_ntlkdata(tweets_test, labels_test)\n    # nltk_all = nltk_training + nltk_testing\n\n    # train_classifier_sklearn(tweets_train, tweets_test, labels_train, labels_test)\n    ### all_tweets, labelled_training_tweets, labelled_testing_tweets = load_dataset_nltk(train_filename)\n    # svm_result, nb_result, result = train_classifier_obsolete(nltk_all, nltk_training, nltk_testing)\n    # accuracy_three(svm_result, nb_result, result, len(nltk_all))\n    datasets_home = \"../Datasets/\"\n\n    hillary_file = \"hillary-agg-17-3-0.8.p\"\n    # trump_file = \"trump_list_combined.p\"\n    trump_file = \"trump-agg.p\"\n    hillary_entity_file = \"hillary-entity.p\"\n    trump_entity_file = \"trump-entity.p\"\n    combined_entity_file = \"combined-entity.p\"\n\n    # print(\"test!\")\n    # hillary_train, hillary_test, hillary_train_labels, hillary_test_labels = load_and_partition_dataset_sklearn(\n    #     \"list.p\")\n    # hillary_vectorizer, hillary_svm, hillary_nb = train_classifier_sklearn(hillary_train, hillary_test,\n    #                                                                        hillary_train_labels, hillary_test_labels)\n\n    print(\"Hillary!\")\n    hillary_train, hillary_test, hillary_train_labels, hillary_test_labels = load_and_partition_dataset_sklearn(\n        datasets_home + hillary_file)\n    hillary_vectorizer, hillary_svm, hillary_nb = 
train_classifier_sklearn(hillary_train, hillary_test,\n hillary_train_labels, hillary_test_labels)\n\n print(\"Trump!\")\n trump_train, trump_test, trump_train_labels, trump_test_labels = load_and_partition_dataset_sklearn(datasets_home + trump_file)\n trump_vectorizer, trump_svm, trump_nb = train_classifier_sklearn(trump_train, trump_test, trump_train_labels,\n trump_test_labels)\n\n load_and_label_and_dump(datasets_home + hillary_file, \"hillary\", hillary_entity_file)\n load_and_label_and_dump(datasets_home + trump_file, \"trump\", trump_entity_file)\n\n combine(hillary_entity_file, trump_entity_file, combined_entity_file)\n\n print(\"Entity!\")\n entity_train, entity_test, e_train_labels, e_test_labels = load_and_partition_dataset_sklearn(combined_entity_file)\n entity_vectorizer, entity_svm, entity_nb = train_classifier_sklearn(entity_train, entity_test, e_train_labels,\n e_test_labels)\n\n pickle.dump([entity_vectorizer, entity_svm, entity_nb], open(\"classifiers/entity.p\", \"wb\"))\n pickle.dump([hillary_vectorizer, hillary_svm, hillary_nb], open(\"classifiers/hillary.p\", \"wb\"))\n pickle.dump([trump_vectorizer, trump_svm, trump_nb], open(\"classifiers/trump.p\", \"wb\"))\n","sub_path":"twitter/train_crassify_all.py","file_name":"train_crassify_all.py","file_ext":"py","file_size_in_byte":19417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"137037234","text":"from gene_shapes import Triangle\nfrom gene_shapes import OpenTriangle\n\nimport matplotlib.patches as patches\nfrom matplotlib.path import Path\nfrom matplotlib.text import Text\n\ndef draw_region(\n seq,\n start=None,\n end=None,\n intron_threshold=1000,\n exon=Triangle(width=1),\n intron=OpenTriangle(width=0.5, y_offset=0.5),\n other_shapes=dict(),\n names_to_print=dict(),\n ):\n \"\"\"\n\n Keyword arguments:\n names_to_print -- dict.\n \"\"\"\n if start is None:\n start = 0\n if end is None:\n end = len(seq)\n feature_patches = list()\n text_patches = list()\n for feature in seq[start:end].features:\n if feature.id in names_to_print:\n if 's' not in names_to_print[feature.id]:\n names_to_print[feature.id]['s'] = feature.id\n start = feature.location.start\n end = feature.location.end\n names_to_print[feature.id]['x'] = start + 0.5 * (end - start)\n text_patches.append(names_to_print[feature.id])\n if feature.type == 'CDS' and exon is not None:\n exons = list()\n introns = list()\n reverse = feature.strand == -1\n parts = sorted(feature.location.parts, key=lambda f: min(f.start, f.end), reverse=reverse)\n for i in range(len(parts)):\n strand = parts[i].strand\n # Draw intron if not the last exon\n if i > 0 and intron is not None:\n if strand in {None, 0, 1}:\n strand = 1\n start = parts[i - 1].end\n distance = (parts[i].start - parts[i - 1].end)\n else:\n strand = -1\n start = parts[i - 1].start\n distance = (parts[i].end - parts[i - 1].start)\n if abs(distance) >= intron_threshold:\n incl_intron = True\n verts, codes = intron(\n start,\n 0.,\n distance\n )\n introns.append([start, distance])\n p = Path(verts, codes)\n feature_patches.append(\n patches.PathPatch(\n p,\n **intron.properties\n )\n )\n else:\n incl_intron = False\n\n # Now draw the exon\n start = parts[i].start\n if strand in {None, 0, 1}:\n strand = 1\n start = parts[i].start\n else:\n strand = -1\n start = parts[i].end\n distance = (parts[i].end - parts[i].start) * strand\n\n if len(exons) == 0 or incl_intron:\n exons.append([start, distance])\n else: # Join two exons\n exons[-1][1] += distance\n\n 
for e in exons:\n verts, codes = exon(e[0], 0., e[1])\n p = Path(verts, codes)\n feature_patches.append(\n patches.PathPatch(\n p,\n **exon.properties\n )\n )\n\n elif feature.type in other_shapes:\n part = feature.location\n strand = part.strand\n if strand in {None, 0, 1}:\n strand = 1\n start = part.start\n else:\n strand = -1\n start = part.end\n distance = (part.end - part.start) * strand\n\n verts, codes = other_shapes[feature.type](start, 0., distance)\n p = Path(verts, codes)\n feature_patches.append(\n patches.PathPatch(\n p,\n **other_shapes[feature.type].properties\n )\n )\n\n return feature_patches, text_patches\n","sub_path":"lib/draw_wrappers.py","file_name":"draw_wrappers.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"209657823","text":"from symspellpy import SymSpell\r\nfrom json import load\r\nfrom re import sub, findall\r\n\r\ntranslation = {281: 'e', 243: 'o', 261: 'a', 347: 's', 322: 'l', 380: 'z', 378: 'z', 263: 'c', 324: 'n'}\r\nperson = 'Bartek Paulewicz'\r\n\r\nsym_spell = SymSpell()\r\nsym_spell.load_dictionary('wordFrequencies.txt', 0, 1, encoding='utf-8')\r\nwordSeg = sym_spell.word_segmentation\r\n\r\nwith open(\"marcin.json\", \"r\", encoding='latin1') as raw, open(\"both.txt\", \"w\", encoding='utf-8') as out:\r\n obj = load(raw)\r\n prev = obj['messages'][-1]['sender_name']\r\n for i, msg in enumerate(reversed(obj['messages'])):\r\n if i % 100 == 0: print(i)\r\n content = msg\\\r\n .get('content', '')\\\r\n .encode('latin1')\\\r\n .decode('utf8')\\\r\n .lower()\r\n if (not content.startswith('http')) \\\r\n and ('wysłał' not in content) \\\r\n and (not content.endswith('kolory czatu.')\r\n ):\r\n content = sub('[^a-z ]', ' ', content.translate(translation)).strip()\r\n if content:\r\n repeated = findall(r'([eyuoam])\\1+', content)\r\n if repeated:\r\n for group in repeated:\r\n content = sub(f'{group}+', group, content)\r\n content = ' '.join([word\r\n if word in sym_spell.words\r\n else wordSeg(word).corrected_string\r\n for word in content.split()\r\n ])\r\n sender = msg['sender_name']\r\n if (sender != person and prev == person) or (sender == person and prev != person):\r\n out.write('\\n')\r\n prev = sender\r\n out.write(content+' ')\r\n #print(msg.get('content', ''), '|', content)","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"352144840","text":"from selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.remote.webelement import WebElement\n\nfrom places.models import Place\nfrom tests.functional_tests.base import FunctionalTest\n\n\nclass TestWorkflow(FunctionalTest):\n def setUp(self):\n super().setUp()\n self.set_up_anonymous()\n self.browser.get(self.live_server_url)\n\n def test_set_up(self):\n self.assertEqual(4, Place.objects.all().count())\n\n def test_login_not_possible(self):\n self.do_logon()\n self.assertFalse(self.check_if_logged_in())\n\n def test_cannot_change_place(self):\n self.browser.get(self.live_server_url + '/places')\n detail_button = self.get_detail_block('id_place_list')\n detail_button.click()\n with self.assertRaises(NoSuchElementException):\n self.wait_for_find_element_by_id('id_detail_action_update_place')\n\n def test_start_site(self):\n self.browser.get(self.live_server_url)\n self.assertIn('HOST THE WAY', self.browser.title)\n\n def 
test_start_site_index(self):\n self.browser.get(self.live_server_url + '/places')\n self.wait_for_find_element_by_id('id_place_list')\n self.assertTrue(self.browser.find_element_by_id(f'id_place_card_{self.last_place_id}'))\n self.assertTrue(self.browser.find_element_by_id(f'id_place_card_{self.last_place_id - 1}'))\n self.assertTrue(self.browser.find_element_by_id(f'id_place_card_{self.last_place_id - 2}'))\n self.assertTrue(self.browser.find_element_by_id(f'id_place_card_{self.last_place_id - 3}'))\n self.assertIn('HOST THE WAY', self.browser.title)\n\n def test_show_detail(self):\n self.browser.get(self.live_server_url + '/places')\n card = self.get_detail_block('id_place_list')\n card.click()\n self.wait_for_find_element_by_id('id_place_detail_bar')\n self.assertTrue(self.browser.find_element_by_id('id_place_detail_info'))\n self.assertTrue(self.browser.find_element_by_id('id_book_place'))\n\n def test_add_book_request(self):\n self.browser.get(self.live_server_url + '/places')\n card = self.get_detail_block('id_place_list')\n card.click()\n self.wait_for_find_element_by_id('id_place_detail_bar')\n book = self.browser.find_element_by_id('id_book_place')\n self.assertIsInstance(book, WebElement)\n book.click()\n self.wait_for_find_element_by_id('id_login_form') # currently only for logged in users\n","sub_path":"tests/functional_tests/test_anonymous.py","file_name":"test_anonymous.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"593583255","text":"import torch\nimport torch.nn as nn\n\n\ndef get_device():\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n else:\n return torch.device(\"cpu\")\n\n\ndef weight_init(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm\") != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"190224334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 17 00:06:38 2018\n\n@author: Karn\n\"\"\"\n#1==8 \n#0==5\nt=int(input())\nwhile(t>0):\n t-=1\n n=int(input())\n n=n+1\n s=bin(n)\n s=s[3:]\n ans=\"\"\n for i in s:\n if (i=='1'):\n ans=ans+'8'\n elif(i=='0'):\n ans=ans+'5'\n \n print(ans)\n","sub_path":"Codechef/4 externalContests/Hackon Feb/STRLU.py","file_name":"STRLU.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"255976472","text":"from setuptools import setup\nimport time\n\nwith open('requirements.txt', 'r') as f:\n requirements = [l.strip().strip('\\n') for l in f.readlines() if l.strip() and not l.strip().startswith('#')]\n\nsetup(name=\"ebooktoc\",\n version=int(time.time()),\n author=\"cpg314\",\n license=\"MIT\",\n packages=[\"commandline\", \"document\", \"processor\", \"segmenterocr\", \"tests\", \"utils\"],\n entry_points={\"console_scripts\": \"ebooktoc={0}.{0}:main\".format(\"commandline\")},\n install_requires=requirements,\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"277604768","text":"#! 
python3\nfrom tkinter import *\nfrom tkinter import ttk\n\nimport os\nimport random\n\nfrom common import ExperimentFrame, InstructionsFrame\nfrom gui import GUI\n\nfrom constants import BONUS\n\n\n################################################################################\n# TEXTS\nquestintro = \"\"\"\nV následující části studie odpovíte na několik otázek o sobě, vašich postojích a názorech. Tato část by měla trvat asi 10-15 minut.\n\nZa účelem ověření, zda otázkám věnujete pozornost, je mezi otázkami umístěno i několik kontrolních otázek. Pokud odpovíte na všechny kontrolní otázky správně, můžete získat dalších {} Kč.\n\nKaždou otázku si proto pečlivě přečtěte. Snažte se však na otázky nemyslet příliš dlouho; první odpověď, která vám přijde na mysl, je obvykle nejlepší.\n\"\"\".format(BONUS)\n\nhexacoinstructions = \"\"\"Na následujících stránkách najdete řadu prohlášení o vaší osobě.\n\nPřečtěte si prosím každé prohlášení a rozhodněte se, do jaké míry s ním souhlasíte, nebo nesouhlasíte.\n\"\"\"\n\nattentiontext = \"Chcete-li prokázat, že zadání věnujete pozornost, vyberte možnost \"\n\n\n################################################################################\n\n\n\nclass Quest(ExperimentFrame):\n def __init__(self, root, perpage, file, name, left, right, options = 5, shuffle = True,\n instructions = \"\", height = 3, width = 80, center = False, checks = 0):\n super().__init__(root)\n\n self.perpage = perpage\n self.left = left\n self.right = right\n self.options = options\n self.checks = checks != 0\n self.name = name\n\n self.file.write(\"{}\\n\".format(name))\n\n if instructions:\n self.instructions = Text(self, height = height, relief = \"flat\", width = width,\n font = \"helvetica 16\", wrap = \"word\")\n self.instructions.grid(row = 1, column = 0, columnspan = 3)\n self.instructions.insert(\"1.0\", instructions, \"text\")\n if center:\n self.instructions.tag_config(\"text\", justify = \"center\") \n self.instructions[\"state\"] = \"disabled\"\n\n self.questions = []\n with open(os.path.join(\"Stuff\", file)) as f:\n for line in f:\n self.questions.append(line.strip())\n\n if shuffle:\n random.shuffle(self.questions)\n\n if checks:\n spread = len(self.questions)//checks\n positions = [random.randint(self.perpage//2 + spread*i, spread*(i+1) - self.perpage//2) for \\\n i in range(checks)]\n for i in range(checks):\n self.questions.insert(positions[i], attentiontext + str(random.randint(1, options)) + \".\")\n\n ttk.Style().configure(\"TButton\", font = \"helvetica 15\")\n self.next = ttk.Button(self, text = \"Pokračovat\", command = self.nextFun,\n state = \"disabled\")\n self.next.grid(row = self.perpage*2 + 4, column = 1)\n\n self.rowconfigure(0, weight = 1)\n self.rowconfigure(1, weight = 2)\n self.rowconfigure(self.perpage*2 + 4, weight = 1)\n self.rowconfigure(self.perpage*2 + 5, weight = 3)\n self.columnconfigure(0, weight = 1)\n self.columnconfigure(2, weight = 1)\n\n self.mnumber = 0\n \n self.createQuestions()\n\n\n def createQuestions(self):\n self.measures = []\n for i in range(self.perpage):\n m = Likert(self, self.questions[self.mnumber], shortText = str(self.mnumber + 1),\n left = self.left, right = self.right, options = self.options)\n m.grid(column = 0, columnspan = 3, row = i*2 + 3)\n self.rowconfigure(i*2 + 4, weight = 1)\n self.mnumber += 1\n self.measures.append(m)\n if self.mnumber == len(self.questions):\n break\n\n\n def nextFun(self):\n for measure in self.measures:\n measure.write()\n measure.grid_forget()\n if self.mnumber == 
len(self.questions):\n self.file.write(\"\\n\")\n if self.checks:\n self.file.write(\"Attention checks\\n\")\n wrong_checks = str(self.root.texts[\"attention_checks\"])\n self.file.write(self.id + \"\\t\" + self.name + \"\\t\" + wrong_checks + \"\\n\\n\")\n self.destroy()\n self.root.nextFrame()\n else:\n self.next[\"state\"] = \"disabled\"\n self.createQuestions()\n\n\n def check(self):\n for m in self.measures:\n if not m.answer.get():\n return\n else:\n self.next[\"state\"] = \"!disabled\"\n\n\n\nclass Likert(Canvas):\n def __init__(self, root, text, options = 5, shortText = \"\",\n left = \"strongly disagree\", right = \"strongly agree\"):\n super().__init__(root)\n\n self.root = root\n self.text = text\n self.short = shortText\n self.answer = StringVar()\n self[\"background\"] = \"white\"\n self[\"highlightbackground\"] = \"white\"\n self[\"highlightcolor\"] = \"white\"\n\n ttk.Style().configure(\"TRadiobutton\", background = \"white\", font = \"helvetica 13\")\n\n self.question = ttk.Label(self, text = text, background = \"white\",\n anchor = \"center\", font = \"helvetica 14\")\n self.question.grid(column = 0, row = 0, columnspan = options + 2, sticky = S)\n\n self.left = ttk.Label(self, text = left, background = \"white\",\n font = \"helvetica 13\")\n self.right = ttk.Label(self, text = right, background = \"white\",\n font = \"helvetica 13\")\n self.left.grid(column = 0, row = 1, sticky = E, padx = 5)\n self.right.grid(column = options + 1, row = 1, sticky = W, padx = 5) \n\n for value in range(1, options + 1):\n ttk.Radiobutton(self, text = str(value), value = value, variable = self.answer,\n command = self.check).grid(row = 1, column = value, padx = 4)\n\n self.columnconfigure(0, weight = 1)\n self.columnconfigure(options + 1, weight = 1)\n self.rowconfigure(0, weight = 1)\n\n\n def write(self):\n if attentiontext in self.text:\n if not \"attention_checks\" in self.root.root.texts:\n self.root.root.texts[\"attention_checks\"] = 0\n if self.answer.get() != self.text[-2]:\n self.root.root.texts[\"attention_checks\"] += 1\n else:\n ans = \"{}\\t{}\\t{}\\n\".format(self.short, self.answer.get(), self.text.replace(\"\\t\", \" \"))\n self.root.file.write(self.root.id + \"\\t\" + ans)\n\n\n def check(self):\n self.root.check()\n\n\n\n\n\nclass Hexaco(Quest):\n def __init__(self, root):\n super().__init__(root, 9, \"hexaco.txt\", \"Hexaco\", instructions = hexacoinstructions, width = 85,\n left = \"silně nesouhlasím\", right = \"silně souhlasím\", checks = 3,\n height = 3, options = 5, center = True)\n\n\n\nQuestInstructions = (InstructionsFrame, {\"text\": questintro, \"height\": 15})\n\n\nif __name__ == \"__main__\":\n os.chdir(os.path.dirname(os.getcwd()))\n GUI([QuestInstructions,\n Hexaco\n ])\n","sub_path":"Stuff/quest.py","file_name":"quest.py","file_ext":"py","file_size_in_byte":7289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"179935086","text":"import argparse\nimport os\nimport shutil\nfrom pathlib import Path\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--frames_dir', required=True, type=Path,\n help='Root directory containing all frames.')\n parser.add_argument('--validation_set_file', required=True, type=Path,\n help='File that 
contains videos, one per line')\n parser.add_argument('--destination_val_set', required=True, type=Path, help='Destination of the validation set')\n\n args = parser.parse_args()\n\n validation_set = args.validation_set_file\n frames_dir = args.frames_dir\n destination_val_set = args.destination_val_set\n\n validation_videos = open(validation_set).read().splitlines()\n\n os.makedirs(destination_val_set, exist_ok=True)\n\n for target_class in os.listdir(frames_dir):\n target_class_dir = os.path.join(frames_dir, target_class)\n for video_id in os.listdir(target_class_dir):\n if video_id in validation_videos:\n source_video_dir = os.path.join(target_class_dir, video_id)\n dst_dir = os.path.join(destination_val_set, target_class, video_id)\n os.makedirs(dst_dir, exist_ok=True)\n copytree(source_video_dir, dst_dir)\n","sub_path":"extract_validation_set.py","file_name":"extract_validation_set.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"613252334","text":"def _mimport(name, level=1):\n try:\n return __import__(name, globals(), level=level)\n except:\n return __import__(name, globals())\n\ndef load(gbls):\n def loadmod(name,gbls):\n mod=_mimport(name)\n for key in mod.__dict__:\n if not key.startswith('_'):\n gbls[key]=mod.__dict__[key]\n\n for mod in ('apd','mdsarray','compound','mdsdata','ident','treenode','mdsscalar',\n 'tree','mdsdevice','event','_tdishr','scope','_mdsshr','_tdishr',\n '_treeshr','tdipy','_descriptor','connection','mdsExceptions','mdsdcl'):\n loadmod(mod,gbls)\n","sub_path":"mdsobjects/python/_loadglobals.py","file_name":"_loadglobals.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"174952354","text":"import unittest\nimport dummygraph\nfrom offsetbasedgraph import Interval, Position, IntervalCollection, Graph, Block\n\n\nclass TestInterval(unittest.TestCase):\n\n def testSimpelInterval(self):\n region_paths = [1, 3, 4]\n interval = Interval(Position(1, 10), Position(4, 10), region_paths)\n\n for r in region_paths:\n self.assertTrue(\n r in interval.region_paths,\n \"The region path %d is not in interval's region paths\")\n\n self.assertEqual(len(interval.region_paths), 3,\n \"Interval should have 3 region paths\")\n\n def test_interval_length(self):\n graph = dummygraph.get_simple_graph()\n interval = Interval(Position(1, 5),\n Position(4, 10),\n [1, 2, 4],\n graph=graph)\n true_length = 5 + 20 + 10\n self.assertEqual(interval.length(), true_length)\n\n def test_get_position_from_offset(self):\n graph = dummygraph.get_simple_graph()\n interval = Interval(Position(1, 5),\n Position(4, 10),\n [1, 2, 4],\n graph=graph)\n offsets = [3, 23, 33]\n positions = [Position(1, 8),\n Position(2, 18),\n Position(4, 8)]\n\n for offset, position in zip(offsets, positions):\n self.assertEqual(interval.get_position_from_offset(offset),\n position)\n\n def _test_split(self):\n graph = dummygraph.get_simple_graph()\n interval = Interval(Position(1, 5),\n Position(4, 10),\n [1, 2, 4],\n graph=graph)\n splits = interval.split([2, 7, 27])\n true_intervals = [\n Interval(Position(1, 5), Position(1, 7)),\n Interval(Position(1, 7), Position(2, 2), [1, 2]),\n Interval(Position(2, 2), Position(4, 2), [2, 4]),\n Interval(Position(4, 2), Position(4, 10))\n ]\n self.assertEqual(splits, true_intervals)\n\n def _test_join(self):\n graph = dummygraph.get_simple_graph()\n interval = Interval(Position(1, 
5),\n Position(4, 10),\n [1, 2, 4],\n graph=graph)\n splits = interval.split([7])\n self.assertEqual(splits[0].join(splits[1]),\n interval)\n\n def test_hash(self):\n interval1 = Interval(5, 10, [1, 2, 3, 4])\n interval2 = Interval(5, 10, [1, 2, 3, 4])\n interval_different = Interval(4, 10, [1, 2, 3, 4])\n interval_different2 = Interval(5, 11, [1, 2, 3, 4])\n interval_different3 = Interval(5, 10, [1, 2, 3, 5])\n\n interval1_minus = Interval(5, 10, [1, 2, 3, 4], direction=-1)\n\n self.assertEqual(interval2.hash(), interval2.hash())\n self.assertTrue(interval1.hash() != interval_different.hash())\n self.assertTrue(interval1.hash() != interval_different2.hash())\n self.assertTrue(interval1.hash() != interval_different3.hash())\n self.assertTrue(interval1.hash() != interval1_minus.hash())\n\n def test_position_at_offset(self):\n graph = Graph(\n {\n 1: Block(10),\n 2: Block(10),\n 3: Block(10)\n },\n {\n 1: [2],\n 2: [3]\n }\n )\n interval = Interval(4, 6, [1, 2, 3], graph)\n\n self.assertEqual(interval.position_at_offset(0), Position(1, 4))\n self.assertEqual(interval.position_at_offset(1), Position(1, 5))\n self.assertEqual(interval.position_at_offset(2), Position(1, 6))\n self.assertEqual(interval.position_at_offset(5), Position(1, 9))\n self.assertEqual(interval.position_at_offset(6), Position(2, 0))\n self.assertEqual(interval.position_at_offset(7), Position(2, 1))\n self.assertEqual(interval.position_at_offset(16), Position(3, 0))\n self.assertEqual(interval.position_at_offset(21), Position(3, 5))\n\n def test_get_subinterval(self):\n graph = Graph(\n {\n 1: Block(10),\n 2: Block(10),\n 3: Block(10)\n },\n {\n 1: [2],\n 2: [3]\n }\n )\n interval = Interval(4, 6, [1, 2, 3], graph)\n self.assertEqual(interval.get_subinterval(0, 1), Interval(4, 5, [1]))\n self.assertEqual(interval.get_subinterval(0, 10), Interval(4, 4, [1, 2]))\n self.assertEqual(interval.get_subinterval(0, 11), Interval(4, 5, [1, 2]))\n self.assertEqual(interval.get_subinterval(1, 20), Interval(5, 4, [1, 2, 3]))\n self.assertEqual(interval.get_subinterval(10, 20), Interval(4, 4, [2, 3]))\n self.assertEqual(interval.get_subinterval(10, 16), Interval(4, 10, [2]))\n\n def test_overlap(self):\n graph = Graph(\n {\n 1: Block(10),\n 2: Block(10),\n 3: Block(10)\n },\n {\n 1: [2],\n 2: [3],\n 3: [1]\n }\n )\n\n interval1 = Interval(0, 10, [1, 2], graph)\n interval2 = Interval(0, 10, [1, 2], graph)\n self.assertEqual(interval1.overlap(interval2), 20)\n\n interval1 = Interval(5, 10, [1, 2], graph)\n interval2 = Interval(0, 10, [1, 2], graph)\n self.assertEqual(interval1.overlap(interval2), 15)\n\n interval1 = Interval(5, 7, [1], graph)\n interval2 = Interval(0, 10, [1, 2], graph)\n self.assertEqual(interval1.overlap(interval2), 2)\n\n interval1 = Interval(5, 7, [1], graph)\n interval2 = Interval(6, 7, [1], graph)\n self.assertEqual(interval1.overlap(interval2), 1)\n\n interval1 = Interval(8, 2, [1, 2, 3, 1], graph)\n interval2 = Interval(0, 10, [1], graph)\n self.assertEqual(interval1.overlap(interval2), 4)\n\n interval1 = Interval(8, 2, [1, 2, 3, 1], graph)\n interval2 = Interval(0, 2, [1, 2], graph)\n self.assertEqual(interval1.overlap(interval2), 6)\n\n def test_contains_correct_order(self):\n\n interval = Interval(0, 10, [1, 2, 3, 4, 5, 6, 7])\n\n other = Interval(0, 10, [1, 2, 3])\n self.assertTrue(interval.contains_in_correct_order(other))\n\n other = Interval(0, 10, [1, 2, 4])\n self.assertFalse(interval.contains_in_correct_order(other))\n\n other = Interval(0, 10, [1, 2, 5])\n 
self.assertFalse(interval.contains_in_correct_order(other))\n\n        other = Interval(0, 10, [4, 5, 6])\n        self.assertTrue(interval.contains_in_correct_order(other))\n\n\nclass TestIntervalCollection(unittest.TestCase):\n    def test_to_file_from_file(self):\n        intervals = (\n            Interval(0, 5, [1]),\n            Interval(0, 3, [2])\n        )\n        collection = IntervalCollection(intervals)\n        print(collection.intervals)\n        collection.to_file(\"test_intervalcollection.tmp\")\n\n        collection2 = IntervalCollection.create_generator_from_file(\"test_intervalcollection.tmp\")\n\n        #self.assertTrue(collection.intervals == collection2.intervals)\n        for i, interval in enumerate(collection2):\n            self.assertEqual(interval, intervals[i])\n\n    def test_gzip(self):\n        intervals = (\n            Interval(0, 5, [1]),\n            Interval(0, 1, [1]),\n            Interval(1, 4, [1,2]),\n            Interval(1, 4, [1,2]),\n            Interval(1, 4, [1,2]),\n            Interval(1, 4, [1,2]),\n            Interval(0, 3, [2])\n        )\n        collection = IntervalCollection(intervals)\n        print(collection.intervals)\n        collection.to_gzip(\"test_intervalcollection.gzip\")\n\n        collection2 = IntervalCollection.from_gzip(\"test_intervalcollection.gzip\")\n\n        #self.assertTrue(collection.intervals == collection2.intervals)\n        for i, interval in enumerate(collection2):\n            self.assertEqual(interval, intervals[i])\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/testInterval.py","file_name":"testInterval.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"255976472","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom sys import exit\nfrom create_mailroom import *\nfrom peewee import *\n\n\nclass Main:\n    def __init__(self):\n        self.prompt = {1: 'Send A Thank You',\n                       2: 'Create a Report',\n                       3: 'Send letters to everyone',\n                       4: 'Delete a Donor',\n                       5: 'Exit'}\n\n    def user_input(self):\n        print(\"\\nEnter the number from menu: \")\n        return {(print(str(k) + ':', v)) for k, v in self.prompt.items()}\n\n    def working_system(self):\n        while True:\n            temp = input()\n            try:\n                if int(temp) in range(1, 6):\n                    if int(temp) == 1:\n                        print('\\n\"Enter the donor name or list: \"')\n                        temp2 = input()\n                        #for list\n                        if temp2 == 'list':\n                            query = (Donor.select(Donor,\n                                                  fn.COUNT(Donor_Amount.id)\n                                                  .alias('donation_count'))\n                                     .join(Donor_Amount,\n                                           JOIN.LEFT_OUTER).group_by(Donor)\n                                     .order_by(Donor.donor_name))\n                            for i in query:\n                                print(i.donor_name)\n                            Main()\n                            self.user_input()\n                            self.working_system()\n                        #for new or existing donor\n                        else:\n                            donation_amount = input('Enter the donor_amount: ')\n                            try:\n                                new_donor_created = Donor.create(donor_name = temp2)\n                                Donor_Amount.create(new_donor = new_donor_created,\n                                                    donor_amount=float(donation_amount))\n                                self.thank_you(temp2, donation_amount)\n                            except IntegrityError:\n                                new_donor_created = Donor_Amount.create(new_donor=temp2,\n                                                                        donor_amount=float\n                                                                        (donation_amount))\n                                new_donor_created.save()\n                            Main()\n                            self.user_input()\n                            self.working_system()\n\n                    elif int(temp) == 2:\n                        self.create_report()\n                    elif int(temp) == 3:\n                        self.send_thank_you_all()\n                    elif int(temp) == 4:\n                        self.delete_donor()\n                    elif int(temp) == 5:\n                        raise SystemExit()\n            except ValueError:\n                print(\"\\nEnter the number from menu: \")\n\n    def thank_you(self, donor, donor_amount):\n        thankyou = \"Dear {}, thank you for the donation of {}\".format(donor, donor_amount)\n        with open('{}.txt'.format(donor.replace(' ', '_')), 'w') as file:\n            file.write(thankyou)\n\n    def create_report(self):\n        print(\"Donor Name || Total Donation || Number of Gifts || Gift 
Average\")\n print('----------------------------------------------------------------------------')\n report = (Donor.select(Donor, fn.Sum(Donor_Amount.donor_amount).alias('sum'),\n fn.Count(Donor_Amount.id).alias('count'),\n fn.AVG(Donor_Amount.donor_amount).alias('avg'))\n .join(Donor_Amount).group_by(Donor)\n .order_by(fn.Sum(Donor_Amount.donor_amount).desc()))\n\n for index in report:\n print('{:<18} {:>12.02f} {:>13} {:>24.02f}'.format(index.donor_name, index.sum, index.count, index.avg))\n\n Main()\n self.user_input()\n self.working_system()\n\n def send_thank_you_all(self):\n\n everyone = (Donor.select(Donor, fn.Sum(Donor_Amount.donor_amount).alias('sum'),fn.Count(Donor_Amount.id).alias('count'),fn.AVG(Donor_Amount.donor_amount).alias('avg'))\n .join(Donor_Amount).group_by(Donor).order_by(fn.Sum(Donor_Amount.donor_amount).desc()))\n \n letter = \"Dear {}, thank you for the donation of {}\"\n\n for index in everyone:\n with open('{}.txt'.format(index.donor_name.replace(' ', '_')), 'w') as f:\n f.write(letter.format(index.donor_name, index.sum))\n\n Main()\n self.user_input()\n self.working_system()\n\n def delete_donor(self):\n print('\\nEnter the donor name or list: ')\n temp = input()\n if temp == 'list':\n lists = (Donor.select(Donor, fn.COUNT(Donor_Amount.id)\n .alias('donation_count'))\n .join(Donor_Amount, JOIN.LEFT_OUTER).group_by(Donor)\n .order_by(Donor.donor_name))\n for index in lists:\n print(index.donor_name)\n Main()\n self.user_input()\n self.working_system()\n else:\n donor = Donor.get(Donor.donor_name == temp)\n donor.delete_instance()\n print(f'{temp} is deleted')\n Main()\n self.user_input()\n self.working_system()\n\n\nif __name__ == '__main__':\n create_tables()\n system = Main()\n system.user_input()\n system.working_system()\n","sub_path":"students/AndrewKim/lesson7/Mailroom/mailroom_database.py","file_name":"mailroom_database.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"345688885","text":"\nclass test :\n a=19\n def __init__(self):\n self.x=1\n test.b=20\n def f1(self):\n x=100\n self.x=2\n test.c=30\n def f2():\n y=200\n test.d=40\n @staticmethod\n def f3(m):\n test.e=50\n m.y=3\n @classmethod\n def f4(cls,s):\n cls.f=60\n test.g=70\n s.z=4\ntest.h=80\nt1=test()\n#t.f1()\n#test.f1(test)\n#test.f1(t)\n#test.f1(test)\n#test.f2()\n#t.f3(test)\n#test.f3(t)\n#t.f4(t)\n#test.f4(test)\nt2=test()\nt1.f3(t2)\nt1.f4(t2)\nprint(test.__dict__)\nprint(t1.__dict__)\nprint(test.__dict__)\nprint(t2.__dict__)\ninput()\n","sub_path":"102.variablesinclass.py","file_name":"102.variablesinclass.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"159871370","text":"# -*- coding: utf-8 -*-\nprint(\"If the upgrade process fails, please re-upgrade\")\nimport json\nimport os\nimport shutil\n\ndef read_file(filename):\n f = open(filename, newline=None)\n content = f.read()\n return content\n\ndef write_file(filename, content):\n f = open(filename, \"w\", newline=None)\n f.write(content)\n f.close()\n\ndef change_time_fomart(list_item):\n import time\n system_info = json.loads(read_file(\"./config/system.json\"))\n if \"time\" in list_item and isinstance(list_item[\"time\"], str):\n list_item[\"time\"] = time.mktime(time.strptime(list_item[\"time\"], system_info[\"Time_Format\"]))\n return list_item\n\nshutil.copyfile(\"./config/page.json\", \"./config/page.json.bak\")\nwrite_json = 
json.loads(read_file(\"./config/page.json\"))\nwrite_json = list(map(change_time_fomart, write_json))\nwrite_file(\"./config/page.json\", json.dumps(write_json, indent=4, sort_keys=False, ensure_ascii=False))\n\nfor filename in os.listdir(\"./document/\"):\n if filename.endswith(\".json\"):\n write_json = json.loads(read_file(\"./document/\" + filename))\n write_json = change_time_fomart(write_json)\n write_file(\"./document/\" + filename, json.dumps(write_json, indent=4, sort_keys=False, ensure_ascii=False))\n","sub_path":"upgrade/upgrade_from_1.py","file_name":"upgrade_from_1.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"40501425","text":"import os\n\nfrom django.db import models\n\nfrom .base import BaseModel, Gender\nfrom acmin.utils import attr\nfrom minamin.utils.image import resize_file, png2jpg\n\nWWW_ROOT = '/var/www'\nSTATIC_PREFIX = '/static/minamin/celebrity'\nSTATIC_FOLDER = f'{WWW_ROOT}{STATIC_PREFIX}'\nos.makedirs(STATIC_FOLDER, exist_ok=True)\n\n\nclass School(BaseModel):\n class Meta:\n ordering = ['-id']\n verbose_name = verbose_name_plural = \"名校\"\n\n list_fields = [\"name1\", 'image1', 'name2', 'image2']\n form_exclude = [\"image1\", \"image2\"]\n search_fields = [\"school_name\", 'celebrity1_name', 'celebrity1_detail', 'celebrity2_name', 'celebrity2_detail']\n\n school_name = models.CharField(\"学校名称\", max_length=300)\n\n name1 = models.CharField(\"姓名1\", max_length=10)\n gender1 = models.SmallIntegerField(\"性别1\", choices=Gender.choices, default=Gender.male)\n detail1 = models.TextField(\"详情1\")\n\n image1 = models.CharField(\"图片1\", max_length=100, null=True, blank=True)\n image_file1 = models.ImageField(\"图片1\", null=True, blank=True, upload_to='./veteran')\n\n name2 = models.CharField(\"姓名2\", max_length=10, null=True, blank=True)\n gender2 = models.SmallIntegerField(\"性别2\", choices=Gender.choices, null=True, blank=True)\n detail2 = models.TextField(\"详情2\", null=True, blank=True)\n image2 = models.CharField(\"图片2\", max_length=100, null=True, blank=True)\n image_file2 = models.ImageField(\"图片2\", null=True, blank=True, upload_to='./veteran')\n\n def __str__(self):\n return self.school_name\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n for index in [1, 2]:\n image_file = attr(self, f\"image_file{index}\")\n if image_file:\n path = attr(self, f\"image_file{index}.path\")\n if os.path.exists(path):\n extension = path.split(\".\").pop().lower()\n name = f'{self.id}-{index}.{extension}'\n new_path = f\"{STATIC_FOLDER}/{name}\"\n if os.path.exists(new_path):\n os.remove(new_path)\n os.rename(path, new_path)\n if extension == \"jpg\":\n resize_file(new_path, new_path + \".thumb.jpg\", width=None, height=50)\n elif extension == \"png\":\n jpg_path = new_path + \".jpg\"\n png2jpg(new_path, jpg_path)\n resize_file(jpg_path, new_path + \".thumb.jpg\", width=None, height=50)\n os.remove(jpg_path)\n setattr(self, f\"image{index}\", f'{STATIC_PREFIX}/{name}')\n\n setattr(self, f\"image_file{index}\", None)\n super().save(*args, **kwargs)\n","sub_path":"minamin/models/celebrity.py","file_name":"celebrity.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"580222274","text":"#coding:utf-8\nimport tensorflow as tf\nimport numpy as np\n#添加了,n_layer参数\ndef add_layer(inputs, in_size, out_size,n_layer, activation_function=None):\n # add one more layer and 
return the output of this layer\n    layer_name=\"layer%s\"%n_layer\n    with tf.name_scope('layer'):\n        with tf.name_scope('weights'):\n            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')\n            #添加weights的histogram\n            tf.summary.histogram(layer_name+'/weights',Weights)\n        with tf.name_scope('biases'):\n            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')\n            #添加biases的histogram\n            tf.summary.histogram(layer_name+'/biases',biases)\n        with tf.name_scope('Wx_plus_b'):\n            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)\n\n        if activation_function is None:\n            outputs = Wx_plus_b\n        else:\n            outputs = activation_function(Wx_plus_b, )\n        #添加outputs的histogram\n        tf.summary.histogram(layer_name+'/outputs',outputs)\n        return outputs\n\n\n#随机生成数据\nx_data=np.linspace(-1,1,300,dtype=np.float32)[:,np.newaxis]\nnoise=np.random.normal(0,0.05,x_data.shape).astype(np.float32)\ny_data=np.square(x_data)-0.5+noise\n\n#输入数据\nwith tf.name_scope(\"inputs\"):\n    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')\n    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')\n\n#添加神经层数\nl1=add_layer(xs,1,10,n_layer=1,activation_function=tf.nn.relu)\nprediction=add_layer(l1,10,1,n_layer=2,activation_function=None)\n\n#loss function\nwith tf.name_scope(\"loss\"):\n    loss=tf.reduce_mean(tf.reduce_sum(tf.square(prediction-ys),axis=1))\n    #添加loss function的event\n    tf.summary.scalar('loss',loss)\n\n#train\nwith tf.name_scope(\"train\"):\n    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\nsess=tf.Session()\n#合并所有的histogram\nmerge=tf.summary.merge_all()\n\nwrite=tf.summary.FileWriter(\"logs\",sess.graph)\n\ninit=tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(1000):\n    sess.run(train_step,feed_dict={xs:x_data,ys:y_data})\n    # result = sess.run(merge, feed_dict={xs: x_data, ys: y_data})\n    if i%50 ==0:\n        result=sess.run(merge,feed_dict={xs:x_data,ys:y_data})\n        write.add_summary(result,i)\n\n","sub_path":"5tf_study_tensorbord2.py","file_name":"5tf_study_tensorbord2.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"533123166","text":"from os import path\n\nimport h5py\nimport numpy as np\nfrom sklearn.preprocessing import minmax_scale\nimport torch\nfrom torch import nn\nfrom torch.utils.data import TensorDataset\n\n\nWAVEMIN, WAVEMAX = 3.5843, 3.9501\nN_WAVES = 3659\nWAVES = np.logspace(WAVEMIN, WAVEMAX, N_WAVES)\n\n\ndef init_weights(m):\n    if type(m) == nn.Conv1d:\n        nn.init.xavier_uniform_(m.weight.data)\n        m.bias.data.fill_(0)\n    elif type(m) == nn.Linear:\n        nn.init.xavier_uniform_(m.weight.data)\n        m.bias.data.fill_(0)\n\n\ndef preprocessing(X):\n    return minmax_scale(X, feature_range=(-1, 1), axis=1)\n\n\ndef get_dataset(X, y):\n    return TensorDataset(*list(map(\n        torch.from_numpy, [X.reshape(-1, 1, N_WAVES), y.astype(\"f4\")]\n    )))\n\n\ndef load_ds(ds_file, grp, va=False):\n    X_key, y_key = (\"X_tr\", \"y_tr\") if not va else (\"X_va\", \"y_va\")\n    with h5py.File(ds_file, \"r\") as ds:\n        dom = ds[grp]\n        X, y = dom[X_key][...], dom[y_key][:]\n    X = preprocessing(X)\n    return get_dataset(X, y)\n\n\ndef predict(model, dl, dev):\n    bs = dl.batch_size\n    trues = torch.zeros(dl.dataset.tensors[0].size(0))\n    preds = torch.zeros_like(trues)\n    for i, (xb, yb) in enumerate(dl):\n        start = i * bs\n        end = start + bs\n        trues[start:end] = yb\n        preds[start:end] = model(xb.to(dev)).squeeze()\n    return 
preds\n","sub_path":"cd/src/experiments/qso/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"642154454","text":"from random import randint\n\nfrom utils import *\n\nBASE = 2 \n\nclass Alice():\n def __init__(self, secret, a2, a3):\n self.secret = secret\n self.a2 = a2\n self.a3 = a3\n\n def message_one(self):\n g2a = exp(BASE, self.a2)\n g3a = exp(BASE, self.a3)\n return { 'g2a': g2a, 'g3a': g3a }\n\n def message_three(self, msg):\n s = randint(1000, 9000) \n\n g2 = exp(msg['g2b'], self.a2)\n g3 = exp(msg['g3b'], self.a3)\n\n self.pb = msg['pb']\n qb = msg['qb']\n \n self.pa = exp(g3, s)\n qa = multiply(exp(BASE, s), exp(g2, self.secret))\n\n ra = exp(divide(qa, qb), self.a3)\n return { 'pa': self.pa, 'qa': qa, 'ra': ra }\n\n def message_five(self, msg):\n rab = exp(msg, self.a3)\n self.matching = equals(rab, divide(self.pa, self.pb))\n\nclass Bob():\n def __init__(self, secret, b2, b3):\n self.secret = secret\n self.b2 = b2\n self.b3 = b3\n\n def message_two(self, msg):\n r = randint(1000, 9000)\n\n g2b = exp(BASE, self.b2)\n g3b = exp(BASE, self.b3)\n\n g2 = exp(msg['g2a'], self.b2)\n g3 = exp(msg['g3a'], self.b3)\n\n self.pb = exp(g3, r)\n self.qb = multiply(exp(BASE, r), exp(g2, self.secret))\n\n return {'g2b': g2b, 'g3b': g3b, 'pb': self.pb, 'qb': self.qb}\n\n def message_four(self, msg):\n rB = exp(divide(msg['qa'], self.qb), self.b3)\n rab = exp(msg['ra'], self.b3)\n self.matching = equals(rab, divide(msg['pa'], self.pb))\n return rB\n\n","sub_path":"smp.py","file_name":"smp.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"241589772","text":"from qm.QuantumMachinesManager import QuantumMachinesManager\nfrom qm.qua import *\nfrom qm import SimulationConfig\nfrom qm.simulate import LoopbackInterface\nimport matplotlib.pyplot as plt\nfrom qualang_tools.loops import from_array\nfrom qualang_tools.results import fetching_tool\nfrom qualang_tools.plot import interrupt_on_close\nfrom qualang_tools.results import progress_counter\nfrom macros import qua_declaration, multiplexed_readout\nfrom quam import QuAM\nfrom configuration import build_config, u\n\n#########################################\n# Set-up the machine and get the config #\n#########################################\nmachine = QuAM(\"quam_bootstrap_state.json\", flat_data=False)\nconfig = build_config(machine)\n\n###################\n\n###################\n# The QUA program #\n###################\ndfs = np.arange(-14e6, +14e6, 0.2e6)\namps = np.arange(0.0, 1, 0.02)\n\ncooldown_time = 1 * u.us\nn_avg = 1000\n\nqb_if_1 = machine.qubits[0].xy.f_01 - machine.local_oscillators.qubits[0].freq\nqb_if_2 = machine.qubits[1].xy.f_01 - machine.local_oscillators.qubits[0].freq\n\nwith program() as rabi_chevron:\n I, I_st, Q, Q_st, n, n_st = qua_declaration(nb_of_qubits=2)\n df = declare(int)\n f_q1 = declare(int)\n f_q2 = declare(int)\n a = declare(fixed)\n\n with for_(n, 0, n < n_avg, n + 1):\n save(n, n_st)\n\n with for_(*from_array(df, dfs)):\n update_frequency(\"q0_xy\", df + qb_if_1)\n update_frequency(\"q1_xy\", df + qb_if_2)\n\n with for_(*from_array(a, amps)):\n # qubit 1 can replace cw by x180 to test the gate\n play(\"x180\" * amp(a), \"q0_xy\")\n play(\"x180\" * amp(a), \"q1_xy\")\n align()\n multiplexed_readout(I, I_st, Q, Q_st, resonators=[0, 1])\n wait(cooldown_time * u.ns)\n\n with 
stream_processing():\n n_st.save(\"n\")\n # resonator 1\n I_st[0].buffer(len(amps)).buffer(len(dfs)).average().save(\"I1\")\n Q_st[0].buffer(len(amps)).buffer(len(dfs)).average().save(\"Q1\")\n # resonator 2\n I_st[1].buffer(len(amps)).buffer(len(dfs)).average().save(\"I2\")\n Q_st[1].buffer(len(amps)).buffer(len(dfs)).average().save(\"Q2\")\n\n\n#####################################\n# Open Communication with the QOP #\n#####################################\nqmm = QuantumMachinesManager(machine.network.qop_ip, machine.network.qop_port)\n\nsimulate = False\nif simulate:\n # simulate the test_config QUA program\n job = qmm.simulate(\n config,\n rabi_chevron,\n SimulationConfig(\n 11000, simulation_interface=LoopbackInterface([(\"con1\", 1, \"con1\", 1), (\"con1\", 2, \"con1\", 2)], latency=250)\n ),\n )\n job.get_simulated_samples().con1.plot()\n plt.show()\nelse:\n qm = qmm.open_qm(config)\n job = qm.execute(rabi_chevron)\n\n fig = plt.figure()\n interrupt_on_close(fig, job)\n results = fetching_tool(job, [\"n\", \"I1\", \"Q1\", \"I2\", \"Q2\"], mode=\"live\")\n while results.is_processing():\n n, I1, Q1, I2, Q2 = results.fetch_all()\n progress_counter(n, n_avg, start_time=results.start_time)\n\n s1 = u.demod2volts(I1 + 1j * Q1, machine.resonators[0].readout_pulse_length)\n s2 = u.demod2volts(I2 + 1j * Q2, machine.resonators[0].readout_pulse_length)\n\n plt.subplot(221)\n plt.cla()\n plt.pcolor(amps * machine.qubits[0].xy.pi_amp, dfs, I1)\n plt.xlabel(\"qubit pulse amplitude (V)\")\n plt.ylabel(\"qubit 1 detuning (MHz)\")\n plt.title(f\"q1 (f_res1: {machine.qubits[0].xy.f_01 / u.MHz} MHz)\")\n plt.subplot(223)\n plt.cla()\n plt.pcolor(amps * machine.qubits[0].xy.pi_amp, dfs, Q1)\n plt.xlabel(\"qubit pulse amplitude (V)\")\n plt.ylabel(\"qubit 1 detuning (MHz)\")\n plt.subplot(222)\n plt.cla()\n plt.pcolor(amps * machine.qubits[1].xy.pi_amp, dfs, I2)\n plt.title(f\"q2 (f_res2: {machine.qubits[1].xy.f_01 / u.MHz} MHz)\")\n plt.ylabel(\"qubit 2 detuning (MHz)\")\n plt.xlabel(\"qubit pulse amplitude (V)\")\n plt.subplot(224)\n plt.cla()\n plt.pcolor(amps * machine.qubits[1].xy.pi_amp, dfs, Q2)\n plt.xlabel(\"qubit pulse amplitude (V)\")\n plt.ylabel(\"qubit 2 detuning (MHz)\")\n plt.tight_layout()\n plt.pause(0.1)\n # Close the quantum machines at the end in order to put all flux biases to 0 so that the fridge doesn't heat-up\n qm.close()\n","sub_path":"Quantum-Control-Applications/Superconducting/Two-Flux-Tunable-Transmons/Rapid Prototyping/09_rabi_chevron.py","file_name":"09_rabi_chevron.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"219856750","text":"import nox\n\npython_version = \"3.9\"\nlocations = \"src\", \"tests\"\n\nmax_line_length = 75\n\n\n@nox.session(python=python_version)\ndef format(session):\n args = session.posargs or locations\n session.install(\"black\", \"isort\", \"docformatter\", \"reindent\")\n session.run(\"isort\", \"--atomic\", *args)\n session.run(\n \"docformatter\",\n \"--wrap-summaries\",\n f\"{max_line_length}\",\n \"--wrap-descriptions\",\n f\"{max_line_length}\",\n \"--in-place\",\n \"--recursive\",\n *args,\n )\n session.run(\"python\", \"-m\", \"reindent\", \"-r\", \"-n\", *args)\n session.run(\"black\", \"--line-length\", f\"{max_line_length}\", *args)\n\n\n@nox.session(python=python_version)\ndef flake8(session):\n args = session.posargs or locations\n session.install(\"flake8\", \"flake8-import-order\", \"flake8-annotations\")\n session.run(\"flake8\", 
\"--ignore=ANN101,W503\", *args)\n\n\n@nox.session(python=python_version)\ndef mypy(session):\n args = session.posargs or locations\n session.install(\"mypy\")\n session.run(\"mypy\", *args)\n\n\n@nox.session(python=python_version)\ndef pylama(session):\n args = session.posargs or locations\n session.install(\"pylama\")\n session.run(\"pylama\", *args)\n","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"322651030","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport dbtypes\nimport fields\n\n## Проверка, я вляется ли тип/класс test_type наследником класса _type\ndef inherits(test_type, _type) :\n if test_type == _type :\n return True\n bases = None\n if type(test_type) == type(_type) : ## Если в параметре передан класс\n bases = test_type.__bases__\n else : ## Если объект класса\n if test_type.__class__ == _type :\n return True\n bases = test_type.__class__.__bases__\n\n for dbtypes in bases :\n return inherits(dbtypes, _type)\n\n return False\n\n## Возвращает значение по умолчанию для типа\ndef getDefaultValue(_type) :\n if inherits(_type, dbtypes.Pointer) :\n return \"yamm::Pointer<{0}>()\".format(_type.pointed.__name__)\n if inherits(_type, dbtypes.BaseType) :\n return _type.default\n if inherits(_type, dbtypes.EnumType) :\n return \"(\" + getInnerTypeName(_type) + \")\" + \"0\"\n if inherits(_type, dbtypes.ClassType) :\n return _type.__name__ + \"()\"\n if inherits(_type, dbtypes.StructType) :\n result = \"\"\n for field_name in _type.__dict__.keys() :\n field = _type.__dict__[field_name]\n if inherits(field, fields.Field) :\n result += str(getDefaultValue(field.data_type)) + \", \"\n return \"{\" + result[:-2] + \"}\"\n return \"0\"\n\n## \"Внутреннее\" имя типа данных\n## Внутреннее имя испольщуется в реализациях методов класса\n## и как правило совпадает с внешним именем (доступным пользователю),\n## за тем исключением, что во внутреннем имени отсутствует идентификатор\n## пространства имен базы данных. 
Для классов внутренее имя -\n## это идентификатор ObjectID в массиве объектов класса\ndef getInnerTypeName(_type) :\n if inherits(_type, dbtypes.ClassType) :\n return \"ObjectID\"\n return _type.__name__\n\n## Публичное имя типа, для базовых типов сохраняется неизменным,\n## для типов, определенных пользователем добавляется идентификатор\n## пространства имен\ndef getPublicTypeName(_type, out_of_ns = True) :\n if inherits(_type, dbtypes.Pointer) :\n return \"yamm::Pointer<{0}>\".format(getInnerTypeName(_type.pointed))\n if inherits(_type, dbtypes.BaseType) :\n return _type.__name__\n if out_of_ns :\n return dbtypes.HSDB.prefix + \"::\" + _type.__name__\n return _type.__name__\n\ndef isLogEnabledForClass(the_class) :\n return dbtypes.HSDB.logger_enabled\n","sub_path":"hsdb/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"188145582","text":"\"\"\"Globus Harvester.\n\nUsage:\n globus_harvester.py [--onlyharvest | --onlyexport | --init] [--only-new-records] [--export-filepath=] [--export-format=] [--repository-id=]\n\nOptions:\n --onlyharvest Just harvest new items, do not export anything.\n --onlyexport Just export existing items, do not harvest anything.\n --only-new-records Only export records changed since last crawl.\n --export-filepath= The path to export the data to.\n --export-format= The export format (currently gmeta or xml).\n --repository-id= Only export this repository, based on the database table ID\n --init Just initialize the database, do not harvest or export.\n\n\"\"\"\n\nfrom docopt import docopt\nimport json\nimport os\nimport time\nimport configparser\n\nfrom harvester.OAIRepository import OAIRepository\nfrom harvester.CKANRepository import CKANRepository\nfrom harvester.MarkLogicRepository import MarkLogicRepository\nfrom harvester.CSWRepository import CSWRepository\nfrom harvester.SocrataRepository import SocrataRepository\nfrom harvester.DBInterface import DBInterface\nfrom harvester.HarvestLogger import HarvestLogger\nfrom harvester.TimeFormatter import TimeFormatter\nfrom harvester.Lock import Lock\nfrom harvester.Exporter import Exporter\n\n\ndef get_config_json(repos_json=\"conf/repos.json\"):\n configdict = {}\n with open(repos_json, 'r') as jsonfile:\n configdict = json.load(jsonfile)\n\n return configdict\n\n\ndef get_config_ini(config_file=\"conf/harvester.conf\"):\n '''\n Read ini-formatted config file from disk\n :param config_file: Filename of config file\n :return: configparser-style config file\n '''\n\n config = configparser.ConfigParser()\n config.read(config_file)\n return config\n\n\nif __name__ == \"__main__\":\n\n instance_lock = Lock()\n tstart = time.time()\n\n arguments = docopt(__doc__)\n run_export = True\n run_harvest = True\n if arguments[\"--onlyexport\"] == True:\n run_harvest = False\n if arguments[\"--onlyharvest\"] == True:\n run_export = False\n if arguments[\"--init\"] == True:\n run_export = False\n run_harvest = False\n\n config = get_config_ini()\n final_config = {}\n final_config['update_log_after_numitems'] = int(config['harvest'].get('update_log_after_numitems', 1000))\n final_config['abort_after_numerrors'] = int(config['harvest'].get('abort_after_numerrors', 5))\n final_config['record_refresh_days'] = int(config['harvest'].get('record_refresh_days', 30))\n final_config['repo_refresh_days'] = int(config['harvest'].get('repo_refresh_days', 1))\n final_config['temp_filepath'] = 
config['harvest'].get('temp_filepath', \"temp\")\n final_config['export_filepath'] = config['export'].get('export_filepath', \"data\")\n final_config['export_file_limit_mb'] = int(config['export'].get('export_file_limit_mb', 10))\n final_config['export_format'] = config['export'].get('export_format', \"gmeta\")\n final_config['socrata_app_token'] = config['socrata'].get('app_token', None)\n final_config['repository_id'] = None\n\n main_log = HarvestLogger(config['logging'])\n main_log.info(\"Starting... (pid={})\".format(os.getpid()))\n\n dbh = DBInterface(config['db'])\n dbh.setLogger(main_log)\n repo_configs = get_config_json()\n\n if run_harvest:\n # Find any new information in the repositories\n for repoconfig in repo_configs['repos']:\n if repoconfig['type'] == \"oai\":\n repo = OAIRepository(final_config)\n elif repoconfig['type'] == \"ckan\":\n repo = CKANRepository(final_config)\n elif repoconfig['type'] == \"marklogic\":\n repo = MarkLogicRepository(final_config)\n elif repoconfig['type'] == \"csw\":\n repo = CSWRepository(final_config)\n elif repoconfig['type'] == \"socrata\":\n repo = SocrataRepository(final_config)\n repo.setLogger(main_log)\n if 'copyerrorstoemail' in repoconfig and not repoconfig['copyerrorstoemail']:\n main_log.setErrorsToEmail(False)\n repo.setRepoParams(repoconfig)\n repo.setDatabase(dbh)\n repo.crawl()\n repo.update_stale_records(config['db'])\n if 'copyerrorstoemail' in repoconfig and not repoconfig['copyerrorstoemail']:\n main_log.restoreErrorsToEmail()\n\n if run_export:\n # Export the database contents out to files\n if arguments[\"--export-format\"]:\n final_config['export_format'] = arguments[\"--export-format\"]\n if arguments[\"--export-filepath\"]:\n final_config['export_filepath'] = arguments[\"--export-filepath\"]\n if arguments[\"--repository-id\"]:\n final_config['repository_id'] = arguments[\"--repository-id\"]\n exporter = Exporter(dbh, main_log, final_config)\n kwargs = {\n \"export_format\": final_config['export_format'],\n \"export_filepath\": final_config['export_filepath'],\n \"only_new_records\": False,\n \"temp_filepath\": final_config['temp_filepath'],\n \"export_repository_id\": final_config['repository_id']\n }\n if arguments[\"--only-new-records\"] == True:\n kwargs[\"only_new_records\"] = True\n exporter.export_to_file(**kwargs)\n\n formatter = TimeFormatter()\n main_log.info(\"Done after {}\".format(formatter.humanize(time.time() - tstart)))\n\n with open(\"data/last_run_timestamp\", \"w\") as lastrun:\n lastrun.write(str(time.time()))\n instance_lock.unlock()\n","sub_path":"globus_harvester.py","file_name":"globus_harvester.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"296542475","text":"# 1281. 
整数的各位积和之差\nclass Solution:\n @staticmethod\n def difference(n: int):\n add, mul = 0, 1\n while n > 0:\n digit = n % 10\n n //= 10\n add += digit\n mul *= digit\n print(mul - add)\n\n\ntest = Solution()\ntest.difference(23)\n \n","sub_path":"20200302_leetcode_1281.py","file_name":"20200302_leetcode_1281.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"308614590","text":"import re\nimport logging\nimport builtins\n\nfrom typing import TYPE_CHECKING, Optional, Union\n\n# Не используется ujson из-за отсутствия в нём object_hook'a\n# Отправка вообще application/x-www-form-urlencoded, а не JSON'a\n# https://github.com/psf/requests/blob/master/requests/models.py#L508\nimport json\n\nimport requests\n\nfrom yandex_music.utils.captcha_response import CaptchaResponse\nfrom yandex_music.utils.response import Response\nfrom yandex_music.exceptions import Unauthorized, BadRequest, NetworkError, YandexMusicError, CaptchaRequired, \\\n CaptchaWrong, TimedOut\n\nif TYPE_CHECKING:\n from yandex_music import Client\n\n\nUSER_AGENT = 'Yandex-Music-API'\nHEADERS = {\n 'X-Yandex-Music-Client': 'YandexMusicAndroid/23020055',\n}\n\nreserved_names = [name.lower() for name in dir(builtins)] + ['client']\n\nlogging.getLogger('urllib3').setLevel(logging.WARNING)\n\n\nclass Request:\n \"\"\"Вспомогательный класс для yandex_music, представляющий методы для выполнения POST и GET запросов, скачивания\n файлов.\n\n Args:\n client (:obj:`yandex_music.Client`, optional): Клиент Yandex Music.\n headers (:obj:`dict`, optional): Заголовки передаваемые с каждым запросом.\n proxy_url (:obj:`str`, optional): Прокси.\n \"\"\"\n\n def __init__(self,\n client=None,\n headers=None,\n proxy_url=None):\n self.headers = headers or HEADERS.copy()\n\n self.client = self.set_and_return_client(client)\n\n self.proxies = {'http': proxy_url, 'https': proxy_url} if proxy_url else None\n\n def set_authorization(self, token: str) -> None:\n \"\"\"Добавляет заголовок авторизации для каждого запроса.\n\n Note:\n Используется при передаче своего экземпляра Request'a клиенту.\n\n Args:\n token (:obj:`str`): OAuth токен.\n \"\"\"\n self.headers.update({'Authorization': f'OAuth {token}'})\n\n def set_and_return_client(self, client) -> 'Client':\n \"\"\"Принимает клиент и присваивает его текущему объекту. При наличии авторизации добавляет заголовок.\n\n Args:\n client (:obj:`yandex_music.Client`): Клиент Yandex Music.\n\n Returns:\n :obj:`yandex_music.Client`: Клиент Yandex Music.\n \"\"\"\n self.client = client\n\n if self.client and self.client.token:\n self.set_authorization(self.client.token)\n\n return self.client\n\n @staticmethod\n def _convert_camel_to_snake(text: str) -> str:\n \"\"\"Конвертация CamelCase в SnakeCase.\n\n Args:\n text (:obj:`str`): Название переменной в CamelCase.\n\n Returns:\n :obj:`str`: Название переменной в SnakeCase.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n @staticmethod\n def _object_hook(obj: dict) -> dict:\n \"\"\"Нормализация имён переменных пришедших с API.\n\n Note:\n В названии переменной заменяет \"-\" на \"_\", конвертирует в SnakeCase, если название является\n зарезервированным именем или \"client\" - добавляет \"_\" в конец. 
If a variable name starts with a digit -\n            prepends \"_\" to it.\n\n        Args:\n            obj (:obj:`dict`): Dictionary where each key is a variable name and each value is its content.\n\n        Returns:\n            :obj:`dict`: The same dictionary as the input, but with normalized keys.\n        \"\"\"\n        cleaned_object = {}\n        for key, value in obj.items():\n            key = Request._convert_camel_to_snake(key.replace('-', '_'))\n            if key.lower() in reserved_names:\n                key += '_'\n\n            if len(key) and key[0].isdigit():\n                key = '_' + key\n\n            cleaned_object.update({key: value})\n\n        return cleaned_object\n\n    def _parse(self, json_data: bytes) -> Optional[Response]:\n        \"\"\"Parse the response from the API.\n\n        Note:\n            If the data is missing from `result`, the response is rebuilt using the data from the root.\n\n        Args:\n            json_data (:obj:`bytes`): Response from the API.\n\n        Returns:\n            :obj:`yandex_music.utils.response.Response`: API response.\n\n        Raises:\n            :class:`yandex_music.exceptions.YandexMusicError`: Base library exception.\n        \"\"\"\n        try:\n            decoded_s = json_data.decode('utf-8')\n            data = json.loads(decoded_s, object_hook=Request._object_hook)\n\n        except UnicodeDecodeError:\n            logging.getLogger(__name__).debug(\n                'Logging raw invalid UTF-8 response:\\n%r', json_data)\n            raise YandexMusicError('Server response could not be decoded using UTF-8')\n        except (AttributeError, ValueError):\n            raise YandexMusicError('Invalid server response')\n\n        if data.get('result') is None:\n            data = {'result': data, 'error': data.get('error'), 'error_description': data.get('error_description')}\n\n        return Response.de_json(data, self.client)\n\n    def _request_wrapper(self, *args, **kwargs):\n        \"\"\"Wrapper around the `requests` library request.\n\n        Note:\n            Adds the required headers to the request, handles status codes, enforces the timeout, raises the\n            appropriate exceptions and returns the response. Passes the user-supplied arguments through to the request.\n\n        Args:\n            *args: Arbitrary positional arguments for `requests.request`.\n            **kwargs: Arbitrary keyword arguments for `requests.request`.\n\n        Returns:\n            :obj:`yandex_music.utils.response.Response`: API response.\n\n        Raises:\n            :class:`yandex_music.exceptions.TimedOut`: When the request times out.\n            :class:`yandex_music.exceptions.Unauthorized`: When the token is invalid or waiting for a direct file link takes too long.\n            :class:`yandex_music.exceptions.BadRequest`: When the request is malformed.\n            :class:`yandex_music.exceptions.NetworkError`: On network problems.\n            :class:`yandex_music.exceptions.CaptchaWrong`: When the captcha answer is wrong.\n            :class:`yandex_music.exceptions.CaptchaRequired`: When a captcha has to be solved.\n        \"\"\"\n        if 'headers' not in kwargs:\n            kwargs['headers'] = {}\n\n        kwargs['headers']['User-Agent'] = USER_AGENT\n\n        try:\n            resp = requests.request(*args, **kwargs)\n        except requests.Timeout:\n            raise TimedOut()\n        except requests.RequestException as e:\n            raise NetworkError(e)\n\n        if 200 <= resp.status_code <= 299:\n            return resp\n\n        parse = self._parse(resp.content)\n        message = parse.error or 'Unknown HTTPError'\n\n        if 'CAPTCHA' in message:\n            exception = CaptchaWrong if 'Wrong' in message else CaptchaRequired\n            raise exception(message, CaptchaResponse.de_json(parse.result, self.client))\n        elif resp.status_code in (401, 403):\n            raise Unauthorized(message)\n        elif resp.status_code == 400:\n            raise BadRequest(message)\n        elif resp.status_code in (404, 409, 413):\n            raise NetworkError(message)\n\n        elif resp.status_code == 502:\n            raise NetworkError('Bad Gateway')\n        else:\n            raise NetworkError(f'{message} ({resp.status_code})')\n\n    def get(self, url: str, params: dict = None, timeout: Union[int, float] = 5, *args, **kwargs):\n        \"\"\"Send a GET request.\n\n        Args:\n            url (:obj:`str`): URL for the request.\n            params (:obj:`str`): GET parameters for the request.\n            timeout (:obj:`int` | :obj:`float`): Used as the server response timeout instead of the one specified\n                when the pool was created.\n            *args: Arbitrary positional arguments for `requests.request`.\n            **kwargs: Arbitrary keyword arguments for `requests.request`.\n\n        Returns:\n            :obj:`yandex_music.utils.response.Response`: API response.\n\n        Raises:\n            :class:`yandex_music.exceptions.YandexMusicError`: Base library exception.\n        \"\"\"\n        result = self._request_wrapper('GET', url, params=params, headers=self.headers, proxies=self.proxies,\n                                       timeout=timeout, *args, **kwargs)\n\n        return self._parse(result.content).result\n\n    def post(self, url, data=None, timeout=5, *args, **kwargs):\n        \"\"\"Send a POST request.\n\n        Args:\n            url (:obj:`str`): URL for the request.\n            data (:obj:`str`): POST request body.\n            timeout (:obj:`int` | :obj:`float`): Used as the server response timeout instead of the one specified\n                when the pool was created.\n            *args: Arbitrary positional arguments for `requests.request`.\n            **kwargs: Arbitrary keyword arguments for `requests.request`.\n\n        Returns:\n            :obj:`yandex_music.utils.response.Response`: API response.\n\n        Raises:\n            :class:`yandex_music.exceptions.YandexMusicError`: Base library exception.\n        \"\"\"\n        result = self._request_wrapper('POST', url, headers=self.headers, proxies=self.proxies, data=data,\n                                       timeout=timeout, *args, **kwargs)\n\n        return self._parse(result.content).result\n\n    def retrieve(self, url, timeout=5, *args, **kwargs):\n        \"\"\"Send a GET request and return the content without processing (parsing).\n\n        Args:\n            url (:obj:`str`): URL for the request.\n            timeout (:obj:`int` | :obj:`float`): Used as the server response timeout instead of the one specified\n                when the pool was created.\n            *args: Arbitrary positional arguments for `requests.request`.\n            **kwargs: Arbitrary keyword arguments for `requests.request`.\n\n        Returns:\n            :obj:`Response`: Instance of the `requests` library response object.\n\n        Raises:\n            :class:`yandex_music.exceptions.YandexMusicError`: Base library exception.\n        \"\"\"\n        return self._request_wrapper('GET', url, proxies=self.proxies, timeout=timeout, *args, **kwargs)\n\n    def download(self, url, filename, timeout=5, *args, **kwargs):\n        \"\"\"Send a request for the content and write it to a file.\n\n        Args:\n            url (:obj:`str`): URL for the request.\n            filename (:obj:`str`): Path and/or file name including the extension.\n            timeout (:obj:`int` | :obj:`float`): Used as the server response timeout instead of the one specified\n                when the pool was created.\n            *args: Arbitrary positional arguments for `requests.request`.\n            **kwargs: Arbitrary keyword arguments for `requests.request`.\n\n        Raises:\n            :class:`yandex_music.exceptions.YandexMusicError`: Base library exception.\n        \"\"\"\n        result = self.retrieve(url, timeout=timeout, *args, **kwargs)\n        with open(filename, 'wb') as f:\n            f.write(result.content)\n","sub_path":"venv/lib/python3.7/site-packages/yandex_music/utils/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":13054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"74596242","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport stripe\n\nfrom users.models import UserProfile\nfrom subscriptions.models import Subscription, Plan\nfrom subscriptions.states import SubscriptionState as State\nfrom subscriptions.plans import SubscriptionPlan as Plans\n\nstripe.api_key = settings.STRIPE_PRIVATE_KEY\n\ndef subscribe(request):\n    user = request.user\n    if user.is_authenticated:\n        subscription = Subscription.objects.filter(user=user).order_by('-id').first()\n        if not subscription:\n            return render(request, 'subscriptions/new_subscription.html')\n        elif subscription.status == State.DEACTIVATED:\n            return render(request, 'subscriptions/renew_subscription.html')\n        elif subscription and subscription.status == State.ACTIVE:\n            return redirect(reverse('home'))\n    else:\n        return redirect(reverse('login'))\n\n\n@csrf_exempt\ndef trial(request):\n    session = stripe.checkout.Session.create(\n        customer_email = request.user.email,\n        payment_method_types = ['card'],\n        line_items = [{\n            #price id from Stripe Dashboard\n            'price' : 'price_1H60pwI24oCI0OTByU54LFUB',\n            'quantity' : 1,\n        }],\n        mode = 'payment',\n        success_url = request.build_absolute_uri(reverse('home')) + '?session_id={CHECKOUT_SESSION_ID}',\n        cancel_url = request.build_absolute_uri(reverse('subscribe')),\n    )\n    return JsonResponse({\n        'session_id' : session.id,\n        'strip_public_key' : settings.STRIPE_PUBLIC_KEY\n    })\n\n\n@csrf_exempt\ndef basic(request):\n    session = stripe.checkout.Session.create(\n        customer_email = request.user.email,\n        payment_method_types = ['card'],\n        line_items = [{\n            #price id from Stripe Dashboard\n            'price' : 'price_1H60viI24oCI0OTBOf07Jt9e',\n            'quantity' : 1,\n        }],\n        mode = 'payment',\n        success_url = request.build_absolute_uri(reverse('home')) + '?session_id={CHECKOUT_SESSION_ID}',\n        cancel_url = request.build_absolute_uri(reverse('subscribe')),\n    )\n    return
JsonResponse({\n 'session_id' : session.id,\n 'strip_public_key' : settings.STRIPE_PUBLIC_KEY\n })\n\n\n@csrf_exempt\ndef premium(request):\n session = stripe.checkout.Session.create(\n customer_email = request.user.email,\n payment_method_types = ['card'],\n line_items = [{\n #price id from Stripe Dashboard\n 'price' : 'price_1H61H2I24oCI0OTBSaW6awEL',\n 'quantity' : 1,\n }],\n mode = 'payment',\n success_url = request.build_absolute_uri(reverse('home')) + '?session_id={CHECKOUT_SESSION_ID}',\n cancel_url = request.build_absolute_uri(reverse('subscribe')),\n )\n return JsonResponse({\n 'session_id' : session.id,\n 'strip_public_key' : settings.STRIPE_PUBLIC_KEY\n })\n\n\n@csrf_exempt\ndef diamond(request):\n session = stripe.checkout.Session.create(\n customer_email = request.user.email,\n payment_method_types = ['card'],\n line_items = [{\n #price id from Stripe Dashboard\n 'price' : 'price_1H61OOI24oCI0OTBmW5Dy2sw',\n 'quantity' : 1,\n }],\n mode = 'payment',\n success_url = request.build_absolute_uri(reverse('home')) + '?session_id={CHECKOUT_SESSION_ID}',\n cancel_url = request.build_absolute_uri(reverse('subscribe')),\n )\n return JsonResponse({\n 'session_id' : session.id,\n 'strip_public_key' : settings.STRIPE_PUBLIC_KEY\n })\n\n\n\n@csrf_exempt\ndef stripe_webhook(request):\n # test\n endpoint_secret = 'whsec_0bFVho9jv8RwpCNQ9oC0AINEKGZDO18O'\n #deployed\n # endpoint_secret = 'whsec_lhBWqqPoYUghCa4ecsjqyrbZWsKMFCQE'\n payload = request.body\n sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n event = None\n\n try:\n event = stripe.Webhook.construct_event(\n payload, sig_header, endpoint_secret\n )\n except ValueError as e:\n return HttpResponse(status=400)\n except stripe.error.SignatureVerificationError as e:\n return HttpResponse(status=400)\n\n print(event['type'])\n if event['type'] == 'checkout.session.completed':\n session = event['data']['object']\n customer_email = session['customer_email']\n line_items = stripe.checkout.Session.list_line_items(session['id'], limit=1)\n price_id = line_items['data'][0]['price']['id']\n\n #price id is of Trial\n if price_id == 'price_1H60pwI24oCI0OTByU54LFUB':\n current_plan, created = Plan.objects.get_or_create(plan=Plans.TRIAL)\n if created:\n current_plan.save()\n\n #price_id is of Basic\n elif price_id == 'price_1H60viI24oCI0OTBOf07Jt9e':\n current_plan, created = Plan.objects.get_or_create(plan=Plans.BASIC)\n if created:\n current_plan.save()\n\n #price_id is of Premium\n elif price_id == 'price_1H61H2I24oCI0OTBSaW6awEL':\n current_plan, created = Plan.objects.get_or_create(plan=Plans.PREMIUM)\n if created:\n current_plan.save()\n\n #price_id is of Diamond\n elif price_id == 'price_1H61OOI24oCI0OTBmW5Dy2sw':\n current_plan, created = Plan.objects.get_or_create(plan=Plans.DIAMOND)\n if created:\n current_plan.save()\n\n user_profile = UserProfile.objects.get(email=customer_email)\n user_profile.subscribed = True\n user_profile.save()\n\n previous_subscription = Subscription.objects.filter(user=user_profile).order_by('-id').first()\n if previous_subscription:\n previous_subscription.status = State.DEACTIVATED\n previous_subscription.save()\n new_subscription = Subscription.objects.create(\n user=user_profile,\n plan=current_plan,\n status=State.ACTIVE)\n new_subscription.save()\n\n return HttpResponse(status=200)\n","sub_path":"ase/subscriptions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"618166881","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author:\n Lucas Yuki Imamura\n Maria Fernanda Bittelbrunn Toniasso\n Vitor Hugo Homem Marzarotto\n\"\"\"\nfrom images import circle\nfrom core import Spell\nimport time\nimport os\n\n\nclass Teleport(Spell):\n __dist = 400\n __image_duration = 0.5\n __effect_duration = 1\n\n def __init__(self, wizard_id: int, groups: list, screen_size: tuple):\n\n image_dict = {\"1\": {\"path\": os.path.join(\n \"images\", \"spells_img\", \"teleport_img.png\"), \"R\": 25, \"size\": (52, 52)}}\n sound_dict = {\"casting\": \"teleport_sound\"}\n\n super().__init__(\n wizard_id=wizard_id,\n name=\"Teleport\",\n icon=\"teleport_icon\",\n image_dict=image_dict,\n sound_dict=sound_dict,\n ang=0,\n screen_size=screen_size,\n groups=groups,\n )\n self.kill()\n\n def cast(self, wiz):\n super().cast(wiz)\n # move automaticamente a determinada distancia\n dist = (wiz.angle_vector[0] * self.__dist + self.center[0],\n wiz.angle_vector[1] * self.__dist + self.center[1])\n dist = (dist[0] % self.screen_size[0], dist[1] % self.screen_size[1])\n wiz.center = dist\n\n def update(self, dt):\n # magia tem duração de 2s\n if time.time() > self.spawned_time + self.__effect_duration:\n self.kill()\n super().update(dt)\n\n def colision(self, wiz):\n pass\n","sub_path":"core/spells/Teleport.py","file_name":"Teleport.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"193627839","text":"from controller import Robot\nfrom controller import GPS\nfrom controller import PositionSensor\nimport math\n\ndef give_alpha(x,y,gx,gy):\n alpha = ((math.atan2((gy-y),(gx-x))))*(180/3.14)\n return alpha\n\ndef get_dist_to_line(x,y,gx,gy,x_first,y_first):\n return abs((((gx-x_first)*(y_first-y))-((x_first-x)*(gy-y_first)))/(math.sqrt((gx-x_first)**2+(gy-y_first)**2)))\n\ndef run_robot(robot,gps,unit):\n # timestep = 64\n timestep = int(robot.getBasicTimeStep())\n gps.enable(1)\n unit.enable(1)\n \n max_speed = 6.28\n \n gx = 0.5\n # gy = 0.0\n gy = 0.5\n \n left_motor = robot.getDevice('left wheel motor')\n right_motor = robot.getDevice('right wheel motor')\n \n left_motor.setPosition(float('inf'))\n left_motor.setVelocity(0.0)\n \n right_motor.setPosition(float('inf'))\n right_motor.setVelocity(0.0)\n \n left_sensor = robot.getDevice('left wheel sensor')\n right_sensor= robot.getDevice('right wheel sensor')\n \n left_sensor.enable(1)\n right_sensor.enable(1)\n \n prox_sensors = []\n for ind in range(8):\n sensor_name='ps'+str(ind)\n prox_sensors.append(robot.getDevice(sensor_name))\n prox_sensors[ind].enable(timestep)\n\n flag = 0\n start = 5\n \n x_second = 0.0\n y_second = 0.0\n \n pss = [0,0]\n prev_pss = [0,0]\n dists = [0,0]\n \n WHEEL_RADIUS = 0.0205\n WHEEL_CIR = 2*3.14*WHEEL_RADIUS\n WHEEL_UNIT = WHEEL_CIR/max_speed\n WHEELS_DIST= 0.2\n \n robopos = [0, 0, 0]\n\n while robot.step(timestep) != -1:\n \n x = gps.getValues()[0]\n # ry = gps.getValues()[1]\n y = gps.getValues()[2] \n \n pss[0] = left_sensor.getValue()\n pss[1] = right_sensor.getValue()\n \n \n if(start==5):\n prev_pss[0] = pss[0]\n prev_pss[1] = pss[1]\n start = 1\n continue\n \n # print(\"--\",pss[0],\" \",pss[1])\n \n # print(\"--ps--\",pss[0],\" \",pss[1],\"----\")\n for ind in range(2):\n diff = pss[ind] - prev_pss[ind]\n if(diff < 0.001):\n diff = 0\n pss[ind] = prev_pss[ind]\n dists[ind] = diff * WHEEL_UNIT\n \n # print(\"--dists--\",dists[0],\" \",dists[1],\"----\",dists[0] - dists[1])\n 
\n v = (dists[0] + dists[1])/2.0\n w = (dists[0] - dists[1])/WHEELS_DIST\n \n \n dt = 1\n # print(\"w: \",w)\n # print(\"pos: \",robopos[2])\n robopos[2] += (w * dt)\n \n vx = v*math.cos(robopos[2])\n vy = v*math.sin(robopos[2])\n \n robopos[0] += (vx * dt)\n robopos[1] += (vy * dt)\n \n ang = robopos[2]*(180/3.14)\n # print(ang)\n \n for ind in range(2):\n prev_pss[ind] = pss[ind]\n\n # left_speed = max_speed\n # right_speed = -max_speed\n # left_motor.setVelocity(left_speed)\n # right_motor.setVelocity(right_speed) \n # continue\n \n if(start==1):\n x_first = gps.getValues()[0]\n y_first = gps.getValues()[2]\n x_1st = x_first\n y_1st = y_first\n start=2\n continue\n elif(start==2):\n x_second = gps.getValues()[0]\n y_second = gps.getValues()[2]\n start=2\n orient = give_alpha(x_first,y_first,gx,gy)\n if(abs(orient-ang)<5):\n start = 0\n left_speed = max_speed\n right_speed = -max_speed\n left_motor.setVelocity(left_speed)\n right_motor.setVelocity(right_speed) \n continue\n \n # angs = abs(unit.getRollPitchYaw()[2]*(180/math.pi))\n # print(angs)\n \n dist_to_line = get_dist_to_line(x,y,gx,gy,x_1st,y_1st)\n # print(dist_to_line)\n \n # for ind in range(8):\n # print(\"ind: {}, val: {}\".format(ind,prox_sensors[ind].getValue()))\n \n left_wall = prox_sensors[5].getValue()>80\n left_corner = prox_sensors[6].getValue()>80\n front_wall = prox_sensors[7].getValue()>80\n \n left_wall = prox_sensors[5].getValue()>80\n left_corner = prox_sensors[6].getValue()>80\n front_wall = prox_sensors[7].getValue()>80\n \n left_speed = max_speed\n right_speed = max_speed\n \n \n if(flag==2):\n left_speed = max_speed\n right_speed = -max_speed\n left_motor.setVelocity(left_speed)\n right_motor.setVelocity(right_speed)\n orient = give_alpha(x,y,gx,gy)\n # print(orient,\" \",ang,\" \",abs(orient-ang))\n if(abs(orient-ang)<20):\n start=5\n flag = 0\n continue\n else:\n continue\n \n if(front_wall):\n flag = 1\n \n if(flag==1):\n if(left_wall and dist_to_line<0.03):\n left_speed = max_speed\n right_speed = -max_speed\n x_second = x\n y_second = y\n flag = 2\n continue\n \n if front_wall:\n # print(\"turn right in place\")\n left_speed = max_speed\n right_speed = -max_speed\n else:\n if left_wall:\n # print(\"forward\")\n left_speed = max_speed\n right_speed = max_speed\n else:\n # print(\"turn left\")\n left_speed = max_speed/8\n right_speed = max_speed\n \n if left_corner:\n # print(\"came too close, drive right\")\n left_speed = max_speed\n right_speed = max_speed/8 \n \n left_motor.setVelocity(left_speed)\n right_motor.setVelocity(right_speed)\n\nif __name__ == \"__main__\":\n my_robot = Robot()\n my_gps = my_robot.getDevice(\"gps\")\n my_unit = my_robot.getDevice(\"inertial unit\")\n run_robot(my_robot,my_gps,my_unit)\n","sub_path":"Webots_BUG2/bug2/bug2/controllers/my_controller_bug/my_controller_bug.py","file_name":"my_controller_bug.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"104988940","text":"# level order traversal.\n# connect each line. O(n) space to maintain a queue. \n# max number of nodes is 2 * 12 4096\n\n# recursion?\n# only local view. Do not know what the neighbor node is \n\"\"\" Recursion: time O(N) space(log n)\npreorder: root left right. Since I have access to root, I can connect left and right. \nFor each subtree, do the same. 
This will only connect nodes in the same level in the same subtree.\nSince it is a perfect tree, the only disconnection will occur during any right subtree for the leftest nodes. \nSo, after processing root's right and left, left.next = right. \nWhen recursing on left, left.right.next = left.next.left\n \"\"\"\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if root is None:\n return \n if root.left is None:\n return # left and right are None\n else:\n root.left.next = root.right\n if root.next is not None:\n root.right.next = root.next.left\n self.connect(root.left)\n self.connect(root.right)\n return root","sub_path":"116. Populating Next Right Pointers in Each Node.py","file_name":"116. Populating Next Right Pointers in Each Node.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"577830884","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nleft=[]\r\nright=[]\r\nleft+=list(input().strip())\r\nnum = int(input())\r\nfor _ in range(num):\r\n li = list(map(str, input().split()))\r\n if li[0] == \"L\":\r\n if len(left)>0:\r\n right.append(left[-1])\r\n del left[-1]\r\n elif li[0] == \"D\":\r\n if len(right)>0:\r\n left.append(right[-1])\r\n del right[-1]\r\n elif li[0] == \"B\":\r\n if len(left)>0:\r\n del left[-1]\r\n else:\r\n left.append(li[1])\r\nright.reverse()\r\nprint(\"\".join(left)+\"\".join(right))","sub_path":"powerful104/1406.py","file_name":"1406.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"422604685","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom store.models import Product\n\n\nclass ShoppingCart(object):\n # Initialize shopping cart\n def __init__(self, request):\n self.session = request.session\n cart = self.session.get(settings.CART_SESSION_ID)\n if not cart:\n cart = self.session[settings.CART_SESSION_ID] = {}\n self.cart = cart\n\n # Add products to cart and update qty\n def __add__(self, product, quantity=1, override_quantity=False):\n product_id = str(product.id)\n if product_id not in self.cart:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n self.save()\n\n # save sessions\n def save(self):\n self.session.modified = True\n\n # Remove products from cart\n def remove(self, product):\n product_id = str(product.id)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()\n\n # Iterate through products in cart to get them from the db\n def __iter__(self):\n product_ids = self.cart.keys()\n products = Product.objects.filter(id__in=product_ids)\n cart = self.cart.copy()\n for product in products:\n cart[str(product.id)] ['product'] = product\n for item in cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n # Count items in the cart\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n # Get total price of items in cart\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity'] for item\n in self.cart.values())\n\n # Clear cart session after or sth\n def clear(self):\n del 
self.session[settings.CART_SESSION_ID]\n self.save()","sub_path":"cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"371049069","text":"import numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom utility.logging.base_logger import BaseLogger\n\nclass SingleStepLogger(BaseLogger):\n def __init__(self, is_logging):\n super().__init__(is_logging)\n self.logger = None\n\n def log_acc_reward_single_planning_step(self, test_name, step, acc_reward, actions, std=None):\n title = f'Single_Step_test/{test_name}'\n self._add_scalar(title, acc_reward, step, self.logger)\n self._add_text(title, f'mean {round(acc_reward,5)} +- {std}, actions: {actions}',\n step=step, logger=self.logger)\n\n def start_log(self, name):\n if not self._is_logging:\n return\n self.logger = SummaryWriter(log_dir=f'{self.log_dir_root}/single_step/{name}')\n\n def commit_log(self):\n if not self._is_logging:\n return\n self.logger.flush()\n\n def end_log(self):\n if not self._is_logging:\n return\n self.commit_log()\n self.logger.close()\n","sub_path":"utility/logging/single_step_logger.py","file_name":"single_step_logger.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"411549965","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/r/prj/p/kuvalda/env/lib/python3.5/site-packages/kuvalda/__init__.py\n# Compiled at: 2016-12-11 11:35:50\n# Size of source mod 2**32: 348 bytes\nfrom __future__ import absolute_import\nfrom .kuvalda import *\n__all__ = ('validate', 'sanitize', 'sanitize_in_place', 'compose', 'list_of', 'mapping',\n 'default', 'optional', 'kind_of', 'ValidationError')\n__version__ = '0.1.0'\n__author__ = 'Roma Sokolov'\n__license__ = 'MIT'","sub_path":"pycfiles/kuvalda-0.1.0.macosx-10.12-x86_64.tar/__init__.cpython-35.py","file_name":"__init__.cpython-35.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"50238870","text":"from .abstractViewer import AbstractViewer\nfrom request.heroRequest import HeroRequest\nfrom helper.general import print_80\n\n\nclass HeroViewer(AbstractViewer):\n\n def __init__(self, userName, userId, heroId):\n\n self.userName = userName\n self.userId = userId\n self.heroId = heroId\n\n\n heroRequest = HeroRequest(self.userName, self.userId, self.heroId)\n self.hero = heroRequest.GetData()\n self.header = None\n self._set_header('NO INIT')\n\n def print_items(self):\n self._set_header('ITEMS')\n print(self.header)\n\n for itemType, item in self.hero['items'].items():\n print('{:<15}: {}'.format(itemType,\n item['name']))\n\n def print_skills(self):\n self._set_header('SKILLS')\n print(self.header)\n\n for skillName, description in self.hero['skills'].items():\n print('SKILL\\n{}\\n{}\\n\\nRUNE\\n{}'.format(skillName,\n description['skill'],\n description['rune']\n ))\n print_80('-')\n\n def print_stats(self):\n self._set_header('STATS')\n print(self.header)\n for name, stat in self.hero['stats'].items():\n print('{:<20}: {:<25}'.format(name, 
stat))\n\n","sub_path":"view/HeroViewer.py","file_name":"HeroViewer.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"596110335","text":"import logging\nimport os\n\nimport numpy as np\n\nfrom doom_py import DoomGame, Mode, Button, GameVariable, ScreenFormat, ScreenResolution, Loader\nfrom gym import error, spaces\nfrom gym.envs.doom import doom_env\n\nlogger = logging.getLogger(__name__)\n\nclass DoomHealthGatheringEnv(doom_env.DoomEnv):\n \"\"\"\n ------------ Training Mission 5 - Health Gathering ------------\n This map is a guide on how to survive by collecting health packs.\n It is a rectangle with green, acidic floor which hurts the player\n periodically. There are also medkits spread around the map, and\n additional kits will spawn at interval.\n\n Allowed actions:\n [12] - MOVE_FORWARD - Move forward - Values 0 or 1\n [13] - TURN_RIGHT - Turn right - Values 0 or 1\n [14] - TURN_LEFT - Turn left - Values 0 or 1\n Note: see controls.md for details\n\n Rewards:\n + 1 - Several times per second - Survive as long as possible\n -100 - Death penalty\n\n Goal: 1000 points\n Stay alive long enough to reach 1,000 points (~ 30 secs)\n\n Ends when:\n - Player is dead\n - Timeout (60 seconds - 2,100 frames)\n -----------------------------------------------------\n \"\"\"\n def __init__(self):\n super(DoomHealthGatheringEnv, self).__init__()\n package_directory = os.path.dirname(os.path.abspath(__file__))\n self.loader = Loader()\n self.game = DoomGame()\n self.game.load_config(os.path.join(package_directory, 'assets/health_gathering.cfg'))\n self.game.set_vizdoom_path(self.loader.get_vizdoom_path())\n self.game.set_doom_game_path(self.loader.get_freedoom_path())\n self.game.set_doom_scenario_path(self.loader.get_scenario_path('health_gathering.wad'))\n self.game.set_doom_map('map01')\n self.screen_height = 480 # Must match .cfg file\n self.screen_width = 640 # Must match .cfg file\n # 3 allowed actions [12, 13, 14] (must match .cfg file)\n self.action_space = spaces.HighLow(np.matrix([[0, 1, 0]] * 3))\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 3))\n self.game.set_window_visible(False)\n self.viewer = None\n self.game.init()\n self.game.new_episode()\n","sub_path":"gym/envs/doom/doom_health_gathering.py","file_name":"doom_health_gathering.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"286233411","text":"import contextlib\n\nimport etcd\n\n\nclass Lock(object):\n\n \"\"\"\n Lock object using etcd's lock module.\n \"\"\"\n\n def __init__(self, client, key, ttl=0, value=None):\n \"\"\"\n Initialize a lock object.\n\n Args:\n client (Client): etcd client to use for communication.\n\n key (string): key to lock.\n\n ttl (int): ttl (in seconds) for the lock to live.\n 0 or None to lock forever.\n\n value (mixed): value to store on the lock.\n \"\"\"\n self.client = client\n if not key.startswith('/'):\n key = '/' + key\n self.key = key\n self.ttl = ttl\n self.value = value\n self._index = None\n\n def __enter__(self):\n self.acquire()\n\n def __exit__(self, type, value, traceback):\n self.release()\n\n @property\n def _path(self):\n return u'/mod/v2/lock{}'.format(self.key)\n\n def acquire(self, timeout=None):\n \"\"\"Acquire the lock from etcd. 
Blocks until lock is acquired.\"\"\"\n params = {u'ttl': self.ttl}\n if self.value is not None:\n params[u'value'] = self.value\n\n res = self.client.api_execute(\n self._path, self.client._MPOST, params=params, timeout=timeout)\n self._index = res.data.decode('utf-8')\n return self\n\n def get(self):\n \"\"\"\n Get Information on the lock.\n This allows to operate on locks that have not been acquired directly.\n \"\"\"\n res = self.client.api_execute(self._path, self.client._MGET)\n if res.data:\n self.value = res.data.decode('utf-8')\n else:\n raise etcd.EtcdException('Lock is non-existent (or expired)')\n self._get_index()\n return self\n\n def _get_index(self):\n res = self.client.api_execute(\n self._path,\n self.client._MGET,\n {u'field': u'index'})\n if not res.data:\n raise etcd.EtcdException('Lock is non-existent (or expired)')\n self._index = res.data.decode('utf-8')\n\n def is_locked(self):\n \"\"\"Check if lock is currently locked.\"\"\"\n params = {u'field': u'index'}\n res = self.client.api_execute(\n self._path, self.client._MGET, params=params)\n return bool(res.data)\n\n def release(self):\n \"\"\"Release this lock.\"\"\"\n if not self._index:\n raise etcd.EtcdException(\n u'Cannot release lock that has not been locked')\n params = {u'index': self._index}\n res = self.client.api_execute(\n self._path, self.client._MDELETE, params=params)\n self._index = None\n\n def renew(self, new_ttl, timeout=None):\n \"\"\"\n Renew the TTL on this lock.\n\n Args:\n new_ttl (int): new TTL to set.\n \"\"\"\n if not self._index:\n raise etcd.EtcdException(\n u'Cannot renew lock that has not been locked')\n params = {u'ttl': new_ttl, u'index': self._index}\n res = self.client.api_execute(\n self._path, self.client._MPUT, params=params)\n","sub_path":"src/etcd/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"268386764","text":"from datetime import datetime\nfrom datetime import timezone\nimport pytest\n\npytestmark = [\n pytest.mark.django_db,\n pytest.mark.freeze_time(\"2032-12-01 15:30Z\"),\n]\n\n\n@pytest.fixture(autouse=True)\ndef rebuild_tags(mocker):\n return mocker.patch(\"users.tasks.rebuild_tags.delay\")\n\n\ndef test_works(order):\n assert order.paid is None\n\n order.set_paid()\n order.refresh_from_db()\n\n assert order.paid == datetime(2032, 12, 1, 15, 30, tzinfo=timezone.utc)\n assert order.study is not None\n\n\ndef test_ships(order, course, user, ship):\n order.set_paid()\n\n ship.assert_called_once_with(course, to=user, order=order)\n\n\ndef test_update_user_tags(order, rebuild_tags):\n order.set_paid()\n\n rebuild_tags.assert_called_once_with(order.user.id)\n\n\ndef test_not_ships_if_order_is_already_paid(order, ship):\n order.setattr_and_save(\"paid\", datetime(2032, 12, 1, 15, 30, tzinfo=timezone.utc))\n\n order.set_paid()\n\n ship.assert_not_called()\n\n\ndef test_shipment_date(order):\n order.set_paid()\n order.refresh_from_db()\n\n assert order.shipped == datetime(2032, 12, 1, 15, 30, tzinfo=timezone.utc)\n\n\ndef test_order_is_not_shipped_again_if_already_shipped(order, ship):\n order.shipped = datetime(2000, 11, 12, 1, 13, tzinfo=timezone.utc)\n order.save()\n\n order.set_paid()\n\n ship.assert_not_called()\n\n\ndef test_shipment_date_is_not_reset(order, ship):\n order.shipped = datetime(2000, 11, 12, 1, 13, tzinfo=timezone.utc)\n order.save()\n\n order.set_paid()\n order.refresh_from_db()\n\n assert order.shipped == datetime(2000, 11, 12, 1, 
13, tzinfo=timezone.utc)\n\n\ndef test_unpaid_date_is_reset(order):\n    order.unpaid = datetime(2032, 12, 1, 15, 13, tzinfo=timezone.utc)\n    order.save()\n\n    order.set_paid()\n    order.refresh_from_db()\n\n    assert order.unpaid is None\n\n\ndef test_unpaid_date_is_not_reset_when_order_is_not_paid(order):\n    order.paid = datetime(2032, 12, 1, 12, 13, tzinfo=timezone.utc)\n    order.unpaid = datetime(2032, 12, 1, 15, 13, tzinfo=timezone.utc)\n    order.save()\n\n    order.set_paid()\n    order.refresh_from_db()\n\n    assert order.unpaid == datetime(2032, 12, 1, 15, 13, tzinfo=timezone.utc), \"unpaid has not been changed\"\n\n\ndef test_empty_item_does_not_break_things(order, ship):\n    order.setattr_and_save(\"course\", None)\n\n    order.set_paid()\n\n    ship.assert_not_called()\n","sub_path":"src/orders/tests/order_shipping/tests_order_set_paid.py","file_name":"tests_order_set_paid.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"100792509","text":"from PySide2 import QtCore, QtWidgets, QtGui\n\n\nclass UsernameWindow(QtWidgets.QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle('Username')\n        self.username = ''\n        self.button = QtWidgets.QPushButton(\"Submit\")\n        self.text = QtWidgets.QLabel(\"Please input a username...\")\n        self.textbox = QtWidgets.QLineEdit(self)\n        self.text.setAlignment(QtCore.Qt.AlignCenter)\n        self.layout = QtWidgets.QVBoxLayout()\n        self.layout.addWidget(self.text)\n        self.layout.addWidget(self.textbox)\n        self.layout.addWidget(self.button)\n        self.setLayout(self.layout)\n        self.button.clicked.connect(self.setUsername)\n\n    def setUsername(self):\n        self.username = self.textbox.text()\n        self.close()\n","sub_path":"numberguess_oop/ui/UsernameWindow.py","file_name":"UsernameWindow.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"404937978","text":"# -*- coding: utf-8 -*-\n\nfrom Curve import getCurve\nfrom RSA import RsaMath\n\np, n, a, b, G = getCurve()\n\n\ndef is_on_Curve(point):\n    \"Check whether the point lies on the curve\"\n\n    if point is None:\n        return True\n\n    x, y = point\n    return (y ** 2 - x ** 3 - a * x - b) % p == 0\n\n\ndef negatives_Point(point):\n    \"Find the inverse (negation) of the point\"\n\n    assert is_on_Curve(point)\n    if point is None:\n        # -0 = 0\n        return None\n    x, y = point\n\n    return (x, -y % p)\n\n\ndef add_Points(point1, point2):\n    \"Add the two points point1 and point2\"\n\n    assert is_on_Curve(point1)\n    assert is_on_Curve(point2)\n\n    if point1 is None:\n        # 0 + point2 = point2\n        return point2\n\n    if point2 is None:\n        # point1 + 0 = point1\n        return point1\n\n    x1, y1 = point1\n    x2, y2 = point2\n\n    if x1 == x2 and y1 != y2:\n        # point1 + (-point1) = 0\n        return None\n    if x1 == x2:\n        # point1 == point2\n        m = (3 * x1 * x1 + a) * RsaMath.mod_inverse(2 * y1, p)\n    else:\n        m = (y2 - y1) * RsaMath.mod_inverse((x2 - x1) % p, p)\n\n    x3 = (m ** 2 - x1 - x2) % p\n    y3 = (m * (x1 - x3) - y1) % p\n\n    return (x3, y3)\n\n\ndef scalar_Point(k, point):\n    \"Multiply the point by the scalar k (k*point)\"\n\n    assert is_on_Curve(point)\n\n    if k % n == 0 or point is None:\n        return None\n    elif k < 0:\n        return scalar_Point(-k, negatives_Point(point))\n\n    result = None\n    addend = point\n\n    while k:\n        if k & 1:\n            result = add_Points(result, addend)\n\n        addend = add_Points(addend, addend)\n        k >>= 1  # k = k / 2\n\n    return
result\n","sub_path":"ECC/EccMath.py","file_name":"EccMath.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"565135545","text":"import tensorflow as tf\nimport numpy as np\n#from tensorflow.keras.layers import * #Bidirectional, CuDNNLSTM, Conv2D, Dense, \\\n #Embedding, Concatenate, LeakyReLU, BatchNormalization\nfrom util.onehot import nchars\n \ndef Conv2D(m, kernel_size, strides=1, **kwargs):\n args = {\n 'padding': \"same\",\n 'kernel_initializer': \"he_normal\",\n 'use_bias': False\n }\n args.update(kwargs)\n return tf.keras.layers.Conv2D(m, kernel_size=3, strides=strides, **args)\n\n# modified from https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/keras/layers/cudnn_recurrent.py\ndef bias_initializer_two(channels):\n \"\"\"This bias initializer has more or less the effect of unit_forget_bias,\n but the starting bias is two, not just one.\n Hopefully this will help with very-long-term memory.\"\"\"\n return tf.constant_initializer(\n [0.0] * (channels*5) + [2.0] * channels + [0.0] * (channels*2)\n )\n return f\ndef bias_initializer_two_cell(channels):\n \"\"\"This bias initializer has more or less the effect of unit_forget_bias,\n but the starting bias is two, not just one.\n Hopefully this will help with very-long-term memory.\"\"\"\n return tf.constant_initializer(\n [0.0] * (channels) + [2.0] * channels + [0.0] * (channels*2)\n )\n return f\n\ndef LSTM(channels, **kwargs):\n args = dict(\n return_sequences = True,\n unit_forget_bias = False, bias_initializer = bias_initializer_two(channels)\n )\n args.update(kwargs)\n return tf.keras.layers.CuDNNLSTM(channels, **args)\n\ndef LSTMCell(channels, **kwargs):\n args = dict(\n recurrent_activation = \"sigmoid\",\n unit_forget_bias = False, bias_initializer = bias_initializer_two_cell(channels)\n )\n args.update(kwargs)\n return tf.keras.layers.LSTMCell(channels, **args)\n \nfrom tensorflow.keras.layers import BatchNormalization, \\\n LeakyReLU, \\\n GlobalAveragePooling2D, \\\n Dense, \\\n Reshape, \\\n Dropout, \\\n Bidirectional\nfrom tensorflow.keras import Sequential\n\ndef _lstm(size):\n return Bidirectional(LSTM(size))\n\ndef _pyra(size):\n # I'm not sure what the paper means by \"projection layer.\"\n # Zhang et al has a thing where they staple two adjacent\n # frames together and project it down.\n # On the other hand, the Bidirectional layer also returns\n # twice the size by default, I think, so project all four\n # of those things down? 
Or two separate projections?\n def pyramids(zz):\n # assuming tf.shape(zz)[1] is zero mod BUFPAD\n zz = tf.concat([zz[:, ::2, :], zz[:, 1::2, :]], -1)\n return zz\n proj = Dense(size)\n batc = BatchNormalization()\n def fn(buf):\n return batc(proj(pyramids(buf)))\n return fn\n\ndef whatever_norm(z):\n mean, variance = tf.nn.moments(z, [z.shape.ndims - 1], keep_dims = True)\n return tf.nn.batch_normalization(z, mean, variance, None, None, variance_epsilon=1e-4)\n #return tf.contrib.layers.layer_norm(z, center=False, scale=False)\n\nclass initial_state(tf.keras.layers.Layer):\n def __init__(self, units,\n bias_initializer = \"zeros\",\n bias_regularizer = None,\n bias_constraint = None):\n super(initial_state, self).__init__()\n self.units = units\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self.bias_constraint = tf.keras.constraints.get(bias_constraint)\n self.input_spec = tf.keras.layers.InputSpec(min_ndim = 2)\n \n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n self.bias = self.add_weight(\n shape = (self.units,),\n initializer = self.bias_initializer,\n name = \"initial_state\",\n regularizer = self.bias_regularizer,\n constraint = self.bias_constraint\n )\n self.input_spec = tf.keras.layers.InputSpec(min_ndim = 2, axes = {-1: input_dim})\n self.built = True\n \n def call(self, inputs):\n return self.bias\n \n def compute_output_shape(self, input_shape):\n assert len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n \n def get_config(self):\n config = dict(\n units = self.units,\n bias_initializer = tf.keras.initializers.serialize(self.bias_initializer),\n bias_regularizer = tf.keras.initializers.serialize(self.bias_regularizer),\n bias_constraint = tf.keras.constraints.serialize(self.bias_constraint)\n )\n base_config = super(initial_state, self).get_config().copy()\n base_config.update(config)\n return base_config\n \nclass convblock(tf.keras.layers.Layer):\n def __init__(self, channels, stride=1):\n super(convblock, self).__init__()\n self.conv0 = Conv2D(channels, 1, stride)\n self.conv1 = Conv2D(channels, 3, stride)\n #self.bn1 = BatchNormalization(axis=3)\n self.act1 = LeakyReLU()\n self.conv2 = Conv2D(channels, 3)\n \n self.squeeze = tf.keras.Sequential([\n GlobalAveragePooling2D(),\n Reshape((1, 1, channels)),\n Dense(channels // 16, activation='relu', kernel_initializer='he_normal'),\n Dense(channels, activation='sigmoid', kernel_initializer='he_normal')\n ])\n \n #self.bn2 = BatchNormalization(axis=3)\n self.act2 = LeakyReLU()\n \n def _squeeze(self, out):\n squ = self.squeeze(out)\n return tf.keras.layers.multiply([squ, out])\n \n def call(self, x):\n shortcut = self.conv0(x)\n out = self.conv1(x)\n out = whatever_norm(out)\n #out = self.bn1(out)\n out = self.act1(out)\n out = self.conv2(out)\n out = self._squeeze(out)\n #out = self.bn2(out)\n out = self.act2(out)\n # atashi iya ne\n return whatever_norm(tf.keras.layers.add([shortcut, out]))\n \nclass encoder(tf.keras.layers.Layer):\n def __init__(self):\n super(encoder, self).__init__()\n # Different parts of the mel spectrum act differently,\n # so we should probably add location data to\n # the starting image, like rotating between two phases\n # or adding a spinner.\n WIDTH = 160\n \n self.conv1 = convblock(16, 1)\n self.conv2 = convblock(16, 2)\n self.conv3 = convblock(64, 1)\n self.conv4 = 
convblock(64, 2)\n\n self.flatten_spectrogram = Reshape((-1, 64 * WIDTH // 4))\n\n self.lstm1 = _lstm(512)\n self.drop1 = Dropout(0.1)\n \n self.lstm2 = _lstm(512)\n self.drop2 = Dropout(0.1)\n \n self.lstm3 = _lstm(512)\n self.drop3 = Dropout(0.1)\n \n def call(self, zz):\n for fn in [self.conv1, self.conv2, self.conv3, self.conv4]:\n zz = fn(zz)\n \n zz = self.flatten_spectrogram(zz)\n \n zz = self.lstm1(zz)\n zz = whatever_norm(zz)\n zz = self.drop1(zz)\n \n zz = self.lstm2(zz)\n zz = whatever_norm(zz)\n zz = self.drop2(zz)\n \n zz = self.lstm3(zz)\n zz = whatever_norm(zz)\n zz = self.drop3(zz)\n \n # should i be remembering the state?\n return zz\n\n def initialize_hidden_state(self, bsiz):\n pass\n\nclass AttentionCell(tf.keras.layers.Layer):\n def __init__(self, units):\n super(AttentionCell, self).__init__()\n self.units = units\n self.Wa = Dense(self.units)\n self.va = Dense(1)\n self.cell = LSTMCell(units)\n self.state_size = [units, units]\n \n def build(self, input_shape):\n self.built = True\n \n def call(self, inputs, states, constants):\n speech_encode, encodestate = constants\n print(\"states:\", states)\n state = self.Wa(states[0])\n state = tf.expand_dims(state, axis=1)\n attentions = self.va(tf.tanh(state + encodestate))\n attention_logits = tf.squeeze(attentions, axis=2)\n attention_weights = tf.nn.softmax(attention_logits)\n context = tf.einsum('ai,aij->aj',\n attention_weights, speech_encode)\n lstm_in = tf.concat([inputs, context], axis=1)\n lstmout, hiddenstate = self.cell(lstm_in, states)\n #print(hiddenstate)\n return lstmout, hiddenstate\n \nclass attend(tf.keras.layers.Layer):\n def __init__(self, units):\n super(attend, self).__init__()\n self.units = units\n \"\"\"Bahdanau attention:\n e_{ij} = v_a^T tanh(W_a s_{i-1} + U_a h_j)\n \\alpha_{ij} = exp(e_{ij}) / \\sum_j exp(e_{ij})\n \"\"\"\n self.Ua = Dense(self.units)\n self.cell = tf.keras.layers.RNN(AttentionCell(units),\n return_sequences = True)\n\n def call(self, inputs):\n secrets, speech_encode = inputs\n encodestate = self.Ua(speech_encode)\n return self.cell(secrets, constants=(speech_encode, encodestate))\n\n def get_encode_state(self, speech_encode):\n return self.Ua(speech_encode)\n\n def call_one(self, secrets, last_state, speech_encode):\n encodestate = self.Ua(speech_encode)\n return self.cell.cell.call(secrets, last_state, constants=(speech_encode, encodestate))\n #return self.cell.cell.call(secrets, last_state, constants)\n \nclass decoder(tf.keras.layers.Layer):\n def __init__(self):\n tf.keras.layers.Layer.__init__(self)\n self.units = 256\n self.embedding = Dense(self.units)\n self.attends1 = attend(256)\n self.attends2 = attend(256)\n self.map1 = Dense(256)\n self.map2 = Dense(256)\n self.distrib = Dense(nchars)\n \n def call(self, inputs):\n trans, speech_encode = inputs\n secrets = self.embedding(trans)\n print(tf.shape(secrets))\n print(tf.shape(speech_encode))\n out = self.attends1([secrets, speech_encode])\n out = whatever_norm(out)\n out = self.attends2([out, speech_encode])\n out = whatever_norm(out)\n out = self.map1(out)\n out = whatever_norm(out)\n out = self.map2(out)\n out = whatever_norm(out)\n out = self.distrib(out)\n out = tf.nn.softmax(out)\n return out\n\n def get_encode_state(self, speech_encode):\n return (self.attends1.get_encode_state(speech_encode),\n self.attends2.get_encode_state(speech_encode))\n\n def prepare_encode(self):\n placeholder = lambda *z: tf.placeholder(tf.float32, z)\n speech_encode = placeholder(1, None, None)\n return speech_encode, 
self.get_encode_state(speech_encode)\n\n def decode_one(self):\n placeholder = lambda *z: tf.placeholder(tf.float32, z)\n trans = placeholder(1, None)\n speech_encode = placeholder(1, None, None)\n last1 = [placeholder(1, 256), placeholder(1, 256)]\n last2 = [placeholder(1, 256), placeholder(1, 256)]\n inputs = [trans, speech_encode, (last1, last2)]\n\n secrets = self.embedding(trans)\n out, last1 = self.attends1.call_one(secrets, last1, speech_encode)\n out = whatever_norm(out)\n out, last2 = self.attends2.call_one(out, last2, speech_encode)\n out = whatever_norm(out)\n out = self.map1(out)\n out = whatever_norm(out)\n out = self.map2(out)\n out = whatever_norm(out)\n out = self.distrib(out)\n out = tf.nn.softmax(out)\n outputs = [out, (last1, last2)]\n return inputs, outputs\n\nclass EncoderDecoder(tf.keras.Model):\n def __init__(self):\n super(EncoderDecoder, self).__init__()\n self.enc = encoder()\n self.dec = decoder()\n \n def call(self, spectrum, transcript):\n speech_encode = self.enc(spectrum)\n return self.dec([transcript, speech_encode])\n \n def loss(self, transcript, decode):\n pass\n","sub_path":"nets/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"574347468","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 19 11:56:08 2021\n\n@author: duong\n\"\"\"\nimport sys\nif sys.version_info[0] >= 3:\n\tunicode = str\nimport json\nimport os, argparse\nfrom Bio import SeqIO\nimport random\nimport multiprocessing\nparser=argparse.ArgumentParser(prog='mergeClassification.py',\n\t\t\t\t\t\t\t usage=\"%(prog)s [options] -i classificationfiles -o output\",\n\t\t\t\t\t\t\t description='''Script that compares two classifications.''',\n\t\t\t\t\t\t\t epilog=\"\"\"Written by Duong Vu duong.t.vu@gmail.com\"\"\",\n )\n\nparser.add_argument('-i','--classificationfilenames', required=True, help='the classification filenames (the outputs of dnabarcoder.py classify, for example), separated by commas')\nparser.add_argument('-o','--out', required=True, help='The output.')\nparser.add_argument('-idcolumnname','--idcolumnname',default=\"ID\", help='the column name of sequence id in the classification file.')\nparser.add_argument('-scorecolumnname','--scorecolumnname',default=\"score\", help='the column name of similarity scores in the classification file.')\n\nargs=parser.parse_args()\noutput=args.out\n\ndef SelectBetterClassification(taxonomy1,taxonomy2):\n\ttaxonomy={}\n\trank=\"\"\n\tif taxonomy1[\"species\"]!=\"unidentified\" or taxonomy2[\"species\"]!=\"unidentified\":\n\t\trank=\"species\"\n\t\tif taxonomy1[\"species\"]!=\"unidentified\" and taxonomy2[\"species\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"species\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telif taxonomy1[\"genus\"]!=\"unidentified\" or taxonomy2[\"genus\"]!=\"unidentified\":\n\t\trank = \"genus\"\n\t\tif taxonomy1[\"genus\"]!=\"unidentified\" and taxonomy2[\"genus\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"genus\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telif taxonomy1[\"family\"]!=\"unidentified\" or 
taxonomy2[\"family\"]!=\"unidentified\":\n\t\trank = \"family\"\n\t\tif taxonomy1[\"family\"]!=\"unidentified\" and taxonomy2[\"family\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"family\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telif taxonomy1[\"order\"]!=\"unidentified\" or taxonomy2[\"order\"]!=\"unidentified\":\n\t\trank = \"order\"\n\t\tif taxonomy1[\"order\"]!=\"unidentified\" and taxonomy2[\"order\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"order\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telif taxonomy1[\"class\"]!=\"unidentified\" or taxonomy2[\"class\"]!=\"unidentified\":\n\t\trank = \"class\"\n\t\tif taxonomy1[\"class\"]!=\"unidentified\" and taxonomy2[\"class\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"class\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telif taxonomy1[\"phylum\"]!=\"unidentified\" or taxonomy2[\"phylum\"]!=\"unidentified\":\n\t\trank = \"phylum\"\n\t\tif taxonomy1[\"phylum\"]!=\"unidentified\" and taxonomy2[\"phylum\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"phylum\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telif taxonomy1[\"kingdom\"]!=\"unidentified\" or taxonomy2[\"kingdom\"]!=\"unidentified\":\n\t\trank = \"kingdom\"\n\t\tif taxonomy1[\"kingdom\"]!=\"unidentified\" and taxonomy2[\"kingdom\"]!=\"unidentified\":\n\t\t\tif taxonomy1[\"score\"] >= taxonomy2[\"score\"]:\n\t\t\t\ttaxonomy =taxonomy1\n\t\t\telse:\n\t\t\t\ttaxonomy = taxonomy2\n\t\telif taxonomy1[\"kingdom\"]!=\"unidentified\":\n\t\t\ttaxonomy = taxonomy1\n\t\telse:\n\t\t\ttaxonomy = taxonomy2\n\telse:\n\t\ttaxonomy=taxonomy2\n\ttaxonomy[\"rank\"]=rank\n\treturn taxonomy\n\ndef LoadClassification(classificationdict,classificationfilename):\n\terror=False\n\tclassificationfile=open(classificationfilename)\n\theader=next(classificationfile)\n\tp_s=-1\n\tp_g=-1\n\tp_f=-1\n\tp_o=-1\n\tp_c=-1\n\tp_p=-1\n\tp_k=-1\n\tp_score=-1\n\tp_id=-1\n\tp_cutoff=-1\n\tp_confidence=-1\n\tp_refid=-1\n\tp_rank=-1\n\ti=0\n\tfor text in header.split(\"\\t\"):\n\t\ttext=text.rstrip()\n\t\tif text.lower()==args.idcolumnname.lower():\n\t\t\tp_id=i\n\t\telif \"reference id\" in text.lower() or \"referenceid\" in text.lower() or \"refid\" in text.lower():\n\t\t\tp_refid=i\n\t\telif text.lower()==\"cutoff\":\n\t\t\tp_cutoff=i\n\t\telif text.lower()==\"confidence\":\n\t\t\tp_confidence=i\n\t\telif text.lower() == \"rank\":\n\t\t\tp_rank = i\n\t\telif text.lower()==\"species\":\n\t\t\tp_s=i\n\t\telif text.lower()==\"genus\":\n\t\t\tp_g=i\n\t\telif text.lower()==\"family\":\n\t\t\tp_f=i\n\t\telif text.lower()==\"order\":\n\t\t\tp_o=i\n\t\telif text.lower()==\"class\":\n\t\t\tp_c=i\n\t\telif text.lower()==\"phylum\":\n\t\t\tp_p=i\n\t\telif text.lower()==\"kingdom\":\n\t\t\tp_k=i\n\t\telif text.lower()== args.scorecolumnname:\n\t\t\tp_score=i\n\t\ti=i+1\n\tif p_id==-1:\n\t\tprint(\"Please check the id column name in the classification file \" 
+ classificationfilename + \".\")\n\t\terror=True\n\tfor line in classificationfile:\n\t\ttexts=line.split(\"\\t\")\n\t\tseqid=texts[p_id].rstrip()\n\t\tscore = 0\n\t\tcutoff=0\n\t\tconfidence=0\n\t\trefid=\"\"\n\t\trank=\"\"\n\t\tif p_score > -1 and p_score < len(texts):\n\t\t\tscore = float(texts[p_score].rstrip())\n\t\tif p_cutoff > -1 and p_cutoff < len(texts):\n\t\t\tcutoff = float(texts[p_cutoff].rstrip())\n\t\tif p_confidence > -1 and p_confidence < len(texts):\n\t\t\tconfidence = float(texts[p_confidence].rstrip())\n\t\tif p_rank > -1 and p_rank < len(texts):\n\t\t\trank = texts[p_rank].rstrip()\n\t\tif p_refid > -1 and p_refid < len(texts):\n\t\t\trefid = texts[p_refid].rstrip()\n\t\tspecies=\"unidentified\"\n\t\tif p_s >-1 and p_s < len(texts):\n\t\t\tspecies = texts[p_s].rstrip()\n\t\tgenus = \"unidentified\"\n\t\tif p_g > -1 and p_g < len(texts):\n\t\t\tgenus = texts[p_g].rstrip()\n\t\tfamily = \"unidentified\"\n\t\tif p_f > -1 and p_f < len(texts):\n\t\t\tfamily= texts[p_f].rstrip()\n\t\torder = \"unidentified\"\n\t\tif p_o > -1 and p_o < len(texts):\n\t\t\torder = texts[p_o].rstrip()\n\t\tbioclass = \"unidentified\"\n\t\tif p_c > -1 and p_c < len(texts):\n\t\t\tbioclass = texts[p_c].rstrip()\n\t\tphylum = \"unidentified\"\n\t\tif p_p > -1 and p_p < len(texts):\n\t\t\tphylum = texts[p_p].rstrip()\n\t\tkingdom=\"unidentified\"\n\t\tif p_k > -1 and p_k < len(texts):\n\t\t\tkingdom = texts[p_k].rstrip()\n\t\ttaxonomy = {}\n\t\ttaxonomy.setdefault(\"score\", score)\n\t\ttaxonomy.setdefault(\"cutoff\", cutoff)\n\t\ttaxonomy.setdefault(\"confidence\", confidence)\n\t\ttaxonomy.setdefault(\"rank\", rank)\n\t\ttaxonomy.setdefault(\"referenceid\", refid)\n\t\ttaxonomy.setdefault(\"species\", species)\n\t\ttaxonomy.setdefault(\"genus\", genus)\n\t\ttaxonomy.setdefault(\"family\", family)\n\t\ttaxonomy.setdefault(\"order\", order)\n\t\ttaxonomy.setdefault(\"class\", bioclass)\n\t\ttaxonomy.setdefault(\"phylum\", phylum)\n\t\ttaxonomy.setdefault(\"kingdom\", kingdom)\n\t\tif seqid in classificationdict.keys():\n\t\t\texistingtaxonomy=classificationdict[seqid]\n\t\t\ttaxonomy=SelectBetterClassification(existingtaxonomy,taxonomy)\n\t\t\tclassificationdict[seqid]=taxonomy\n\t\telse:\n\t\t\tclassificationdict.setdefault(seqid, taxonomy)\n\tclassificationfile.close()\n\treturn error\n\ndef SaveClassification(classificationdict,output):\n\toutputfile=open(output,\"w\")\n\toutputfile.write(\"id\\tReferenceID\\tkingdom\\tphylum\\tclass\\torder\\tfamily\\tgenus\\tspecies\\trank\\tscore\\tcutoff\\tconfidence\\n\")\n\tfor id in classificationdict.keys():\n\t\ttaxonomy=classificationdict[id]\n\t\toutputfile.write(id + \"\\t\")\n\t\toutputfile.write(taxonomy[\"referenceid\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"kingdom\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"phylum\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"class\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"order\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"family\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"genus\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"species\"] + \"\\t\")\n\t\toutputfile.write(taxonomy[\"rank\"] + \"\\t\")\n\t\toutputfile.write(str(taxonomy[\"score\"]) + \"\\t\")\n\t\toutputfile.write(str(taxonomy[\"cutoff\"]) + \"\\t\")\n\t\toutputfile.write(str(taxonomy[\"confidence\"]) + \"\\n\")\n\tprint(\"The merged classification is saved in file \" + output + \".\")\n###########MAIN########################\nclassificationnames=[]\nif \",\" in args.classificationfilenames:\n\tclassificationnames = 
args.classificationfilenames.split(\",\")\nelse:\n\tclassificationnames.append(args.classificationfilenames)\nclassificationdict={}\n#merge classifications\nfor classificationname in classificationnames:\n\terror=LoadClassification(classificationdict,classificationname)\n#save merged classification\nSaveClassification(classificationdict,output)\n\n","sub_path":"aidscripts/mergeClassifications.py","file_name":"mergeClassifications.py","file_ext":"py","file_size_in_byte":8534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"558506414","text":"import sys\n\nimport pytest\n\nfrom briefcase.platforms.windows.msi import WindowsMSICreateCommand\n\n\n@pytest.mark.parametrize(\n 'version, version_triple', [\n ('1', '1.0.0'),\n ('1.2', '1.2.0'),\n ('1.2.3', '1.2.3'),\n ('1.2.3.4', '1.2.3'),\n ('1.2.3a4', '1.2.3'),\n ('1.2.3b5', '1.2.3'),\n ('1.2.3rc6', '1.2.3'),\n ('1.2.3.dev7', '1.2.3'),\n ('1.2.3.post8', '1.2.3'),\n ]\n)\ndef test_version_triple(first_app_config, tmp_path, version, version_triple):\n command = WindowsMSICreateCommand(base_path=tmp_path)\n\n first_app_config.version = version\n context = command.output_format_template_context(first_app_config)\n\n assert context['version_triple'] == version_triple\n\n\ndef test_explicit_version_triple(first_app_config, tmp_path):\n command = WindowsMSICreateCommand(base_path=tmp_path)\n\n first_app_config.version = '1.2.3a1'\n first_app_config.version_triple = '2.3.4'\n\n context = command.output_format_template_context(first_app_config)\n\n # Explicit version triple is used.\n assert context['version_triple'] == '2.3.4'\n\n\ndef test_guid(first_app_config, tmp_path):\n \"A preictable GUID will be generated from the bundle.\"\n command = WindowsMSICreateCommand(base_path=tmp_path)\n\n context = command.output_format_template_context(first_app_config)\n\n assert context['guid'] == 'd666a4f1-c7b7-52cc-888a-3a35a7cc97e5'\n\n\ndef test_explicit_guid(first_app_config, tmp_path):\n \"If a GUID is explicitly provided, it is used.\"\n command = WindowsMSICreateCommand(base_path=tmp_path)\n\n first_app_config.guid = 'e822176f-b755-589f-849c-6c6600f7efb1'\n context = command.output_format_template_context(first_app_config)\n\n # Explicitly provided GUID is used.\n assert context['guid'] == 'e822176f-b755-589f-849c-6c6600f7efb1'\n\n\ndef test_support_package_url(first_app_config, tmp_path):\n command = WindowsMSICreateCommand(base_path=tmp_path)\n\n # Set some properties of the host system for test purposes.\n command.host_arch = 'wonky'\n command.platform = 'tester'\n\n # This test result assumes we're on ARM64. 
However, we will be\n    # on almost every Windows box (and definitely will be in CI)\n    assert command.support_package_url_query == [\n        ('platform', 'tester'),\n        ('version', '3.{minor}'.format(minor=sys.version_info.minor)),\n        ('arch', 'amd64'),\n    ]\n","sub_path":"tests/platforms/windows/msi/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"418880329","text":"# -*- coding: utf-8 -*-\nimport os\nHOME = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'bin')\nrtenv = 'product'\n\nLOGFILE = {\n    'root': {\n        'filename': {\n            'DEBUG': os.path.join(HOME, '../log/house_api.log'),\n            'ERROR': os.path.join(HOME, '../log/house_api.error.log'),\n        }\n    }\n}\n# LOGFILE = 'stdout'\n\ndatabase = {\n    'house_core':{\n        'engine': 'pymysql',\n        'passwd': '123456',\n        'charset': 'utf8',\n        'db': 'house_core',\n        'idle_timeout': 10,\n        'host': '127.0.0.1',\n        'user': 'xuncheng',\n        'port': 3306,\n        'conn': 6\n    },\n\n}\n\n# web config\n# URLS configuration\nURLS = None\n# Static path configuration\nSTATICS = {'/static/':'/static/'}\n# Template configuration\nTEMPLATE = {\n    'cache': True,\n    'path': '',\n    'tmp': os.path.join(HOME, '../tmp'),\n}\n# Middleware\nMIDDLEWARE = ()\n# Web root path\nDOCUMENT_ROOT = HOME\n# Page encoding\nCHARSET = 'UTF-8'\n# An app is just a subdirectory\nAPPS = ()\nDATABASE = {}\n# Debug mode: True/False\n# Must be False in production\nDEBUG = True\n# Template path\ntemplate = os.path.join(HOME, 'template')\n\n# Server address\nHOST = '0.0.0.0'\n# Server port\nPORT = 8085\n# Base address for icon links\nMIS_DOMAIN = 'mis.xunchengfangfu.com'\nMIS_PORT = ''\nMIS_STATIC_PATH = '/mis/static/upload/icon/'\n# BASE_URL = 'http://' + MIS_DOMAIN + ':' + str(MIS_PORT) + MIS_STATIC_PATH\nBASE_URL = 'http://' + MIS_DOMAIN + MIS_STATIC_PATH\nTEXT_DETAIL_PREFIX_URL = '/v1/api/page/text/detail.html'\n# Name of the personal housing provident fund loan rate\nACCUMULATION_LOAN = 'accumulation_fund_loan'\n# Name of the commercial loan rate\nBUSINESS_LOAN = 'business_loan'\nTEXT_DETAIL_PREFIX_URL = '/v1/api/page/text/detail.html'\n# Payment parameters\nAPI_KEY = ''\nAPPID = ''\nMCH_ID = ''\nSECRET = ''\n# Agreement file\nAGREEMENT = '/home/xunchengfangfu/house/house_mis/data/agreement.html'\n# Scrolling banner text\nBANNER_TEXT = '/home/xunchengfangfu/house/house_mis/data/banner.txt'\n","sub_path":"conf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"373158791","text":"#Annie Kelly\n\nimport random\nimport time\n\ndef selsort(A, n):\n\tstart_time = time.time()\n\tfor i in range (0, n-1):\n\t\ti_min = i\n\t\tfor j in range (i+1, n):\n\t\t\tif (A[j] < A[i_min]):\n\t\t\t\ti_min = j\n\t\ttemp = A[i]\n\t\tA[i] = A[i_min]\n\t\tA[i_min] = temp\n\tprint(\"%s seconds\" %(time.time()-start_time))\n\ndef genlist(n):\n\tfor j in range (0,10):\n\t\ta = []\n\t\tfor i in range(0, n):\n\t\t\tx = random.randint(0,1000000)\n\t\t\ta.append(x)\n\t\tselsort(a, n)\n\t\t\ngenlist(1000)\ngenlist(10000)\ngenlist(100000)\ngenlist(1000000)\n","sub_path":"HW1/Kelly_Annie_HW1.py","file_name":"Kelly_Annie_HW1.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"648672611","text":"import heapq\nimport numpy as np\nfrom typing import List\n\nclass Solution:\n    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:\n        one_matrix = np.asarray(matrix.copy()).flatten().tolist()\n        heapq.heapify(one_matrix)\n        \n        \n        for _ in range(k - 1):\n            heapq.heappop(one_matrix)\n        \n        return heapq.heappop(one_matrix)\n        \n#         pointers = [0 for _ in
range(len(matrix))]\n \n# while k:\n# ret = float('inf')\n# selected_pointer = 0\n# for i, row in enumerate(matrix):\n# if len(row) - 1 < pointers[i]: continue\n# if row[pointers[i]] < ret:\n# ret = row[pointers[i]]\n# selected_pointer = i\n \n# pointers[selected_pointer] += 1\n# k -= 1\n# return ret\n","sub_path":"378_kth-smallest-element-in-a-sorted-matrix.py","file_name":"378_kth-smallest-element-in-a-sorted-matrix.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"109468209","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2012, Enthought, Inc.\n# All rights reserved.\n#------------------------------------------------------------------------------\nimport bisect\n\nfrom .wx_image import WXImage\n\nfrom ....noncomponents.abstract_icon import AbstractTkIcon\n\n\ndef _scale_size(from_size, to_size):\n \"\"\" An internal function used to scale sizes.\n\n Scales from_size to fit within the confines of to_size as closely as\n possible while maintaining the aspect ratio of from_size.\n\n Parameters\n ----------\n from_size : (width, height)\n The width, height tuple of integers of the scaling size.\n \n to_size : (width, height)\n The width, height tuple of integers of the bounding size.\n\n Returns\n -------\n result : (width, height)\n The width, heigh of the properly scaled size.\n \n \"\"\"\n from_width, from_height = from_size\n to_width, to_height = to_size\n ratio = from_width / float(from_height)\n if abs(from_width - to_width) < abs(from_height - to_height):\n out_width = to_width\n out_height = int(out_width / ratio)\n else:\n out_height = to_height\n out_width = int(out_height * ratio)\n return (out_width, out_height)\n\n\nclass _IconEntry(object):\n \"\"\" An object which holds information about an icon. \n\n Instances of this class are used internally by the WXIcon class to\n implement its icon data store.\n\n \"\"\"\n def __init__(self, image, mode, state):\n \"\"\" Initialize an _IconEntry.\n\n Parameters\n ----------\n image : WXImage\n The WXImage instance held by this icon entry.\n \n mode : string\n The mode of the given image.\n \n state : string\n The state of the given image.\n \n \"\"\"\n self.image = image\n self.mode = mode\n self.state = state\n\n def __lt__(self, other):\n \"\"\" Overridden '<' operator. The comparison is based on the \n size of the entry and allows for instances to be sorted.\n\n \"\"\"\n return self.size < other.size\n\n @property\n def size(self):\n \"\"\" Returns the (width, height) size of the underlying image.\n\n \"\"\"\n return self.image.size\n\n @property\n def area(self):\n \"\"\" Returns the width * height area of the entry.\n\n \"\"\"\n size = self.size\n return size[0] * size[1]\n\n\nclass WXIcon(AbstractTkIcon):\n \"\"\" A Wx implementation of AbstractTkIcon\n \n \"\"\"\n def __init__(self):\n \"\"\" Initialize a WxIcon.\n\n \"\"\"\n # The list of IconEntry instances for this icon. The are kept\n # in sorted order according to their size.\n self._entries = []\n\n #--------------------------------------------------------------------------\n # Private Methods\n #--------------------------------------------------------------------------\n def _match(self, size, mode, state):\n \"\"\" A private method which searches for a suitable _IconEntry\n for the given size, mode, and state. 
Returns None if no match\n can be made.\n\n \"\"\"\n area = size[0] * size[1]\n match = None\n for entry in self._entries:\n if entry.mode == mode and entry.state == state:\n if entry.area >= area:\n match = entry\n break\n return match\n \n def _best_match(self, size, mode, state):\n \"\"\" A private method which returns the best _IconEntry match for\n the request, or None if no match can be made.\n\n \"\"\"\n # Try the default match and then cycle through the various states\n # in an intelligent fashion trying to find any other match. This \n # logic is more or less identical to QIcon's internal logic.\n match = self._match(size, mode, state)\n if match is None:\n opp_state = 'off' if state == 'on' else 'on'\n if mode == 'disabled' or mode == 'selected':\n opp_mode = 'selected' if mode == 'disabled' else 'disabled'\n new_info = (\n ('normal', state),\n ('active', state),\n (mode, opp_state),\n ('normal', opp_state),\n ('active', opp_state),\n (opp_mode, state),\n (opp_mode, opp_state),\n )\n else:\n opp_mode = 'active' if mode == 'normal' else 'normal'\n new_info = (\n (opp_mode, state),\n (mode, opp_state),\n (opp_mode, opp_state),\n ('disabled', state),\n ('selected', state),\n ('disabled', opp_state),\n ('selected', opp_state),\n )\n for new_mode, new_state in new_info:\n match = self._match(size, new_mode, new_state)\n if match is not None:\n break\n return match\n\n #--------------------------------------------------------------------------\n # Abstract API Implementation\n #--------------------------------------------------------------------------\n @classmethod\n def from_file(cls, path):\n \"\"\" Create a new icon from a file on disk.\n\n Parameters\n ----------\n path : string\n The path to the image to use for the default mode and state.\n \n Returns\n -------\n result : AbstractTkIcon\n An new icon instance.\n\n \"\"\"\n img = WXImage.from_file(path)\n return cls.from_image(img)\n \n @classmethod\n def from_image(cls, image):\n \"\"\" Create a new icon from an instance of AbstractTkImage using\n the default 'normal' and 'on' states.\n \n Parameters\n ----------\n image : AbstractTkImage\n An appropriate instance of AbstractTkImage to use for the\n default mode and state.\n\n Returns\n -------\n result : AbstractTkIcon\n An new icon instance.\n \n \"\"\"\n if not isinstance(image, WXImage):\n msg = 'Image must be an instance of WXImage. Got %s instead.'\n raise TypeError(msg % type(image))\n new_icon = cls()\n new_icon.add_image(image, 'normal', 'on')\n return new_icon\n\n def get_image(self, size, mode='normal', state='on'):\n \"\"\" Get an appropriate image instance for the requested size, \n mode and state.\n \n Parameters\n ----------\n size : (width, height)\n The size of the requested image. 
The returned image may\n be smaller, but will never be larger than this size.\n \n mode : string\n The mode of the requested image.\n \n state : string\n The state of the requested image.\n \n Returns\n -------\n result : AbstractTkImage\n An appropriate image instance.\n\n \"\"\"\n match = self._best_match(size, mode, state)\n if match is None:\n res = WXImage()\n else:\n res = match.image\n actual_size = match.size\n if actual_size != size:\n scaled_size = _scale_size(actual_size, size)\n res = res.scale(scaled_size)\n return res\n\n def add_image(self, image, mode='normal', state='on'):\n \"\"\" Add an image instance for use by the icon with the given \n mode and state.\n \n Parameters\n ----------\n image : AbstractTkImage\n An appropriate image instance.\n \n mode : string, optional\n The mode of the image. The default is 'normal'.\n \n state : string, optional\n The state of the image. The default is 'on'.\n \n \"\"\"\n if not isinstance(image, WXImage):\n msg = 'Image must be an instance of WXImage. Got %s instead.'\n raise TypeError(msg % type(image))\n size = image.size\n match = self._match(size, mode, state)\n if match is not None:\n match.image = image\n else:\n entry = _IconEntry(image, mode, state)\n bisect.insort(self._entries, entry)\n\n def actual_size(self, size, mode='normal', state='on'):\n \"\"\" Returns the actual size for the requested size, mode, and\n state.\n\n The returned size may be smaller but will never be larger.\n\n Parameters\n ----------\n size : (width, height)\n The size of the requested image. The returned image may\n be smaller, but will never be larger than this size.\n \n mode : string, optional\n The mode of the requested image. The default is 'normal'.\n \n state : string, optional\n The state of the requested image. The default is 'on'.\n \n Returns\n -------\n result : (width, height)\n The actual size for the requested size. If no suitable\n match can be found, the result will be (-1, -1).\n\n \"\"\"\n match = self._best_match(size, mode, state)\n if match is None:\n res = (-1, -1)\n else:\n res = match.size\n if res != size:\n res = _scale_size(res, size)\n return res\n \n def available_sizes(self, mode='normal', state='on'):\n \"\"\" Returns the available image sizes for the given mode and \n state.\n \n Sizes other than these may be requested, but the implemetation \n may scale down the image on the fly to the requested size.\n \n Parameters\n ----------\n mode : string, optional\n The requested image mode. The default is 'normal'.\n \n state : string, optional\n The requested image state. 
The default is 'on'.\n \n \"\"\"\n sizes = []\n for entry in self._entries:\n if entry.mode == mode and entry.state == state:\n sizes.append(entry.size)\n return sizes\n\n","sub_path":"enaml/backends/wx/noncomponents/wx_icon.py","file_name":"wx_icon.py","file_ext":"py","file_size_in_byte":10213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"59943931","text":"#31 symbols in each line\n\ndef go_right(coeff, step):\n count = 0\n row = 0 \n for x in mylist:\n if not(coeff/step*row).is_integer():\n row+=1\n continue\n pos = int((coeff/step*row)%31)\n if x[pos] == '#':\n count += 1 \n row += 1\n return count\n\n#Part1\n\nf = open(\"data3.txt\", \"r\")\n\nmylist = []\n\nfor x in f:\n mylist.append(x)\n \nprint(go_right(3, 1))\n\n# Solution is: 203\n# ----------------\n# Part2\n\nprint(go_right(1, 1)*go_right(3, 1)*go_right(5, 1)*go_right(7, 1)*go_right(1, 2))\n\n# Solution is: 3316272960\n","sub_path":"Day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"507781617","text":"from decimal import Decimal\nfrom typing import Tuple\n\nfrom cockroachdb.modules.models import Warehouse, District, Customer\nfrom cockroachdb.modules.models.base import BaseModel\nfrom cockroachdb.modules.transactions.base import BaseTransaction\n\n\nclass PaymentTransaction(BaseTransaction):\n \"\"\"\n Payment transaction\n\n Example:\n payment = PaymentTransaction((1, 1, 1), Decimal('99.99'))\n payment.run()\n \"\"\"\n\n def __init__(\n self,\n customer_identifier: Tuple[int, int, int],\n payment_amount: Decimal,\n ):\n \"\"\"\n Initiate a transaction for processing customer payment\n :param customer_identifier: customer identifier in the form (warehouse, district, customer)\n :param payment_amount: payment amount in Decimal format\n \"\"\"\n super().__init__()\n (\n self.warehouse_id,\n self.district_id,\n self.customer_id,\n ) = customer_identifier\n self.payment_amount = payment_amount\n\n def _execute(self) -> Tuple[Customer, District, Warehouse]:\n \"\"\"\n Execute new payment transaction\n :return: relevant model instance information\n \"\"\"\n # Update warehouse, district YTD amount\n Warehouse.update(ytd=Warehouse.ytd + self.payment_amount).where(\n Warehouse.id == self.warehouse_id\n ).execute()\n District.update(ytd=District.ytd + self.payment_amount).where(\n (District.id == self.district_id)\n & (District.warehouse_id == self.warehouse_id)\n ).execute()\n\n # Update customer balance, YTD payment and payment count\n Customer.update(\n balance=Customer.balance - self.payment_amount,\n ytd_payment=Customer.ytd_payment + self.payment_amount,\n payment_count=Customer.payment_count + 1,\n ).where(\n (Customer.warehouse_id == self.warehouse_id)\n & (Customer.district_id == self.district_id)\n & (Customer.id == self.customer_id)\n ).execute()\n\n # Consolidate updated results\n customer: Customer = Customer.get_by_id(\n (self.warehouse_id, self.district_id, self.customer_id)\n )\n district: District = District.get_by_id(\n (self.warehouse_id, self.district_id)\n )\n warehouse: Warehouse = Warehouse.get_by_id(self.warehouse_id)\n\n return customer, district, warehouse\n\n def _output_result(\n self, customer: Customer, district: District, warehouse: Warehouse\n ):\n \"\"\"\n Output execution result\n :param customer: Customer model instance\n :param district: District model instance\n :param warehouse: Warehouse model instance\n :return: 
None\n \"\"\"\n\n def format_address(model: BaseModel):\n return \", \".join(\n filter(\n lambda x: x is not None,\n [\n model.street_1,\n model.street_2,\n model.city,\n model.state,\n model.zip,\n ],\n )\n )\n\n identifier = (\n f\"({self.warehouse_id}, {self.district_id}, {self.customer_id})\"\n )\n self.print(\n f\"New Payment Details from Customer {identifier}:\", is_heading=True\n )\n self.print_table(\n columns=[\n {\"header\": \"Name\"},\n {\"header\": \"Address\"},\n {\"header\": \"Phone\"},\n {\"header\": \"Since\"},\n {\"header\": \"Credit\"},\n {\"header\": \"Credit Limit\"},\n {\"header\": \"Discount\"},\n {\"header\": \"Balance\"},\n ],\n rows=[\n [\n customer.formatted_name,\n format_address(customer),\n customer.phone_number,\n customer.since.strftime(\"%b %d, %Y\"),\n customer.credit,\n None\n if customer.credit_limit is None\n else \"{:.2%}\".format(customer.credit_limit),\n \"{:.2%}\".format(customer.discount),\n \"{:.2f}\".format(customer.balance),\n ]\n ],\n )\n self.print(f\"Warehouse Address: {format_address(warehouse)}\")\n self.print(f\"District Address: {format_address(district)}\")\n self.print(f\"Payment Amount: {'{:.2f}'.format(self.payment_amount)}\")\n\n @property\n def transaction_name(self):\n \"\"\"\n Transaction name\n :return: transaction name\n \"\"\"\n return \"payment\"\n","sub_path":"cockroachdb/modules/transactions/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"441063498","text":"import cv2 # OpenCV for perspective transform\nimport numpy as np\n#import matplotlib.image as mpimg\n#import matplotlib.pyplot as plt\n#import matplotlib.pyplot as plt2\n#import scipy.misc # For saving images as needed\n#import glob # For reading in a list of images from a folder\n\nexample_grid = '../calibration_images/example_grid1.jpg'\nexample_rock = '../calibration_images/example_rock1.jpg'\nexample_blue = '../misc/frame.png'\ngrid_img = cv2.imread(example_grid)\nrock_img = cv2.imread(example_rock)\nblue_img = cv2.imread(example_blue)\n\n#flags = [i for i in dir(cv2) if i.startswith('COLOR_BGR')]\n#print (flags)\n\n\n\n\n\n# example works\n# hsv = cv2.cvtColor(blue_img, cv2.COLOR_BGR2HSV)\n# define range of blue color in HSV\n# lower_blue = np.array([110,50,50])\n# upper_blue = np.array([130,255,255])\n# Threshold the HSV image to get only blue colors\n# mask = cv2.inRange(hsv, lower_blue, upper_blue)\n# Bitwise-AND mask and original image\n# res = cv2.bitwise_and(blue_img,rock_img, mask= mask)\n\n# define range of blue color in HSV\n\n\nhsv = cv2.cvtColor(rock_img, cv2.COLOR_BGR2HSV)\nlower_yellow = np.array([0,100,100])\nupper_yellow = np.array([176,255,255])\n\n# Threshold the HSV image to get only yellow colors\nmask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n# Bitwise-AND mask and original image\nresult = cv2.bitwise_and(rock_img,hsv, mask= mask)\n\n\ncv2.imshow('camera image of rock',rock_img)\ncv2.imshow('mask',mask)\ncv2.imshow('result',result)\n\n\ncv2.waitKey(30000)\ncv2.destroyAllWindows()\n\n\n","sub_path":"code/hsv.py","file_name":"hsv.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"266223937","text":"from django.shortcuts import render \nfrom rest_framework.parsers import MultiPartParser, FormParser,FileUploadParser, JSONParser\nfrom rest_framework import viewsets, generics\nfrom rest_framework.response import 
Response\nfrom rest_framework import status\nfrom ..p_models.image_model import PImage\nfrom ..p_models.product_model import Product\nfrom ..p_serializers.product_serializer import ProductSerializer, ProductListSerializer\n\nfrom rest_framework import filters\nfrom url_filter.integrations.drf import DjangoFilterBackend\nfrom ..paginations import LinkSetPagination \n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass ProductListViewSet(viewsets.ModelViewSet):\n queryset = Product.objects.all()\n serializer_class = ProductListSerializer\n filter_backends = [filters.SearchFilter, DjangoFilterBackend]\n \n search_fields = ['id', 'title','description', 'quantity', 'price', 'colors', 'sizes', 'offers', 'stitch', 'stitch_type']\n \n pagination_class = LinkSetPagination\n\n filter_fields = ['id', 'title','description', 'quantity', 'price', 'colors', 'sizes', 'offers', 'stitch', 'stitch_type']\n parser_classes = (JSONParser, FormParser, MultiPartParser, FileUploadParser) # set parsers if not set in settings. Edited\n \n\nclass ProductViewSet(viewsets.ModelViewSet):\n queryset = Product.objects.all()\n serializer_class = ProductSerializer\n filter_backends = [filters.SearchFilter, DjangoFilterBackend]\n \n search_fields = ['id', 'title','description', 'quantity', 'price', 'colors', 'sizes', 'offers', 'stitch', 'stitch_type']\n \n pagination_class = LinkSetPagination\n\n filter_fields = ['id', 'title','description', 'quantity', 'price', 'colors', 'sizes', 'offers', 'stitch', 'stitch_type']\n parser_classes = (JSONParser, FormParser, MultiPartParser, FileUploadParser) # set parsers if not set in settings. Edited\n \n def create(self, request, *args, **kwargs):\n logger.info(\" \\n\\n ----- PRODUCT CREATE initiated -----\")\n product = self.prepareProductData(request.data)\n images = {}\n if request.FILES:\n images = request.FILES\n logger.debug(product)\n logger.debug(\"Data prepared. 
Sending data to the serializer \")\n product_serializer = ProductSerializer(data= {'data':request.data, 'product':product['data'], 'product_relations':product['additional_data'], 'images': images})\n product_serializer.is_valid(raise_exception=True)\n product_serializer.save()\n logger.debug({'productId':product_serializer.instance.id, \"status\":200})\n logger.debug(\"Product saved successfully!!!\")\n return Response({'productId':product_serializer.instance.id}, status=status.HTTP_201_CREATED)\n \n def update(self, request, *args, **kwargs):\n product = self.prepareProductData(request.data)\n if request.FILES:\n product['additional_data']['images'] = request.FILES\n serializer = self.get_serializer(self.get_object(), data= {'data':request.data, 'product':product['data'], 'product_relations':product['additional_data']}, partial=True)\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n return Response(serializer.data)\n \n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n self.perform_destroy(instance)\n instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def perform_destroy(self, instance):\n for e in instance.images.all():\n instance.images.remove(e)\n PImage.objects.get(id=e.id).delete()\n \n #======================== CREATE PRODUCT ========================#\n def prepareProductData(self, product_input, instance=None):\n product = {}\n product['title'] = product_input.get('title')\n product['description'] = product_input.get('description')\n product['quantity'] = product_input.get('quantity')\n product['price'] = product_input.get('price')\n product['in_stock'] = product_input.get('in_stock')\n product['user'] = product_input.get('user')\n\n # Many to Many fields\n additional_data = {}\n additional_data['colors'] = product_input.get('colors')\n additional_data['sizes'] = product_input.get('sizes')\n additional_data['offers'] = product_input.get('offers')\n additional_data['category'] = product_input.get('category')\n additional_data['sub_category'] = product_input.get('sub_category')\n return {'data': product, 'additional_data': additional_data}","sub_path":"products/p_views/product_view.py","file_name":"product_view.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"492204017","text":"\"\"\"\nRecord from the default microphone in an infinite loop and create\na Live Playlist (Sliding Window).\n\nReference: https://developer.apple.com/documentation/http_live_streaming/\n\"\"\"\n\nimport hashlib\nimport os\nimport multiprocessing\n\nimport pyaudio\nimport pydub\n\nfrom typing import List, Tuple\n\nMEDIAS_DIR: str =
os.environ.get(\"HLS_SERVER_MEDIAS_DIR\", \".\")\nTARGET_SEGMENT_DURATION: int = int(\n os.environ.get(\"HLS_SERVER_TARGET_SEGMENT_DURATION\", 2)\n)\n\n\ndef update_playlist(\n sequence: List[Tuple[str, float]],\n sequence_number: int,\n target_segment_duration: int,\n):\n \"\"\"Update the master.m3u8 with the given sequence.\n\n Args:\n sequence: the sequence of segments described by tuples of\n filename (.ts) and duration.\n sequence_number: the position of the first segment with respect\n to the beginning of the recording.\n target_segment_duration: the expected duration of segments.\n\n \"\"\"\n\n with open(os.path.join(MEDIAS_DIR, \"0\", \"master.m3u8\"), mode=\"w\") as f:\n f.truncate()\n f.write(\n \"#EXTM3U\\n\"\n f\"#EXT-X-TARGETDURATION:{target_segment_duration}\\n\"\n \"#EXT-X-VERSION:4\\n\"\n f\"#EXT-X-MEDIA-SEQUENCE:{sequence_number}\\n\"\n )\n for filename, duration in sequence:\n f.write(f\"#EXTINF:{duration},\\n\" f\"{filename}\\n\")\n\n\ndef make_stream(chunk_size: int, rate: int, channels: int) -> pyaudio.Stream:\n \"\"\"Make an audio stream from the default microphone\n \"\"\"\n p = pyaudio.PyAudio()\n stream = p.open(\n format=pyaudio.paInt16,\n channels=channels,\n rate=rate,\n frames_per_buffer=chunk_size,\n input=True,\n )\n\n return stream\n\n\ndef make_path_from_media_dir(filename: str) -> str:\n \"\"\"Make a path to the given filename relative to the media dir.\n \"\"\"\n return os.path.join(MEDIAS_DIR, \"0\", filename)\n\n\ndef record(*, target_segment_duration: int = 5, output_queue: multiprocessing.Queue):\n \"\"\"Record from the default microphone and write segments.\n\n Write segments as .ts files (MPEG-TS) along with a master.m3u8 playlist.\n\n \"\"\"\n rate = 44100\n chunk_size = rate // 10\n\n stream = make_stream(chunk_size, rate, channels=1)\n\n while True:\n frames = []\n n_frames = round(target_segment_duration / (chunk_size / rate))\n for _ in range(n_frames):\n data = stream.read(chunk_size, exception_on_overflow=True)\n frames.append(data)\n\n segment = pydub.AudioSegment(\n data=b\"\".join(frames), sample_width=2, frame_rate=44100, channels=1\n )\n\n output_queue.put(segment)\n\n\ndef process_segments(input_queue: multiprocessing.Queue, target_segment_duration):\n sequence_number = 1\n rolling_sequence: List[Tuple[str, float]] = []\n rolling_size = 3\n while True:\n segment = input_queue.get()\n\n filename = \"{}.ts\".format(hashlib.sha256(segment.raw_data).hexdigest())\n segment.export(\n make_path_from_media_dir(filename),\n format=\"mpegts\",\n codec=\"mp2\",\n bitrate=\"64k\",\n )\n\n rolling_sequence.append((filename, len(segment) / 1000))\n if len(rolling_sequence) > rolling_size:\n sequence_number += 1\n os.remove(make_path_from_media_dir(rolling_sequence[0][0]))\n rolling_sequence = rolling_sequence[1:]\n\n update_playlist(rolling_sequence, sequence_number, target_segment_duration)\n\n\nq = multiprocessing.Queue()\nmultiprocessing.Process(\n target=process_segments, args=(q, TARGET_SEGMENT_DURATION)\n).start()\nrecord(target_segment_duration=TARGET_SEGMENT_DURATION, output_queue=q)\n","sub_path":"recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"627591662","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 16 20:23:37 2018\n\n@author: Abdullah\n\"\"\"\n\nimport re\nimport numpy as np\n\ndef tokenize(line):\n \n hyphenExcl = re.compile(r\"[^a-zA-Z']\") \n #arr1 = []\n string = line.replace('.','')\n 
string = hyphenExcl.sub(\" \",line)\n arr = string.split() \n arrToken = []\n for i in range(len(arr)):\n skip_flag = False\n arr[i] = arr[i].replace('-',' ') \n if arr[i].startswith('\\'') == True or arr[i].endswith('\\'') == True:\n if arr[i].startswith('\\'') == True and arr[i].endswith('\\'') == True and len(arr[i]) == 2:\n np.delete(arr,i)\n skip_flag = True\n continue\n else: \n arr[i] = arr[i].replace('\\'','') \n if skip_flag == False:\n arr[i] = arr[i].lower()\n x = re.search(r\"\\w+\",arr[i])\n if x is not None:\n arr[i] = x.group()\n arr[i] = arr[i].strip()\n #arr = np.append(arr1,arr)\n idxArr = np.where(arr == \"''\")\n cleanArr = np.delete(arr, idxArr)\n for word in cleanArr:\n arrToken.append(word)\n \n return arrToken\n\n\n","sub_path":"ltokenize.py","file_name":"ltokenize.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"336569429","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\n\nfrom VibTools.Molecule import VibToolsMolecule\nfrom VibTools.Modes import VibModes\nimport re\n\nclass Molpro(object):\n\n def __init__(self, logname='molpro.out', w=None):\n self.mol = VibToolsMolecule()\n self.modes = None\n self.filename = logname\n self.lwl = w\n\n freqs = property(lambda self: self.modes.freqs)\n\n def read(self):\n self.mol.read_from_molpro(filename=self.filename, oricoord=True)\n self.modes = VibModes(self.mol.nmodes, self.mol)\n if self.lwl is None:\n self.lwl = self.get_pulsation()\n\n def get_pulsation(self):\n \"\"\"\n Get the w value used in the TDHF(or equivalent) approach\n \"\"\"\n# f = open(self.filename, 'r')\n# lines = f.readlines()\n# f.close()\n# pattern = r\"[+-]?0\\.\\d{6}\"\n# lwls = None\n return None\n# for l in lines:\n# if \"Using perturbation frequencies:\" in l:\n# list=re.findall(pattern, l)\n# lwls=[float(vl) for vl in list]\n# break\n# elif \"Frequencies=\" in l:\n# list=re.findall(pattern, l)\n# lwls=[float(vl) for vl in list]\n# break\n # Only take the first non-zero pulsation in the followings\n# lwl=None\n# if lwls is not None:\n# for puls in lwls:\n# if puls > 0.0:\n# lwl=puls\n# break\n# print \"Pulsations in the calculations:\", lwls\n# print \"Pulsation used:\", lwl\n# else :\n# print \"No Pulsation found in the file. 
Assume Zero\"\n# lwl=0.000\n# return lwl\n\n def read_normalmodes(self):\n f = open(self.filename, 'r')\n lines = f.readlines()\n f.close()\n natoms = self.mol.natoms\n freqs = []\n normalmodes = numpy.zeros((0,))\n for i, l in enumerate(lines):\n if \"Wavenumbers [cm-1]\" in l:\n freqs.extend(l.split()[2:])\n elif \"X1 \" in l:\n block = lines[i:i+3*natoms]\n array = []\n for blockline in block:\n array.append([float(fl) for fl in blockline.split()[1:]])\n array = numpy.array(array)\n array = array.transpose()\n normalmodes = numpy.append(normalmodes, array)\n self.modes = VibModes(self.mol.nmodes, self.mol)\n if normalmodes.shape == (0,):\n print(\"No normal modes found\")\n return\n freqs = numpy.array([float(fl) for fl in freqs])\n normalmodes = normalmodes.reshape((3*natoms, 3*natoms))\n modes_wtr = VibModes(3*natoms, self.mol)\n modes_wtr.set_freqs(freqs)\n modes_wtr.set_modes_c(normalmodes)\n self.modes = modes_wtr.remove_trans_rot()\n self.modes.sort_by_freq()\n\n\n def get_energy_transition(self):\n f = open(self.filename, 'r')\n lines = f.readlines()\n f.close()\n energies = {}\n for line in lines:\n if \"!MCSCF STATE\" in line:\n # match < | DM(XYZ)| >\n pattern = r'!MCSCF STATE\\s+?(\\d+?\\.\\d+?)\\s+?Energy\\s+?([+-]?\\d*\\.\\d*)'\n match = re.search(pattern, line)\n if match is not None:\n key = match.group(1)\n energies[key] = float(match.group(2))\n if len(energies) == 0:\n return None\n # Make the difference with the smallest value of energy\n energies = numpy.sort(numpy.array(list(energies.values())))\n energies = energies[1:] -energies[0]\n\n return energies\n\n\n def get_dipole_transition(self):\n f = open(self.filename, 'r')\n lines = f.readlines()\n f.close()\n diptrans = {}\n dir = [\"X\", \"Y\", \"Z\"]\n for line in lines:\n if \"!MCSCF trans\" in line:\n # match < | DM(XYZ)| >\n pattern = r'<(\\d+?\\.\\d+?)\\|DM([XYZ])\\|(\\d+?.\\d+?)>\\s+?([+-]?\\d*\\.\\d*)\\s+au'\n match = re.search(pattern, line)\n if match is not None:\n key = match.group(1) +\"_\"+match.group(3)\n if key not in diptrans:\n diptrans[key] = numpy.zeros((3))\n diptrans[key][dir.index(match.group(2))] = float(match.group(4))\n if len(diptrans) == 0:\n return None\n return numpy.array(list(diptrans.values()))\n\n\n def get_forces(self):\n natoms = self.mol.natoms\n f = open(self.filename, 'r')\n lines = f.readlines()\n f.close()\n start = 0\n end = 0\n for iline, line in enumerate(lines):\n if \"GRADIENT FOR STATE\" in line:\n start = iline + 4\n end = start + natoms\n break\n lines = lines[start:end]\n if len(lines) == 0:\n return None\n forces = numpy.zeros((natoms, 3))\n for i, line in enumerate(lines):\n pattern = r\"-?\\d+\\.\\d*\"\n alist = re.findall(pattern, line)\n forces[i] = [-float(x) for x in alist]\n return forces\n\n","sub_path":"Tools/PyMolpro.py","file_name":"PyMolpro.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"633856515","text":"#!/usr/bin/python3\nfrom sseclient import SSEClient\nimport requests\nimport time\nimport os\nimport sys\nimport subprocess\n\ndef get_messages(url):\n while True:\n try:\n messages = SSEClient(url)\n for msg in messages:\n yield msg\n except requests.exceptions.HTTPError as e:\n if e.response.status_code / 100 == 5:\n time.sleep(10)\n else:\n raise e\n\ndef master_deploy(message):\n if hasattr(message, 'event') and hasattr(message, 'id'):\n if message.event == 'push' and message.id == 'refs/heads/master' and message.data.isalnum():\n 
subprocess.call([\"git\", \"fetch\"])\n subprocess.call([\"git\", \"checkout\", message.data])\n if os.path.isfile(\"deploy.sh\"):\n subprocess.call([\"bash\", \"deploy.sh\", message.data])\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n raise Exception(\"Not enough arguments. Please include a URL.\")\n else:\n url = sys.argv[1]\n for message in get_messages(url):\n master_deploy(message)\n","sub_path":"git-watch.py","file_name":"git-watch.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"1523557","text":"\"\"\" Compiled: 2020-09-18 10:38:51 \"\"\"\n\n#__src_file__ = \"extensions/common/FFxCommon.py\"\n#----------------------------------------------------------------------------\n# (c) Copyright 2020 SunGard Front Arena. All rights reserved.\n#----------------------------------------------------------------------------\n\"\"\" ---------------------------------------------------------------------------\nMODULE\n FFxCommon.\n\n Base class for FFxSpotRolloverMMFunding, FFxSpotRolloverSwapFunding\n and FPLSweep\n\nDESCRIPTION\n\n\n----------------------------------------------------------------------------\"\"\"\n\n\nimport math\nfrom collections import namedtuple\n\nimport acm\nimport FBDPCommon\nfrom FBDPValidation import FBDPValidate\nfrom FBDPCurrentContext import Logme\nimport importlib\n\nRESULT_FAIL = 0\nRESULT_SUCCESS = 1\nRESULT_IGNORED = 2\nINVALID_NUMBERS = ['Infinity', '-Infinity', '1.#INF', '-1.#INF', '1.#IND',\n '-1.#IND', 'NaN', '-NaN', '1.#QNAN', '-1.#QNAN']\nCURRENCY = 'Curr'\nCURRENCY_PAIR = 'CurrencyPair'\nPOSITION_PAIR = 'PositionPair'\nPOSITIONORCURRENCY_PAIR = 'PositionOrCurrencyPair'\n\nSETTLE = 'Settle'\n\nQuotedRate = namedtuple('QuotedRate', ['currencyPair', 'rate'])\nPriceRate = namedtuple('PriceRate', 'currency1 currency2 price rate')\n\nspace = acm.FCalculationMethods().CreateStandardCalculationsSpaceCollection()\n\n\ndef currencyPair(curr1, curr2):\n currPair = acm.FCurrencyPair[curr1.Name() + \"/\" + curr2.Name()]\n if currPair:\n return currPair\n return acm.FCurrencyPair[curr2.Name() + \"/\" + curr1.Name()]\n\n\ndef currencyPairsAreConnected(pair1, pair2):\n return (pair1.Currency1() in (pair2.Currency1(), pair2.Currency2) or\n pair1.Currency2() in (pair2.Currency1(), pair2.Currency2))\n\n\ndef addBankingDays(curr, date, spot):\n cal = curr.Calendar()\n return cal.AdjustBankingDays(date, spot)\n\n\ndef maxOfSpotDates(currObjects, date):\n if not currObjects:\n return date\n return max([x.SpotDate(date) for x in currObjects])\n\n\ndef checkDenominatedValue(value):\n try:\n if not value:\n return False\n if value.Type() and value.Type().Text() == 'InvalidPrice':\n return False\n if str(value.Number()) in INVALID_NUMBERS:\n return False\n if not value.Number() >= 0.0 and not value.Number() < 0.0:\n return False\n except Exception:\n return False\n return True\n\n\ndef getBidAsk(cashflow):\n if cashflow < 0.0:\n bidAsk = 'Ask'\n counterBidAsk = 'Bid'\n else:\n bidAsk = 'Bid'\n counterBidAsk = 'Ask'\n return bidAsk, counterBidAsk\n\n\ndef getFxRate(date, curr1, curr2):\n if acm.Time().DateDifference(date, acm.Time().DateToday()) < 0:\n date = acm.Time().DateToday()\n val = curr1.Calculation().FXRate(space, curr2, date).Value()\n if checkDenominatedValue(val):\n rate = val.Number()\n msg = \"FX rate for {0}:{1} on {2} is {3:.5f}\".format(curr1.Name(),\n curr2.Name(), date, rate)\n Logme()(msg, 'DEBUG')\n else:\n rate = 0.\n msg = \"Invalid FX rate found for 
{0}:{1} on {2}. Returning 0.\".format(\n curr1.Name(), curr2.Name(), date)\n Logme()(msg, 'WARNING')\n return rate\n\n\ndef getTotalSwapPoints(currPair, fromDate, toDate, points):\n days = acm.Time().DateDifference(toDate, fromDate)\n pointValue = currPair.PointValue()\n totalPoints = points * pointValue * days\n Logme()('Total forward points for {0} day(s) from {1} to {2}: {3}'.format(\n days, fromDate, toDate, totalPoints), 'DEBUG')\n return totalPoints\n\n\ndef getLatestPointsPerDay(instr, curr, fromDate, bidAsk):\n calcValue = instr.Calculation().MarketPrice(space, fromDate,\n 0, curr, 1, None, 0, 'Average' + bidAsk + 'Price', 1).Value()\n if not checkDenominatedValue(calcValue):\n message = ('Invalid swap points found for funding instrument {0} '\n '({1}) on {2}'.format(instr.Name(), curr.Name(),\n fromDate))\n Logme()(message, 'ERROR')\n return 0.0\n\n points = calcValue.Number()\n Logme()(\"Latest forward {0} points for {1} ({2}) on {3}: {4}.\".format(\n bidAsk.lower(), instr.Name(), curr.Name(), fromDate, points,\n 'DEBUG'))\n return points\n\n\ndef getLatestSwapPoints(currPair, fundInstr, fromDate, toDate, bidAsk):\n \"\"\"\n Returns the latest forward FX points stored on the swap funding instrument\n passed in.\n \"\"\"\n points = getLatestPointsPerDay(fundInstr, fundInstr.Currency(),\n fromDate, bidAsk)\n return getTotalSwapPoints(currPair, fromDate, toDate, points)\n\n\ndef getMtMSwapPoints(currPair, fundInstr, fromDate, toDate, mtmMarket,\n bidAsk=''):\n \"\"\"\n Returns the marked-to-market forward FX points stored on the swap funding\n instrument passed in.\n \"\"\"\n points = getMtMRate(fundInstr, fundInstr.Currency(), bidAsk,\n mtmMarket, fromDate)\n return getTotalSwapPoints(currPair, fromDate, toDate, points)\n\n\ndef getMtMFXRate(instr, curr, mtmMarket, date=acm.Time().DateToday()):\n \"\"\"\n Returns the latest available mark-to-market settle FX rate, up to the\n specified date, for the instrument and currency requested.\n\n An inverse rate might be returned if that is how MtM rates are stored.\n\n A cross rate inferred from the instrument, currency and the FX base\n currency in the valuation parameters may also be returned in rates are not\n quoted directly for the instrument and currency.\n \"\"\"\n rate = getMtMRate(instr, curr, SETTLE, mtmMarket, date)\n if rate != 0.0:\n return rate\n\n baseCurr = acm.ObjectServer().UsedValuationParameters().FxBaseCurrency()\n message = (\"No MtM rates found for {0}:{1} or {1}:{0} on {2} in MtM \"\n \"market {3}. 
Getting cross rate from base currency {4}...\".format(\n instr.Name(), curr.Name(), date, mtmMarket.Name(),\n baseCurr.Name()))\n Logme()(message, \"INFO\")\n crossRate = getCrossRate(instr, curr, baseCurr, mtmMarket, date)\n return crossRate\n\n\ndef getCrossRate(instr, curr, baseCurr, mtmMarket, date):\n rateQuotes = []\n for lookup in (instr, curr):\n p_rate = getLatestMtMPriceRate(lookup, baseCurr, SETTLE, mtmMarket,\n date)\n rate = p_rate.rate\n if rate != 0.:\n currPair = '{0}:{1}'.format(p_rate.currency1.Name(),\n p_rate.currency2.Name())\n rateQuotes.append(QuotedRate(currPair, rate))\n else:\n message = ('Could not get cross rate for {0}:{1} on {2} from '\n 'base currency {3} - no rate available for '\n '{0}:{3}'.format(instr.Name(), curr.Name(), date,\n baseCurr.Name()))\n Logme()(message, \"DEBUG\")\n return 0.\n\n assert len(rateQuotes) == 2, \"Need 2 rates to cross - found {0}\".format(\n len(rateQuotes))\n\n crossRate = rateQuotes[0].rate / rateQuotes[1].rate\n message = (\"{0}:{1} rate for {2} implied by MtM rates for {3} and {4}: \"\n \"{5:.6f}\".format(instr.Name(), curr.Name(), date,\n rateQuotes[0].currencyPair, rateQuotes[1].currencyPair,\n crossRate))\n Logme()(message, \"DEBUG\")\n return crossRate\n\ndef getMtMRate(instr, curr, valueType, mtmMarket, date, checkInverse=True):\n p_rate = getLatestMtMPriceRate(instr, curr, valueType, mtmMarket, date,\n checkInverse)\n return p_rate.rate\n\ndef getLatestMtMPriceRate(instr, curr, valueType, mtmMarket, date,\n checkInverse=True):\n p_rate = getMtMPriceRate(instr, curr, valueType, mtmMarket, date)\n if checkInverse:\n inv_p_rate = getMtMPriceRate(curr, instr, valueType, mtmMarket, date)\n invert = False\n if p_rate:\n invert = inv_p_rate and \\\n (inv_p_rate.price.Day() > p_rate.price.Day())\n elif inv_p_rate:\n invert = True\n else:\n p_rate = None\n\n if invert:\n message = ('Using the more recent historical price for {0}:{1} '\n ' instead of {1}:{0}.'.format(inv_p_rate.currency1.Name(),\n inv_p_rate.currency2.Name()))\n Logme()(message, 'DEBUG')\n p_rate = PriceRate(inv_p_rate.currency1, inv_p_rate.currency2,\n inv_p_rate.price, 1.0 / inv_p_rate.rate)\n\n if not p_rate:\n p_rate = PriceRate(instr, curr, None, 0.0)\n\n return p_rate\n\ndef getMtMPriceRate(instr, curr, valueType, mtmMarket, date):\n histPrices = instr.HistoricalPrices()\n if not histPrices:\n message = (\"No mark-to-market rates available for {0} ({1}).\".format(\n instr.Name(), curr.Name()))\n Logme()(message, \"WARNING\")\n return None\n\n last = None\n for hp in histPrices:\n if hp.Market() == mtmMarket and hp.Currency() == curr:\n if hp.Day() == date:\n last = hp\n break\n\n # Find latest available before MtM date\n if hp.Day() < date:\n if not last:\n last = hp\n elif hp.Day() > last.Day():\n last = hp\n\n if not last:\n message = (\"No mark-to-market rates available for {2} ({1}) in \"\n \"MtM market '{0}' before {3}.\".format(mtmMarket.Name(),\n curr.Name(), instr.Name(), date))\n Logme()(message, \"WARNING\")\n return None\n\n if last.Day() != date:\n assert last.Day() < date, (\"Found historical price on \"\n \"{0} after MtM date {1}\".format(last.Day(), date))\n message = (\"No mark-to-market rate available for {0} ({1}) in \"\n \"MtM market '{2}' on {3}. 
Using latest available on \"\n \"{4}.\".format(instr.Name(), curr.Name(), mtmMarket.Name(),\n date, last.Day()))\n Logme()(message, \"WARNING\")\n\n if valueType == 'Bid':\n rate = last.Bid()\n elif valueType == 'Ask':\n rate = last.Ask()\n elif valueType == 'High':\n rate = last.High()\n elif valueType == 'Low':\n rate = last.Low()\n elif valueType == 'Last':\n rate = last.Last()\n else:\n rate = last.Settle()\n\n if math.isnan(rate):\n message = (\"Invalid mark-to-market {3} rate found for {0} ({1}) on \"\n \"{2}.\".format(instr.Name(), curr.Name(), last.Day(),\n valueType.lower()))\n Logme()(message, 'ERROR')\n return None\n\n Logme()(\"MtM {0} rate for {1} ({2}) {3}: {4}.\".format(valueType.lower(),\n instr.Name(), curr.Name(), date, rate, 'DEBUG'))\n return PriceRate(instr, curr, last, rate)\n\n\ndef printEmp(msg, char, mode='INFO'):\n n = len(msg)\n Logme()(char * n, mode)\n Logme()(msg, mode)\n Logme()(char * n, mode)\n\n\ndef addDicToDic(dic1, dic2):\n for k in dic1.keys():\n if k in dic2:\n dic2[k].extend(dic1[k])\n else:\n dic2[k] = dic1[k]\n\n\ndef callGrouperHook(grouperName):\n try:\n import FBDPHook\n importlib.reload(FBDPHook)\n except ImportError:\n return None\n try:\n func = getattr(FBDPHook, \"get_grouping_attribute_from_grouper\")\n except:\n return None\n else:\n return func(grouperName)\n\n\ndef calcToNum(c):\n if not c:\n return 0\n try:\n val = c.Value()\n if val.IsKindOf(acm.FArray):\n val = [x.Number() for x in val]\n if len(val) == 1:\n return val[0]\n return val\n if not val.IsKindOf(acm.FDenominatedValue):\n return 0\n return val.Number()\n except Exception as ex:\n Logme()(str(ex), 'ERROR')\n return None\n\n\ndef setColumnConfig(currencies):\n if type(currencies) == type(acm.FCurrency()):\n currencies = [currencies]\n Data = acm.FArray()\n if currencies:\n for curr in currencies:\n if curr and curr.Name():\n DataBucket = acm.FNamedParameters()\n DataBucket.AddParameter('currency', curr)\n Data.Add(DataBucket)\n return acm.Sheet().Column().ConfigurationFromVector(Data)\n\n\nclass TimeOrPorfolioSheet(FBDPValidate):\n\n def __init__(self):\n\n FBDPValidate.__init__(self)\n\n self.config = None\n self.portfolio = None\n self.calcSpace = None\n self.topNode = None\n self.grouping = []\n self.container = None\n self.optKeys = {}\n\n def __initData(self):\n self.config = None\n self.calcSpace = None\n self.topNode = None\n self.grouping = []\n self.container = None\n self.optKeys = {}\n\n def calcToNum(self, c):\n return calcToNum(c)\n\n def getTrades(self, obj):\n if obj.IsKindOf(acm.FPortfolio):\n return obj.Trades()\n elif obj.IsKindOf(acm.FStoredASQLQuery) and \\\n obj.SubType() == 'FTrade':\n return obj.Query().Select()\n return []\n\n def createCalcSpace(self, tradingObjects, grouper=None,\n timeBucketGrouper=None):\n\n TimeOrPorfolioSheet.__initData(self)\n\n newGroupers = []\n if grouper and grouper.IsKindOf(acm.FChainedGrouper):\n for g in grouper.Groupers():\n self.grouping.append(self.extractNodeGrouperMethod(g))\n newGroupers.append(g)\n\n if (timeBucketGrouper and\n timeBucketGrouper.IsKindOf(acm.FTimeBucketGrouper)):\n config = acm.Report().CreatePortfolioSheetGridConfiguration(\n timeBucketGrouper.TimeBuckets())\n csc = acm.Calculations().CreateCalculationSpaceCollection()\n self.calcSpace = csc.GetSpace('FPortfolioSheet', 'Standard',\n config)\n else:\n self.calcSpace = acm.FCalculationSpace('FPortfolioSheet')\n\n self.container = self.createObjectUnion(tradingObjects)\n accCurr = acm.ObjectServer().UsedValuationParameters(\n ).AccountingCurrency()\n 
self.calcSpace.SimulateValue(\n self.container, 'Portfolio Currency', accCurr)\n self.topNode = self.calcSpace.InsertItem(self.container)\n\n self.topNode.ApplyGrouper(acm.FChainedGrouper(newGroupers))\n self.calcSpace.Refresh()\n\n def createObjectUnion(self, objects):\n unionObject = acm.FAdhocPortfolio()\n unionObject.Name(\"Trade Union\")\n settmp = acm.FIdentitySet()\n for obj in objects:\n settmp.AddAll(self.getTrades(obj))\n for t in settmp.AsIndexedCollection():\n unionObject.Add(t)\n return unionObject\n\n def extractNodeGrouperMethod(self, grouper):\n method = None\n if grouper.IsKindOf(acm.FAttributeGrouper):\n if str(grouper.Method()) == 'Instrument.Currency':\n method = 'Instrument.Currency'\n else:\n label = grouper.Label()\n method = str(label) if label else str(grouper.Method())\n else:\n method = str(grouper.Label())\n\n attr = callGrouperHook(method)\n if attr:\n method = attr\n\n if \"Trade \" in method:\n method = method.split(\"Trade \")[1]\n elif \"Trade.OptKey\" in method:\n return method\n elif method.startswith('Trade.'):\n method = method.split(\".\")[1]\n elif method == \"Currency Pair\":\n method = CURRENCY_PAIR\n\n return method\n\n def buildOptKeysBasedOldMapping(self):\n tradekeys = acm.FChoiceList.Select(\"list='Trade Keys'\")\n i = 1\n for key in tradekeys:\n optKeyName = \"OptKey%s\" % i\n self.optKeys[optKeyName] = key.Name()\n i = i + 1\n\n def buildOptKeysDictionary(self):\n self.optKeys.clear()\n trdkeys = acm.FChoiceList.Select(\"list = 'ADM Choicelist Mappings'\")\n if not trdkeys:\n self.buildOptKeysBasedOldMapping()\n return\n\n i = 1\n for k in trdkeys:\n kName = \"Trade.optkey%s_chlnbr\" % i\n if kName.upper() in k.Name().upper():\n optKeyName = \"OptKey%s\" % i\n self.optKeys[optKeyName] = k.Description()\n i = i + 1\n\n def getOptKeyObject(self, key, val):\n return acm.FChoiceList.Select(\"list='%s' AND name=%s\" %\n (self.optKeys[key], val))[0]\n\n def extractNodeAttribute(self, grouper, item, attrib_out):\n method = self.extractNodeGrouperMethod(grouper)\n\n if \"Trade.OptKey\" in method:\n self.buildOptKeysDictionary()\n try:\n attribName = method.split(\".\")[1]\n attrib_out[attribName] = self.getOptKeyObject(\n attribName, item.Value()).Name()\n except:\n pass\n else:\n attrib_out[method] = item.StringKey()\n\n def getPositionNodes(self, node=None, attrib_in={}):\n if not node:\n node = self.topNode\n positions = {}\n it = node.Iterator().FirstChild()\n while it:\n tree = it.Tree()\n item = tree.Item()\n trades = item.Trades().AsList()\n if trades.Size() > 0:\n portfolio = (trades and trades[0].Portfolio() or\n item.Portfolio())\n grouper = item.Grouping().Grouper()\n attrib_out = attrib_in.copy()\n self.extractNodeAttribute(grouper, item, attrib_out)\n if (tree.Depth() - 1 == len(self.grouping)):\n if portfolio not in positions:\n positions[portfolio] = []\n positions[portfolio].append((tree, attrib_out))\n else:\n addDicToDic(self.getPositionNodes(tree, attrib_out),\n positions)\n it = it.NextSibling()\n return positions\n\n def printTree(self, node, colNames=[\"Portfolio Projected Payments\"],\n max_depth=0, start_depth=0, currencies=None):\n if Logme().LogMode < 2:\n return\n if not start_depth:\n start_depth = node.Depth()\n if type(currencies) == type(acm.FCurrency()):\n currencies = [currencies]\n elif not currencies:\n currencies = [acm.ObjectServer().UsedValuationParameters(\n ).AccountingCurrency()]\n\n values = []\n for name in colNames:\n self.setColumnConfig(currencies)\n val = self.getCalculation(node, name)\n 
values.append(val)\n\n Logme()(\"%s (Node Level %s) %s => %s : %s \" %\n (\" \" * (node.Depth() - start_depth) * 5, node.Depth(),\n node.Item().StringKey(), str(colNames), str(values)), \"DEBUG\")\n\n if (node.Iterator().HasChildren() and\n (not max_depth or node.Depth() <= max_depth)):\n child = node.Iterator().FirstChild()\n while child:\n self.printTree(child.Tree(), colNames, max_depth, start_depth,\n currencies)\n child = child.NextSibling()\n\n def setColumnConfig(self, currencies):\n self.config = setColumnConfig(currencies)\n\n def getColumnValue(self, node, colName, day=0):\n\n # Return the topnode value\n if not day:\n return self.getCalculation(node, colName)\n\n # Return the correct bucket value\n treeIterator = node.Iterator()\n\n res = treeIterator.Find(day)\n if res:\n node = res.Tree()\n return self.getCalculation(node, colName)\n return None\n\n def getCalculation(self, node, colName):\n\n val = self.calcSpace.CreateCalculation(node, colName, self.config)\n val = self.calcToNum(val.Value())\n return val\n\n def getCashflow(self, node, day=0, configCurr=None):\n\n self.setColumnConfig(configCurr)\n val = self.getColumnValue(node, \"Portfolio Projected Payments\", day)\n return val\n\n def hasCashBalance(self, node, curr):\n\n self.setColumnConfig(curr)\n val = self.getColumnValue(node, \"Portfolio Projected Payments\", 0)\n cash = val\n if cash and math.fabs(cash) > 0.0001:\n return True\n return False\n\n def hasCashBalances(self, node, currencies):\n\n cashes = [self.hasCashBalance(node, c) for c in currencies]\n return any(cashes)\n\n\nclass FxGroupingProcess(TimeOrPorfolioSheet):\n\n def __init__(self):\n\n TimeOrPorfolioSheet.__init__(self)\n\n self.tradingObjects = None\n self.portfolioGrouper = None\n self.nextTradingDate = None\n self.refreshCalcSpace = None\n self.attributes = None\n\n def requiredAttributesNotSet(self):\n\n msg = \"\"\n if not self.tradingObjects:\n msg = (\"At least one of the fields in 'Stored Folder', \"\n \"'Trade Filter' and 'Portfolio' needs to be set.\")\n if not msg:\n msg = self.requiredSubAttributesNotSet()\n if msg:\n Logme()(\"%s \" % msg, 'ERROR')\n return msg\n\n def getTradingObjects(self, dictionary):\n\n objects = acm.FArray()\n for field in ['TradeQuery', 'TradeFilter', 'TradingPortfolios']:\n if field in dictionary and dictionary[field]:\n objects.AddAll(dictionary[field])\n return objects\n\n def requiredSubAttributesNotSet(self):\n\n return ''\n\n def cleanAndRecreateCalcSpace(self):\n\n self.calcSpace.Clear()\n acm.PollDbEvents()\n acm.Calculations().ResetEvaluatorBuilders()\n acm.Memory().GcWorldStoppedCollect()\n self.defineCalcSpace()\n\n def performCalculation(self):\n\n self.refreshCalcSpace = False\n positions = self.getPositionNodes()\n for portfolio, nodes in positions.items():\n self.processPortfolio(portfolio, nodes)\n\n def performProcess(self, args):\n\n self.readArguments(args)\n\n if self.requiredAttributesNotSet():\n return\n\n if type(self.portfolioGrouper) == type([]):\n self.portfolioGrouper = self.portfolioGrouper[0]\n\n if type(self.portfolioGrouper) == type(\"\"):\n self.portfolioGrouper = acm.FChainedGrouper[self.portfolioGrouper]\n\n if self.portfolioGrouper:\n if self.portfolioGrouper.IsKindOf(acm.FStoredPortfolioGrouper):\n self.portfolioGrouper = self.portfolioGrouper.Grouper()\n elif not self.portfolioGrouper.IsKindOf(acm.FChainedGrouper):\n self.portfolioGrouper = None\n\n self.portfolioGrouper = self.createOrModifyGrouper(\n self.portfolioGrouper)\n\n if
self.nextTradingDate:\n Logme()('Process from day %s.' % (self.nextTradingDate))\n self.defineCalcSpace()\n self.performCalculation()\n\n def defineCalcSpace(self):\n\n self.createCalcSpace(self.tradingObjects, self.portfolioGrouper)\n\n def createOrModifyGrouper(self, grouper):\n\n groupers = acm.FArray()\n portGrouperNeeded = True\n if grouper and grouper.IsKindOf(acm.FChainedGrouper):\n for g in grouper.Groupers():\n groupers.Add(g)\n if str(g.Label()) == 'Trade Portfolio':\n portGrouperNeeded = False\n\n if portGrouperNeeded:\n portGrouper = acm.FAttributeGrouper(\"Trade.Portfolio\")\n portGrouper.Label(\"Trade Portfolio\")\n groupers.AtInsert(0, portGrouper)\n grouper = acm.FChainedGrouper(groupers)\n return grouper\n\n def getPortfoliosFromTradingObjects(self, objects):\n\n settmp = acm.FSet()\n for obj in objects:\n for t in self.getTrades(obj):\n settmp.Add(t.Portfolio())\n return settmp.AsList()\n\n def readArguments(self, args):\n self.tradingObjects = self.getTradingObjects(args)\n self.portfolioGrouper = ('PortfolioGrouper' in args and\n args['PortfolioGrouper'])\n\n def processPortfolio(self, portfolio, nodes):\n raise NotImplementedError(\"processPortfolio\")\n\n def callAdjustTradeHook(self, trade, scriptName):\n try:\n import FBDPHook\n importlib.reload(FBDPHook)\n except ImportError:\n return\n try:\n FBDPHook.adjust_fx_ftrade(trade, scriptName)\n except:\n return\n\n def instrumentAtNode(self, node):\n t = self.tradeAtNode(node)\n if t:\n return t.Instrument()\n return None\n\n def tradeAtNode(self, node):\n nodeTrades = node.Item().Trades().AsIndexedCollection()\n if nodeTrades and nodeTrades.Size():\n return nodeTrades[0]\n return None\n\n def setTradePropertiesFromGrouper(self, trade, excludedAttributes=()):\n if self.attributes:\n for key, val in self.attributes.items():\n if key == CURRENCY_PAIR:\n continue\n if key not in excludedAttributes:\n try:\n if 'OptKey' in key:\n val = self.getOptKeyObject(key, val)\n if 'PositionOrCurrencyPair' in key:\n key = 'PositionPair'\n if 'AdditionalInfo' in key:\n continue\n trade.SetProperty(key, val)\n except Exception as e:\n msg = \"Ignored setting {0} on trade as {1}\".format(\n key, str(e))\n Logme()(msg, \"DEBUG\")\n\n\nclass FxPortfolioProcess(FxGroupingProcess):\n def __init__(self):\n FxGroupingProcess.__init__(self)\n\n self.defaultPortfolio = None\n self.defaultAcquirer = None\n self.mappedPortfolios = None\n self.mappedAcquirers = None\n self.mappedFundingInstruments = None\n self.mappedPositionPairs = None\n\n def readArguments(self, args):\n FxGroupingProcess.readArguments(self, args)\n self.defaultPortfolio = args['DefaultPortfolio'][0]\n if args['DefaultAcquirer']:\n self.defaultAcquirer = args['DefaultAcquirer'][0]\n else:\n self.defaultAcquirer = None\n if (self.defaultAcquirer and\n not self.defaultAcquirer.IsKindOf(acm.FInternalDepartment)):\n message = ('Default Acquirer \\'%s\\' is '\n 'not an Internal Department' %\n self.defaultAcquirer.Name())\n Logme()(message, 'ERROR')\n raise Exception(message)\n\n self.mappedPortfolios = (args['MappedPortfolios'] if\n 'MappedPortfolios' in args else {})\n self.mappedPositionPairs = (args['MappedPositionPairs'] if\n 'MappedPositionPairs' in args else {})\n self.mappedAcquirers = (args['MappedAcquirers'] if\n 'MappedAcquirers' in args else {})\n if self.mappedAcquirers:\n for k in self.mappedAcquirers:\n mappedAcquirer = acm.FParty[self.mappedAcquirers[k]]\n if not mappedAcquirer.IsKindOf(acm.FInternalDepartment):\n message = ('Mapped Acquirer \\'%s\\' '\n 'is not
an Internal Department' % mappedAcquirer.Name())\n Logme()(message, 'ERROR')\n raise Exception(message)\n\n def requiredAttributesNotSet(self):\n msg = FxGroupingProcess.requiredAttributesNotSet(self)\n if msg:\n pass\n elif not self.defaultPortfolio:\n msg = \"No default portfolio specified.\"\n\n if not msg:\n msg = self.requiredSubAttributesNotSet()\n if msg:\n Logme()(\"%s \" % msg, 'ERROR')\n return msg\n\n def defineCalcSpace(self):\n portfolios = self.getPortfoliosFromTradingObjects(self.tradingObjects)\n currenyPairs = [x.CurrencyPair() for x in portfolios]\n currObjects = acm.FSet().AddAll(\n [self.getCurrObjects(currPair) for currPair in currenyPairs])\n currObjects = currObjects.AsList()\n if not currObjects.IsEmpty():\n currObjects = [c for c in currObjects[0]]\n\n endDate = maxOfSpotDates(currObjects, self.nextTradingDate)\n today = acm.Time.DateToday()\n days = acm.Time.DateDifference(endDate, self.nextTradingDate)\n diff = acm.Time.DateDifference(self.nextTradingDate, today)\n\n bucketDefinitions = acm.FArray()\n bucketDefinitions.Add(self.createDefinition(diff))\n\n for n in range(1, days + 1):\n defi = self.createDefinition(n + diff)\n bucketDefinitions.Add(defi)\n bucketDefinitions.Add(acm.FRestTimeBucketDefinition())\n bucks = acm.Time().CreateTimeBucketsFromDefinitions(0,\n bucketDefinitions, None, 0, 0, 0, 0, 0, 0)\n\n self.createCalcSpace(self.tradingObjects, self.portfolioGrouper,\n acm.FTimeBucketGrouper(bucks))\n\n def createDefinition(self, n):\n # startDate = '0000-01-01', endDate = '9999-12-12'):\n today = acm.Time.DateToday()\n definition = acm.FDatePeriodTimeBucketDefinition()\n name = '%sd' % n\n definition.DatePeriod(name)\n definition.Name(acm.Time.DateAddDelta(today, 0, 0, n))\n definition.Adjust(False)\n definition.RelativeSpot(False)\n return definition\n\n def getDefaultPortfolio(self):\n return self.defaultPortfolio\n\n def getDefaultAcquirer(self):\n return self.defaultAcquirer\n\n def getDefaultPortfolioAndAcquirer(self):\n return self.defaultPortfolio, self.defaultAcquirer\n\n def getPortfolioAndAcquirer(self, currObject):\n mappedPortfolio, mappedAcquirer = self.getMappedPortfolioAndAcquirer(\n currObject)\n\n if not currObject:\n Logme()(\"Transactions within currency, using default portfolio \"\n \"and acquirer.\", \"DEBUG\")\n\n if not mappedPortfolio:\n mappedPortfolio = self.defaultPortfolio\n message = 'Using default portfolio.'\n Logme()(message, 'INFO')\n\n if not mappedAcquirer:\n mappedAcquirer = self.defaultAcquirer\n if 'Acquirer' not in self.attributes:\n message = 'Using default acquirer.'\n Logme()(message, 'INFO')\n\n return mappedPortfolio, mappedAcquirer\n\n def getMappedPortfolioAndAcquirer(self, currObject):\n mappedPortfolio = None\n mappedAcquirer = None\n\n if not currObject:\n return mappedPortfolio, mappedAcquirer\n\n if currObject.Name() in self.mappedPortfolios:\n mappedPortfolio = acm.FPhysicalPortfolio[self.mappedPortfolios[\n currObject.Name()]]\n if not mappedPortfolio:\n message = ('No mapped portfolio specified for %s.' %\n (currObject.Name()))\n Logme()(message, 'INFO')\n elif mappedPortfolio.Compound():\n message = ('Compound portfolio as mapped Portfolio %s .' 
%\n (mappedPortfolio.Name()))\n Logme()(message, 'ERROR')\n raise Exception(message)\n else:\n Logme()(\"Using mapped portfolio: %s\" % mappedPortfolio.Name(),\n \"DEBUG\")\n\n if currObject.Name() in self.mappedAcquirers:\n mappedAcquirer = acm.FParty[self.mappedAcquirers[\n currObject.Name()]]\n if not mappedAcquirer:\n message = ('No mapped acquirer specified for %s.' % (\n currObject.Name()))\n Logme()(message, 'INFO')\n else:\n # Exception if not Int dEp or CounterParty\n Logme()(\"Using mapped acquirer: %s\" % mappedAcquirer.Name(),\n \"DEBUG\")\n return mappedPortfolio, mappedAcquirer\n\n def getSplittingCurr(self, rollCurr, counterCurr, fundCurr1, fundCurr2):\n if fundCurr1 == rollCurr:\n if fundCurr2 != counterCurr:\n return fundCurr2\n if fundCurr1 == counterCurr:\n if fundCurr2 != rollCurr:\n return fundCurr2\n if fundCurr2 == rollCurr:\n if fundCurr1 != counterCurr:\n return fundCurr1\n if fundCurr2 == counterCurr:\n if fundCurr1 != rollCurr:\n return fundCurr1\n return None\n\n def getSplittingCurrency(self, rollCurr, counterCurr, fundPort):\n fundPortCurrencyPair = fundPort.CurrencyPair()\n if fundPortCurrencyPair:\n fundCurr1 = fundPortCurrencyPair.Currency1()\n fundCurr2 = fundPortCurrencyPair.Currency2()\n return self.getSplittingCurr(rollCurr, counterCurr, fundCurr1,\n fundCurr2)\n mappedPositionPairList = None\n if fundPort.Name() in self.mappedPositionPairs:\n mappedPositionPairList = \\\n self.mappedPositionPairs[fundPort.Name()]\n if not mappedPositionPairList:\n return None\n\n for p in mappedPositionPairList:\n currPair = acm.FCurrencyPair[p]\n fundCurr1 = currPair.Currency1()\n fundCurr2 = currPair.Currency2()\n splitCurr = self.getSplittingCurr(rollCurr, counterCurr,\n fundCurr1, fundCurr2)\n if splitCurr:\n return splitCurr\n return None\n\n def getMappedFundingInstrument(self, currObject):\n mappedFundingInstrument = None\n if not currObject:\n return mappedFundingInstrument\n\n if currObject.Name() in self.mappedFundingInstruments:\n mappedFundingInstrument = acm.FInstrument[\n self.mappedFundingInstruments[currObject.Name()]]\n\n return mappedFundingInstrument\n\n def getCounterCurrencyOfCurrPair(self, currPair):\n accountingCurr = acm.ObjectServer().UsedValuationParameters(\n ).AccountingCurrency()\n if (accountingCurr == currPair.Currency1() or\n accountingCurr == currPair.Currency2()):\n return accountingCurr\n else:\n cc = currPair.SweepCurrency()\n if (not cc or\n (cc != currPair.Currency1() and cc != currPair.Currency2())):\n if cc:\n Logme()('CurrencyPairs SweepCurrency is not equal to '\n 'Currency1 or Currency2', 'WARNING')\n cc = currPair.Currency2()\n return cc\n\n def createFxTrade(self, instrument, currency, portfolio, acquirer,\n counterparty, date, price, quantity, premium, tradetype,\n trade_process):\n\n trade = acm.FTrade()\n trade.Instrument(instrument)\n trade.Currency(currency)\n # overridden from self.attributes if grouped on acq\n trade.Acquirer(acquirer)\n trade.Counterparty(counterparty)\n trade.Trader(acm.User())\n trade.Type(tradetype)\n trade.Status('Internal')\n trade.Price(price)\n trade.ReferencePrice(price)\n roundingForInstrument = FBDPCommon.getPremiumRounding(instrument)\n trade.Quantity(self.roundValueForInstrument(quantity,\n roundingForInstrument))\n roundingForCurrency = FBDPCommon.getPremiumRounding(currency)\n trade.Premium(self.roundValueForInstrument(premium,\n roundingForCurrency))\n tradeTime = acm.Time.DateNow()\n if acm.Time().DateDifference(tradeTime, date) > 0:\n tradeTime = date\n trade.TradeTime(tradeTime)\n 
trade.AcquireDay(date)\n trade.ValueDay(date)\n trade.TradeProcess(trade_process)\n trade.Portfolio(portfolio)\n return trade\n\n def getPremiumRounding(self, instrument):\n\n if instrument is None:\n raise TypeError(\"Parameter 'instrument' must not be null\")\n roundingSpec = instrument.RoundingSpecification()\n if roundingSpec is None:\n return None\n roundings = acm.FRounding.Select(\"attribute='Premium'\")\n rounding = next((r for r in roundings if\n r.RoundingSpec().Name() == roundingSpec.Name()), None)\n return rounding\n\n def getPnLRounding(self, instrument):\n\n if instrument is None:\n raise TypeError(\"Parameter 'instrument' must not be null\")\n roundingSpec = instrument.RoundingSpecification()\n if roundingSpec is None:\n return None\n roundings = acm.FRounding.Select(\"attribute='Profit And Loss'\")\n rounding = next((r for r in roundings if\n r.RoundingSpec().Name() == roundingSpec.Name()), None)\n return rounding\n\n def roundValueForInstrument(self, value, rounding):\n\n if not isinstance(value, float) and not isinstance(value, int):\n raise TypeError(\"Parameter 'value' should be of type float or int\")\n value = float(value)\n if rounding is None:\n return value\n if type(rounding) != type(acm.FRounding()):\n raise TypeError(\"Parameter 'rounding' should be of type FRounding \"\n \"not of type \" + str(type(rounding)))\n roundingFunction = acm.GetFunction('round', 3)\n return roundingFunction(value, rounding.Decimals(), rounding.Type())\n\n def createGroupedFxTrade(self, instrument, currency, portfolio, acquirer,\n date, price, quantity, premium, tradetype=\"Spot Roll\",\n trade_process=4096):\n\n trade = self.createFxTrade(instrument, currency, portfolio, acquirer,\n self.defaultAcquirer, date, price, quantity, premium,\n tradetype, trade_process)\n self.setTradePropertiesFromGrouper(trade)\n trade.Portfolio(portfolio)\n return trade\n\n def getCurrObjects(self, currPair):\n raise NotImplementedError(\"getCurrObjects\")\n","sub_path":"Extensions/Default/FPythonCode/FFxCommon.py","file_name":"FFxCommon.py","file_ext":"py","file_size_in_byte":37730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"502859035","text":"import random\n\nfrom flask import Blueprint, abort, render_template, redirect, url_for, flash\nfrom app.extensions import oauth, db\nfrom flask_login import current_user, login_user\nfrom app.models import User\n\nimport os\n\noauth_bp = Blueprint('oauth', __name__)\n\n\n# 注册远程程序\ngithub = oauth.remote_app(\n name='github',\n consumer_key=os.getenv('GITHUB_CLIENT_ID'),\n consumer_secret=os.getenv('GITHUB_CLIENT_SECRET'),\n request_token_params={'scope': 'user'},\n base_url='https://api.github.com/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://github.com/login/oauth/access_token',\n authorize_url='https://github.com/login/oauth/authorize',\n)\n\ngoogle = oauth.remote_app(\n name='google',\n consumer_key=os.getenv('GOOGLE_CLIENT_ID'),\n consumer_secret=os.getenv('GOOGLE_CLIENT_SECRET'),\n request_token_params={'scope': 'email'},\n base_url='https://www.googleapis.com/oauth2/v1/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://accounts.google.com/o/oauth2/token',\n authorize_url='https://accounts.google.com/o/oauth2/auth',\n)\n\ntwitter = oauth.remote_app(\n name='twitter',\n consumer_key=os.getenv('TWITTER_CLIENT_ID'),\n consumer_secret=os.getenv('TWITTER_CLIENT_SECRET'),\n base_url='https://api.twitter.com/1.1',\n 
request_token_url='https://api.twitter.com/oauth/request_token',\n access_token_url='https://api.twitter.com/oauth/access_token',\n authorize_url='https://api.twitter.com/oauth/authorize',\n)\n\n\n# 服务提供商\nproviders = {\n 'github': github,\n 'google': google,\n 'twitter': twitter,\n}\n\n# 获取用户资料端点\nprofile_endpoints = {\n 'github': 'user',\n 'google': 'userinfo',\n 'twitter': 'account/verify_credentials.json?include_email=true'\n}\n\n\n# 登录\n@oauth_bp.route('/login/')\ndef oauth_login(provider_name):\n if provider_name not in providers.keys():\n abort(404)\n if current_user.is_authenticated:\n return redirect(url_for('app.home'))\n\n callback = url_for('.oauth_callback', provider_name=provider_name, _external=True)\n return providers[provider_name].authorize(callback=callback)\n\n\n# 用于获取用户 详细资料, 由于每个提供商资料对应的key名称不一样, 比如github的bio是用户简介, twitter是description\ndef get_social_profile(provider, access_token):\n\n profile_endpoint = profile_endpoints[provider.name]\n response = provider.get(profile_endpoint, token=access_token)\n\n if provider.name == 'twitter':\n username = response.data.get('name')\n website = response.data.get('url')\n github = ''\n email = response.data.get('email')\n bio = response.data.get('description')\n elif provider.name == 'google':\n username = response.data.get('name')\n website = response.data.get('link')\n github = '' # github 设为空\n email = response.data.get('email')\n bio = '' # google 没有 bio简介\n else:\n username = response.data.get('name')\n website = response.data.get('blog')\n github = response.data.get('html_url')\n email = response.data.get('email')\n bio = response.data.get('bio')\n\n return username, website, github, email, bio\n\n\n# 用获取到的详细资料, 来查验数据库中是否存在此email, 如果不存在就给此email注册, 使用github拿来的详细资料填充到catchat中\n@oauth_bp.route('/callback/')\ndef oauth_callback(provider_name):\n if provider_name not in providers.keys():\n abort(404)\n\n provider = providers[provider_name] # 从字典里拿到服务商\n response = provider.authorized_response() # 像服务提供商出起post请求, 拿到服务商的响应\n\n if response is not None: # 从响应中获取 access令牌\n access_token = response.get('access_token')\n else:\n access_token = None\n\n if access_token is None: # 如果分析出令牌是 None, 提示被 拒绝\n flash('Access denied, please try again')\n return redirect(url_for('auth.login'))\n # 令牌是true , 就获取详细的用户数据\n username, website, github, email, bio = get_social_profile(provider, access_token)\n\n user = User.query.filter_by(email=email).first()\n if user is None: # 用分析出的详细数据 email, 来查找catchat数据库中是否存在此用户\n # password = str(random.randint(1111, 9999))\n user = User(email=email, nickname=username, github=github, website=website, bio=bio)\n db.session.add(user)\n # user.set_password(password)\n db.session.commit()\n login_user(user, remember=True) # 不存在此用户, 就用此email注册用户, 再登录\n # flash('your password is: %s ' % password)\n return redirect(url_for('app.profile'))\n\n login_user(user, remember=True) # 存在此用户, 就直接登录\n return redirect(url_for('app.home'))\n","sub_path":"app/blueprints/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"22024705","text":"from sklearn import datasets\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nwine = datasets.load_wine()\nx = wine.data\ny = wine.target\n\nprint(np.shape(x), 
np.shape(y))\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n\nmodel = LogisticRegression().fit(x_train, y_train)\nmodelSvm = SVC().fit(x_train, y_train)\nmodelTree = DecisionTreeClassifier().fit(x_train, y_train)\nmodelMLPClassifier = MLPClassifier(alpha=1e-2, hidden_layer_sizes=(800,), solver='lbfgs', random_state=2).fit(x_train,\n y_train)\n\n\ndef method_name(mdl):\n print(\"Accuracy on the training data: %f\" % (mdl.score(x_train, y_train)))\n print(\"Accuracy on the test data: %f\" % (mdl.score(x_test, y_test)))\n print(\"-------------\")\n\n\nmethod_name(model)\nmethod_name(modelSvm)\nmethod_name(modelTree)\nmethod_name(modelMLPClassifier)\n","sub_path":"skLearnTest.py","file_name":"skLearnTest.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"367459557","text":"#!/usr/bin/python3\n\"\"\" Student class module\n\"\"\"\n\n\nclass Student:\n \"\"\" Student class\n \"\"\"\n\n def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\" Returns a dictionary representation of a Student instance.\n attrs (list): (optional) a list of strings to return the attributes of.\n \"\"\"\n if attrs is None:\n return self.__dict__\n mydict = {}\n for key in attrs:\n if key in self.__dict__.keys():\n mydict[key] = self.__dict__.get(key)\n return mydict\n","sub_path":"0x0B-python-input_output/12-student.py","file_name":"12-student.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"355328990","text":"'''\nDictionaries cannot use lists as keys. Just use \n'''\n'''\nRecursive manner of implementing this, can be done with the choose.\nThis is a weird manner of doing this to me, under the guise of infinite allocation space. \nSolution with the combinatorial manner is done in the C++ version of this.\n\nExplanation:\n Go through the list. One path is found when it is no longer possible to go \n from one x,y pair. Amasses all possible paths from each of the x,y pairs.\n The last x,y pair that is amassed is the 20,20 which is the one that is \n returned. 
Interesting concept.\n'''\ndef gridSize(xAxis, yAxis, dictOfVars):\n if xAxis == 0 or yAxis == 0: # one path is found, when no more possible\n return 1 # Reached the end of the possible paths, for this pair.\n if (xAxis, yAxis) in dictOfVars:\n return dictOfVars[(xAxis,yAxis)] #The tuple is in the dict, return it\n \n #At every point populate the hashtable.\n dictOfVars[(xAxis, yAxis)]= (gridSize(xAxis-1, yAxis,dictOfVars) + \n gridSize(xAxis, yAxis-1, dictOfVars))\n return dictOfVars[(xAxis, yAxis)]\n\n#Create the empty dict:\n#Keys: Tuples (xAxis,yAxis), Values: Length of the path from that tuple point.\ndictOf = dict()\nprint(gridSize(20,20, dictOf))\nprint(dictOf)\n","sub_path":"project-euler/python/elevenToTwenty/latticePaths.py","file_name":"latticePaths.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"209907194","text":"import datetime as dt\nimport requests as req\nimport json\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport os\nfrom urllib.parse import urlparse\nfrom config import *\nfrom flask_cors import CORS\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Numeric, Text, Float\n\nimport psycopg2\nfrom flask import Flask, jsonify\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\nCORS(app)\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route('/')\ndef index():\n\t\"\"\"List all available api routes.\"\"\"\n\treturn (\n\t\tf\"Available Routes:<br/><br/>\"\n\t\tf\"<ul>\"\n\t\tf\"\t<li>/api/v1.0/dates</li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List available dates</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/positions/<date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of holdings from file date</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/positions/<start date>/<end date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of holdings from filings in the date range</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/srr/<start date>/<end date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of holdings on end_date with a simple rate of return</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/indgrp/<start date>/<end date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of industry groups held from filings in the date range</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/indsec/<start date>/<end date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of industry sectors held from filings in the date range</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/ticker/<ticker></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of all holdings across all filings for a specific ticker.</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/cshares/negative/<file date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of holdings with a negative change of shares from filings on or after the specified file date.</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"\t<li>/api/v1.0/cshares/positive/<file date></li>\"\n\t\tf\"\t\t<ul>\"\n\t\tf\"\t\t\t<li>List of holdings with a positive change of shares from filings on or after the specified file date.</li>\"\n\t\tf\"\t\t</ul>\"\n\t\tf\"</ul>\"\n\t)\n\n#################################################\n# Reformat date from 6/30/2017 12:00 AM to 2017-06-30\n#################################################\n\ndef reformat_date(str_date):\n\treturn datetime.strptime(str_date, '%m/%d/%Y %I:%M %p').strftime('%Y-%m-%d')\n\n\n#################################################\n# Database Setup\n#################################################\n# create an engine to postgresql db\nuser = config['psql_user']\npassword = config['psql_pwd']\nhost = 'localhost'\nport = '5432'\ndb = config['psql_db']\n\nurl = 'postgresql://{}:{}@{}:{}/{}'\nurl = url.format(user, password, host, port, db)\n\n# The return value of create_engine() is our connection object\nengine = sqlalchemy.create_engine(url, client_encoding='utf8')\n\ndburl = 'dbname={} user={} password={}'\ndburl = dburl.format(db, user, password)\ndb = psycopg2.connect(dburl)\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to the securities and positions tables\nSecuritiesEx = Base.classes.securitiesex\nProcessedPositions = Base.classes.processed_positions\nPositions = Base.classes.positions\nLatestPositions = Base.classes.latest_positions\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n# Create a connection to the engine called conn\nconn = engine.connect()\n\n\n#################################################\n@app.route(\"/api/v1.0/dates\")\ndef getDates():\n\t\"\"\"Return a list of available dates\"\"\"\n\tresults1 = session.query(LatestPositions.currentreportdate).\\\n\t\tgroup_by(LatestPositions.currentreportdate).all()\n\n\tresults2 = session.query(ProcessedPositions.file_date).\\\n\t\tgroup_by(ProcessedPositions.file_date).all()\n\n\t# Convert list of tuples into normal list\n\tdates_list = list(np.ravel(results1)) + list(np.ravel(results2))\n\tdates_list.sort()\n\treturn jsonify(dates_list)\n\n#################################################\n\n@app.route(\"/api/v1.0/srr/<start_date>/<end_date>\")\ndef getSRR(start_date, end_date):\n\t## List of holdings on end_date with a simple rate of return\n\tsql = ('SELECT p2.name as name, p2.ticker as ticker, ' +\n\t\t\t'p1.file_date as date1, p2.file_date as date2, ' +\n\t\t\t'p1.mval as mval1, p1.shares as shares1, p1.price as price1, ' +\n\t\t\t'p2.mval as mval2, p2.shares as shares2, p2.price as price2, ' +\n\t\t\t'case when p1.price = 0 then 0 ' +\n\t\t\t'\twhen p2.shares = 0 then 0 ' +\n\t\t\t'\telse ((p2.price-p1.price)/p1.price) * 100 end as SSR ' +\n\t\t\t'FROM vPositions as p2 ' +\n\t\t\t'LEFT JOIN vPositions as p1 ON ' +\n\t\t\t'p1.name = p2.name AND ' +\n\t\t\t'p1.ticker = p2.ticker AND ' +\n\t\t\t\"p1.file_date = '\" + start_date + \"' \" +\n\t\t\t\"WHERE p2.file_date = '\" + end_date + \"';\")\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[4] is None: \n\t\t\tmval1 = None \n\t\telse: \n\t\t\tmval1 = int(record[4])\n\t\tif record[5] is None: \n\t\t\tshares1 = None \n\t\telse: \n\t\t\tshares1 = int(record[5])\n\t\tif record[6] is None: \n\t\t\tprice1 = None \n\t\telse: \n\t\t\tprice1 = float(record[6])\n\t\tif record[7] is None: \n\t\t\tmval2 = None \n\t\telse: \n\t\t\tmval2 = int(record[7])\n\t\tif record[8] is None: \n\t\t\tshares2 = None \n\t\telse: \n\t\t\tshares2 = int(record[8])\n\t\tif record[9] is None: \n\t\t\tprice2 = None \n\t\telse: \n\t\t\tprice2 = float(record[9])\n\t\tif record[10] is None: \n\t\t\tsrr 
= None \n\t\telse: \n\t\t\tsrr = float(record[10])\n\t\tpositions.append({\n\t\t\t\"name\": record[0],\n\t\t\t\"ticker\": record[1],\n\t\t\t\"file_date1\": record[2],\n\t\t\t\"file_date2\": record[3],\n\t\t\t\"mval1\": mval1,\n\t\t\t\"shares1\": shares1,\n\t\t\t\"price1\": price1,\n\t\t\t\"mval2\": mval2,\n\t\t\t\"shares2\": shares2,\n\t\t\t\"price2\": price2,\n\t\t\t\"srr\": srr\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n\n@app.route(\"/api/v1.0/indsec//\")\ndef getIndSector(start_date, end_date):\n\t## List of market values by industry sector for a date range\n\tsql = ('SELECT p.file_date, s.indsec, SUM(p.mval) as mval, SUM(p.shares) as shares ' +\n\t\t\t'FROM vPositions as p ' +\n\t\t\t'JOIN securitiesex as s ON p.ticker = s.ticker ' +\n\t\t\t\"WHERE indsec <> 'Nan' and '\" + start_date + \"' <= p.file_date and p.file_date <= '\" + end_date + \"' \" +\n\t\t\t\"GROUP BY p.file_date, s.indsec \" +\n\t\t\t\"HAVING '\" + start_date + \"' <= file_date and file_date <= '\" + end_date + \"' \" +\n\t\t\t\"ORDER BY indsec, file_date;\")\n\tprint(sql)\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[2] is None: \n\t\t\tmval = None \n\t\telse: \n\t\t\tmval = int(record[2])\n\t\tif record[3] is None: \n\t\t\tshares = None \n\t\telse: \n\t\t\tshares = int(record[3])\n\t\tpositions.append({\n\t\t\t\"file_date\": record[0],\n\t\t\t\"indsec\": record[1],\n\t\t\t\"mval\": mval,\n\t\t\t\"shares\":shares\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n\n@app.route(\"/api/v1.0/indgrp//\")\ndef getIndGroup(start_date, end_date):\n\t## List of market values by industry group for a date range\n\tsql = ('SELECT p.file_date, s.indgrp, SUM(p.mval) as mval, SUM(p.shares) as shares ' +\n\t\t\t'FROM vPositions as p ' +\n\t\t\t'JOIN securitiesex as s ON p.ticker = s.ticker ' +\n\t\t\t\"WHERE indgrp <> 'Nan' and '\" + start_date + \"' <= p.file_date and p.file_date <= '\" + end_date + \"' \" +\n\t\t\t\"GROUP BY p.file_date, s.indgrp \" +\n\t\t\t\"HAVING '\" + start_date + \"' <= file_date and file_date <= '\" + end_date + \"' \" +\n\t\t\t\"ORDER BY indgrp, file_date;\")\n\tprint(sql)\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[2] is None: \n\t\t\tmval = None \n\t\telse: \n\t\t\tmval = int(record[2])\n\t\tif record[3] is None: \n\t\t\tshares = None \n\t\telse: \n\t\t\tshares = int(record[3])\n\t\tpositions.append({\n\t\t\t\"file_date\": record[0],\n\t\t\t\"indgrp\": record[1],\n\t\t\t\"mval\": mval,\n\t\t\t\"shares\": shares\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n# WITH data AS\n# ( SELECT file_date, name, ticker, mval, shares, price\n# FROM vPositions\n# WHERE '2015-06-30' <= file_date and file_date <= '2017-06-30'\n# ), all_dates AS \n# (\n# SELECT DISTINCT file_date\n# FROM vPositions\n# WHERE '2015-06-30' <= file_date and file_date <= '2017-06-30'\n# ), all_secs AS \n# (\n# SELECT DISTINCT ticker, name\n# FROM vPositions\n# WHERE '2015-06-30' <= file_date and file_date <= '2017-06-30'\n# )\n# SELECT x.ticker, x.file_date, x.name, data.mval, data.shares, data.price\n# from (\n# SELECT file_date, ticker, name\n# FROM all_dates\n# CROSS JOIN all_secs\n# ) x\n# LEFT JOIN data on x.file_date = data.file_date AND x.ticker = data.ticker\n# ORDER BY x.ticker, 
x.file_date;\n\n@app.route(\"/api/v1.0/positions//\")\ndef getPositionsOverTime(start_date, end_date):\n\t## List of holdings from start date to end date\n\tsql = ('WITH data AS ' +\n\t\t\t'( SELECT file_date, name, ticker, mval, shares, price ' +\n\t\t\t' FROM vPositions ' +\n\t\t\t\" WHERE '\" + start_date + \"' <= file_date and file_date <= '\" + end_date + \"' \" +\n\t\t\t'), all_dates AS ' +\n\t\t\t'( ' +\n\t\t\t' SELECT DISTINCT file_date ' +\n\t\t\t' FROM vPositions ' +\n\t\t\t\" WHERE '\" + start_date + \"' <= file_date and file_date <= '\" + end_date + \"' \" +\n\t\t\t'), all_secs AS ' +\n\t\t\t'( ' +\n\t\t\t' SELECT DISTINCT ticker, name ' +\n\t\t\t' FROM vPositions ' +\n\t\t\t\" WHERE '\" + start_date + \"' <= file_date and file_date <= '\" + end_date + \"' \" +\n\t\t\t') ' +\n\t\t\t'SELECT x.ticker, x.file_date, x.name, data.mval, data.shares, data.price ' +\n\t\t\t'from ( ' +\n\t\t\t' SELECT file_date, ticker, name ' +\n\t\t\t' FROM all_dates ' +\n\t\t\t' CROSS JOIN all_secs ' +\n\t\t\t') x ' +\n\t\t\t'LEFT JOIN data on x.file_date = data.file_date AND x.ticker = data.ticker' +\n\t\t\t' ORDER BY x.ticker, x.file_date;')\n\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[3] is None:\n\t\t\tmval = None\n\t\telse:\n\t\t\tmval = float(record[3])\n\t\tif record[4] is None:\n\t\t\tshares = None\n\t\telse:\n\t\t\tshares = float(record[4])\n\t\tif record[5] is None:\n\t\t\tprice = None\n\t\telse:\n\t\t\tprice = float(record[5])\n\t\tpositions.append({\n\t\t\t\"ticker\": record[0],\n\t\t\t\"file_date\": record[1],\n\t\t\t\"name\": record[2],\n\t\t\t\"mval\": mval,\n\t\t\t\"shares\": shares,\n\t\t\t\"price\": price\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n@app.route(\"/api/v1.0/positions/\")\ndef getPositions(date):\n\t## Get current holdings\n\n\t# # Use `declarative_base` from SQLAlchemy to connect your class to your PostgreSQL database\n\t# Base2 = declarative_base()\n\n\t# class Lastest_Positions(Base2):\n\t# \t__tablename__ = 'latest_positions'\n\t# \tid = Column(Integer, primary_key=True)\n\t# \tquerydate = Column(Text)\n\t# \tfilerid = Column(Text)\n\t# \tcik = Column(Text)\n\t# \tcurrentreportdate = Column(Text)\n\t# \tpriorreportdate = Column(Text)\n\t# \townername = Column(Text)\n\t# \tissueid = Column(Text)\n\t# \tticker = Column(Text)\n\t# \tcompanyname = Column(Text)\n\t# \tissuetitle = Column(Text)\n\t# \texchangeid = Column(Text)\n\t# \tstreet1 = Column(Text)\n\t# \tcity = Column(Text)\n\t# \tstate = Column(Text)\n\t# \tzipcode = Column(Text)\n\t# \tcountry = Column(Text)\n\t# \tphonecountrycode = Column(Text)\n\t# \tphoneareacode = Column(Text)\n\t# \tphonenumber = Column(Text)\n\t# \tsharesout = Column(Text)\n\t# \tsharesoutdate = Column(Text)\n\t# \tprice = Column(Text)\n\t# \tpricedate = Column(Text)\n\t# \tsharesheld = Column(Text)\n\t# \tsharesheldchange = Column(Text)\n\t# \tsharesheldpercentchange = Column(Text)\n\t# \tmarketvalue = Column(Text)\n\t# \tmarketvaluechange = Column(Text)\n\t# \tportfoliopercent = Column(Text)\n\t# \tsharesoutpercent = Column(Text)\n\t# \tmarketoperator = Column(Text)\n\t# \tmarketoperatorid = Column(Text)\n\t# \tmarkettier = Column(Text)\n\t# \tmarkettierid = Column(Text)\n\n\t# \tdef __repr__(self):\n\t# \t\treturn f\"companyname={self.companyname}, ticker={self.ticker}, marketvalue={self.marketvalue}, sharesheld={self.sharesheld}\"\n\n\n\t# # Use `create_all` to create the latest_positions 
table in the database\n\t# Base2.metadata.create_all(engine)\n\n\t# # Use MetaData from SQLAlchemy to reflect the tables\\n\",\n\t# metadata = MetaData(bind=engine)\n\t# metadata.reflect()\n\n\t# # Save the reference to the `latest_positions` table as a variable called `table`\n\t# table = sqlalchemy.Table('latest_positions', metadata, autoload=True)\n\n\t# query_url = \"http://edgaronline.api.mashery.com/v2/ownerships/currentownerholdings?ciks=%s&appkey=%s\" % (config['sec13f_brkcik'], config['sec13f_appkey'])\n\t# sec_data = req.get(query_url).json()\n\n\t# #with open('data.txt', 'w') as outfile:\n\t# #\tjson.dump(sec_data, outfile)\n\t\n\t# file_date = None\n\t# securities = []\n\t# for row in sec_data[\"result\"][\"rows\"]:\n\t# \tquerydate = None\n\t# \tfilerid = None\n\t# \tcik = None\n\t# \tcurrentreportdate = None\n\t# \tpriorreportdate = None\n\t# \townername = None\n\t# \tissueid = None\n\t# \tticker = None\n\t# \tcompanyname = None\n\t# \tissuetitle = None\n\t# \texchangeid = None\n\t# \tstreet1 = None\n\t# \tcity = None\n\t# \tstate = None\n\t# \tzipcode = None\n\t# \tcountry = None\n\t# \tphonecountrycode = None\n\t# \tphoneareacode = None\n\t# \tphonenumber = None\n\t# \tsharesout = None\n\t# \tsharesoutdate = None\n\t# \tprice = None\n\t# \tpricedate = None\n\t# \tsharesheld = None\n\t# \tsharesheldchange = None\n\t# \tsharesheldpercentchange = None\n\t# \tmarketvalue = None\n\t# \tmarketvaluechange = None\n\t# \tportfoliopercent = None\n\t# \tsharesoutpercent = None\n\t# \tmarketoperator = None\n\t# \tmarketoperatorid = None\n\t# \tmarkettier = None\n\t# \tmarkettierid = None\n\t# \tfor value in row[\"values\"]:\n\t# \t\tif not(file_date) and value[\"field\"] == \"currentreportdate\":\n\t# \t\t\tfile_date = reformat_date(value[\"value\"])\n\t# \t\tif value[\"field\"] == \"querydate\":\n\t# \t\t\tquerydate = reformat_date(value[\"value\"])\n\t# \t\tif value[\"field\"] == \"filerid\":\n\t# \t\t\tfilerid = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"cik\":\n\t# \t\t\tcik = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"currentreportdate\":\n\t# \t\t\tcurrentreportdate = reformat_date(value[\"value\"])\n\t# \t\tif value[\"field\"] == \"priorreportdate\":\n\t# \t\t\t priorreportdate = reformat_date(value[\"value\"])\n\t# \t\tif value[\"field\"] == \"owername\":\n\t# \t\t\townername = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"issueid\":\n\t# \t\t\tissueid = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"ticker\":\n\t# \t\t\tticker = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"companyname\":\n\t# \t\t\tcompanyname = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"issuetitle\":\n\t# \t\t\tissuetitle = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"exchangeid\":\n\t# \t\t\texchangeid = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"street1\":\n\t# \t\t\tstreet1 = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"city\":\n\t# \t\t\tcity = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"state\":\n\t# \t\t\tstate = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"zip\":\n\t# \t\t\tzipcode = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"country\":\n\t# \t\t\tcountry = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"phonecountrycode\":\n\t# \t\t\tphonecountrycode = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"phoneareacode\":\n\t# \t\t\tphoneareacode = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"phonenumber\":\n\t# \t\t\tphonenumber = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"sharesout\":\n\t# \t\t\tsharesout = 
value[\"value\"]\n\t# \t\tif value[\"field\"] == \"sharesoutdate\":\n\t# \t\t\tsharesoutdate = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"price\":\n\t# \t\t\tprice = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"pricedate\":\n\t# \t\t\tpricedate = reformat_date(value[\"value\"])\n\t# \t\tif value[\"field\"] == \"sharesheld\":\n\t# \t\t\tsharesheld = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"sharesheldchange\":\n\t# \t\t\tsharesheldchange = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"sharesheldpercentchange\":\n\t# \t\t\tsharesheldpercentchange = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"marketvalue\":\n\t# \t\t\tmarketvalue = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"marketvaluechange\":\n\t# \t\t\tmarketvaluechange = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"portfoliopercent\":\n\t# \t\t\tportfoliopercent = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"sharesoutpercent\":\n\t# \t\t\tsharesoutpercent = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"marketoperator\":\n\t# \t\t\tmarketoperator = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"marketoperatorid\":\n\t# \t\t\tmarketoperatorid = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"markettier\":\n\t# \t\t\tmarkettier = value[\"value\"]\n\t# \t\tif value[\"field\"] == \"markettierid\":\n\t# \t\t\tmarkettierid = value[\"value\"]\n\t# \tsecurities.append({\n\t# \t\t \"querydate\" : querydate\n\t# \t\t,\"filerid\" : filerid\n\t# \t\t,\"cik\" : cik\n\t# \t\t,\"currentreportdate\" : currentreportdate\n\t# \t\t,\"priorreportdate\" : priorreportdate\n\t# \t\t,\"ownername\" : ownername\n\t# \t\t,\"issueid\" : issueid\n\t# \t\t,\"ticker\" : ticker\n\t# \t\t,\"companyname\" : companyname\n\t# \t\t,\"issuetitle\" : issuetitle\n\t# \t\t,\"exchangeid\" : exchangeid\n\t# \t\t,\"street1\" : street1\n\t# \t\t,\"city\" : city\n\t# \t\t,\"state\" : state\n\t# \t\t,\"zipcode\" : zipcode\n\t# \t\t,\"country\" : country\n\t# \t\t,\"phonecountrycode\" : phonecountrycode\n\t# \t\t,\"phoneareacode\" : phoneareacode\n\t# \t\t,\"phonenumber\" : phonenumber\n\t# \t\t,\"sharesout\" : sharesout\n\t# \t\t,\"sharesoutdate\" : sharesoutdate\n\t# \t\t,\"price\" : price\n\t# \t\t,\"pricedate\" : pricedate\n\t# \t\t,\"sharesheld\" : sharesheld\n\t# \t\t,\"sharesheldchange\" : sharesheldchange\n\t# \t\t,\"sharesheldpercentchange\" : sharesheldpercentchange\n\t# \t\t,\"marketvalue\" : marketvalue\n\t# \t\t,\"marketvaluechange\" : marketvaluechange\n\t# \t\t,\"portfoliopercent\" : portfoliopercent\n\t# \t\t,\"sharesoutpercent\" : sharesoutpercent\n\t# \t\t,\"marketoperator\" : marketoperator\n\t# \t\t,\"marketoperatorid\" : marketoperatorid\n\t# \t\t,\"markettier\" : markettier\n\t# \t\t,\"markettierid\" : markettierid\n\t# \t})\n\t\n\t# #with open('data_list.txt', 'w') as outfile:\n\t# #\tfor item in securities:\n\t# #\t\toutfile.write(\"%s\\n\" % item)\n\t\n\t# # check if holdings for the date exist\n\t# results = session.query(\n\t# \t\tLatestPositions.companyname,\\\n\t# \t\tLatestPositions.ticker,\\\n\t# \t\tsqlalchemy.sql.expression.literal_column(\"''\").label(\"cusip\"),\\\n\t# \t\tfunc.sum(func.cast(LatestPositions.marketvalue, Float)).label('mval'),\\\n\t# \t\tfunc.sum(func.cast(LatestPositions.marketvaluechange, Float)).label('cmval'),\\\n\t# \t\tfunc.sum(func.cast(LatestPositions.sharesheld, Float)).label('shares'),\\\n\t# \t\tfunc.sum(func.cast(LatestPositions.sharesheldchange, Float)).label('cshares'))\\\n\t# \t.group_by(LatestPositions.companyname, LatestPositions.ticker)\\\n\t# 
\t.filter(LatestPositions.currentreportdate == file_date).all()\n\n\t# # delete existing file date data from table\n\t# sql = f\"DELETE FROM latest_positions WHERE currentreportdate = '{file_date}'\"\n\t# conn.execute(sql)\n\n\t# sql = f\"DELETE FROM processed_positions WHERE file_date = '{file_date}'\"\n\t# conn.execute(sql)\n\n\n\t# conn.execute(table.insert(), securities)\n\n\tsql = (\"SELECT file_Date, name, ticker, mval, cmval, shares, cshares, price \" +\n\t\t \" FROM vPositions \" +\n\t\t\t\" WHERE '\" + date + \"' = file_date; \")\n\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[3] is None:\n\t\t\tmval = None\n\t\telse:\n\t\t\tmval = float(record[3])\n\t\tif record[4] is None:\n\t\t\tcmval = None\n\t\telse:\n\t\t\tcmval = float(record[4])\n\t\tif record[5] is None:\n\t\t\tshares = None\n\t\telse:\n\t\t\tshares = float(record[5])\n\t\tif record[6] is None:\n\t\t\tcshares = None\n\t\telse:\n\t\t\tcshares = float(record[6])\n\t\tif record[7] is None:\n\t\t\tprice = None\n\t\telse:\n\t\t\tprice = float(record[7])\n\t\tpositions.append({\n\t\t\t\"file_date\": record[0],\n\t\t\t\"name\": record[1],\n\t\t\t\"ticker\": record[2],\n\t\t\t\"mval\": mval,\n\t\t\t\"cmval\": cmval,\n\t\t\t\"shares\": shares,\n\t\t\t\"cshares\": cshares,\n\t\t\t\"price\": price\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n@app.route(\"/api/v1.0/ticker/\")\ndef getTickerPositions(ticker):\n\t## Get current holdings for a specific ticker\n\n\n\tsql = (\"SELECT file_Date, name, ticker, mval, cmval, shares, cshares, price \" +\n\t\t \" FROM vPositions \" +\n\t\t\t\" WHERE ticker ilike '%\" + ticker + \"%' ORDER BY file_date;\")\n\tprint(sql)\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[3] is None:\n\t\t\tmval = None\n\t\telse:\n\t\t\tmval = float(record[3])\n\t\tif record[4] is None:\n\t\t\tcmval = None\n\t\telse:\n\t\t\tcmval = float(record[4])\n\t\tif record[5] is None:\n\t\t\tshares = None\n\t\telse:\n\t\t\tshares = float(record[5])\n\t\tif record[6] is None:\n\t\t\tcshares = None\n\t\telse:\n\t\t\tcshares = float(record[6])\n\t\tif record[7] is None:\n\t\t\tprice = None\n\t\telse:\n\t\t\tprice = float(record[7])\n\t\tpositions.append({\n\t\t\t\"file_date\": record[0],\n\t\t\t\"name\": record[1],\n\t\t\t\"ticker\": record[2],\n\t\t\t\"mval\": mval,\n\t\t\t\"cmval\": cmval,\n\t\t\t\"shares\": shares,\n\t\t\t\"cshares\": cshares,\n\t\t\t\"price\": price\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n@app.route(\"/api/v1.0/cshares/negative/\")\ndef getNegativeCShares(date):\n\t## Get current holdings for securities with a negative change in shares as of a specific date\n\n\n\tsql = (\"SELECT file_Date, name, ticker, mval, cmval, shares, cshares, price \" +\n\t\t \" FROM vPositions \" +\n\t\t\t\" WHERE cshares < 0 and file_date >= '\" + date + \"' ORDER BY ticker, file_date;\")\n\tprint(sql)\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[3] is None:\n\t\t\tmval = None\n\t\telse:\n\t\t\tmval = float(record[3])\n\t\tif record[4] is None:\n\t\t\tcmval = None\n\t\telse:\n\t\t\tcmval = float(record[4])\n\t\tif record[5] is None:\n\t\t\tshares = None\n\t\telse:\n\t\t\tshares = float(record[5])\n\t\tif record[6] is None:\n\t\t\tcshares = 
None\n\t\telse:\n\t\t\tcshares = float(record[6])\n\t\tif record[7] is None:\n\t\t\tprice = None\n\t\telse:\n\t\t\tprice = float(record[7])\n\t\tpositions.append({\n\t\t\t\"file_date\": record[0],\n\t\t\t\"name\": record[1],\n\t\t\t\"ticker\": record[2],\n\t\t\t\"mval\": mval,\n\t\t\t\"cmval\": cmval,\n\t\t\t\"shares\": shares,\n\t\t\t\"cshares\": cshares,\n\t\t\t\"price\": price\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\n@app.route(\"/api/v1.0/cshares/positive/\")\ndef getPositiveCShares(date):\n\t## Get current holdings for securities with a positive change in shares as of a specific date\n\n\n\tsql = (\"SELECT file_Date, name, ticker, mval, cmval, shares, cshares, price \" +\n\t\t \" FROM vPositions \" +\n\t\t\t\" WHERE cshares > 0 and file_date >= '\" + date + \"' ORDER BY ticker, file_date;\")\n\tprint(sql)\n\tcursor = db.cursor()\n\tcursor.execute(sql)\n\tresponse = cursor.fetchall()\n\tpositions = []\n\tfor record in response:\n\t\tif record[3] is None:\n\t\t\tmval = None\n\t\telse:\n\t\t\tmval = float(record[3])\n\t\tif record[4] is None:\n\t\t\tcmval = None\n\t\telse:\n\t\t\tcmval = float(record[4])\n\t\tif record[5] is None:\n\t\t\tshares = None\n\t\telse:\n\t\t\tshares = float(record[5])\n\t\tif record[6] is None:\n\t\t\tcshares = None\n\t\telse:\n\t\t\tcshares = float(record[6])\n\t\tif record[7] is None:\n\t\t\tprice = None\n\t\telse:\n\t\t\tprice = float(record[7])\n\t\tpositions.append({\n\t\t\t\"file_date\": record[0],\n\t\t\t\"name\": record[1],\n\t\t\t\"ticker\": record[2],\n\t\t\t\"mval\": mval,\n\t\t\t\"cmval\": cmval,\n\t\t\t\"shares\": shares,\n\t\t\t\"cshares\": cshares,\n\t\t\t\"price\": price\n\t\t})\n\n\treturn jsonify(positions)\n\n#################################################\nif __name__ == '__main__':\n app.run()\n\n\n#################################################\n# POSTGRESQL VIEW\n#################################################\n# CREATE VIEW vPositions\n# AS\n# SELECT file_date, name, ticker, mval, cmval, shares, cshares, price \n# FROM ( \n# SELECT file_date, name, ticker, SUM(mval) as mval, SUM(cmval) as cmval, \n# SUM(shares) as shares, SUM(cshares) as cshares, MIN(price) as price \n# FROM ( \n# SELECT p.file_date, p.name, s.ticker, CAST(p.mval AS NUMERIC) as mval, \n# CAST(p.cmval AS NUMERIC) as cmval, CAST(p.shares AS NUMERIC) as shares, \n# CAST(p.cshares AS NUMERIC) as cshares, CAST(p.price AS NUMERIC) as price \n# FROM processed_positions p \n# LEFT JOIN securitiesex s ON p.cusip = s.cusip\n# ) x \n# GROUP BY file_date, name, ticker \n# UNION ALL \n# SELECT lp.currentreportdate, lp.companyname, lp.ticker, \n# CAST(lp.marketvalue AS NUMERIC), CAST(lp.marketvaluechange AS NUMERIC), \n# CAST(lp.sharesheld AS NUMERIC), CAST(lp.sharesheldchange AS NUMERIC), \n# CAST(lp.price AS NUMERIC)\n# FROM latest_positions lp \n# ) t;","sub_path":"SEC13F/SEC13F_STEP9_API.py","file_name":"SEC13F_STEP9_API.py","file_ext":"py","file_size_in_byte":26226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"528682486","text":"#-*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\n\nfrom user.views import (\n HomeView, AllProjectView, DeleteProjectView\n)\n\n\nurlpatterns = patterns('',\n url(r'^home/$', view=HomeView.as_view(), name='home'),\n url(r'^projects/$', view=AllProjectView.as_view(), name=\"projects\"),\n url(r'^project-delete/(?P([0-9]+))?(/)?$', view=DeleteProjectView.as_view(), 
name=\"project-delete\"),\n)\n","sub_path":"bugsnews/apps/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"591003880","text":"# Author: Berin\n# Sketches repo: https://github.com/berinhard/sketches\n# Adapted fro Alexandre B A Villares' grid function\n# Source: http://abav.lugaralgum.com/sketch-a-day/\n\n# Cool bug found while studying the code\nfrom lines import LinesPositioning, BLACK, WHITE\n\n\nclass GridControl():\n\n def __init__(self, x, y, order, spacing, cell_width=100, cell_height=100):\n self.x, self.y = x, y\n self.order, self.spacing = order, spacing\n self.cell_width, self.cell_height = cell_width, cell_height\n self.reset()\n\n def reset(self):\n self.callables_grid = [[[] for j in range(self.order)] for i in range(self.order)]\n\n def register_functions_per_cell(self, factory, *args, **kwargs):\n for i in range(self.order):\n gx = i * self.spacing\n for j in range(self.order):\n gy = j * self.spacing\n self.callables_grid[i][j].extend(factory(gx, gy, self.cell_width, self.cell_height, *args, **kwargs))\n\n def display(self):\n with pushMatrix():\n translate(self.x, self.y)\n for i in range(self.order):\n gx = i * self.spacing\n for j in range(self.order):\n gy = j * self.spacing\n for function, args, kwargs in self.callables_grid[i][j]:\n function(gx, gy, *args, **kwargs)\n\n\ncustom_grid = GridControl(25, 12.5, 10, 100)\n\n\ndef cell_border(gx, gy, cell_width, cell_height):\n strokeWeight(1)\n stroke(BLACK)\n rect(gx, gy, cell_width, cell_height)\n\n\ndef per_cell_function_factory(gx, gy, cell_width, cell_height):\n max_lines = int(map(random(1), 0, 1, 10, 80))\n positions = LinesPositioning(\n gx,\n gy,\n max_width=gx + cell_width,\n max_height=gy + cell_height,\n max_lines=max_lines)\n\n return [\n (positions.new_random_line_and_display, (), {}),\n (positions.refresh, (), {}),\n #(cell_border, (cell_width, cell_height), {}),\n ]\n\ndef setup():\n noFill()\n size(1050, 1050)\n frameRate(10)\n strokeWeight(2)\n custom_grid.register_functions_per_cell(per_cell_function_factory)\n\ndef draw():\n background(WHITE)\n custom_grid.display()\n\n if not frameCount % 333:\n custom_grid.reset()\n custom_grid.register_functions_per_cell(per_cell_function_factory)\n print(frameCount)\n\n # if frameCount >= 6000:\n # noLoop()\n # saveFrame(\"####.png\")\n\n\ndef keyPressed():\n if key == \" \":\n custom_grid.reset()\n custom_grid.register_functions_per_cell(per_cell_function_factory)\n redraw()\n\n","sub_path":"s_037/s_037.pyde","file_name":"s_037.pyde","file_ext":"pyde","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"455375509","text":"import pickle\nimport logging\n\nimport experiment_parser\nimport bdm_experiment_parser\nfrom plotters import heatmap_plotter\n\nfrom datasets import pixel_data, heatmap_data\nfrom models import LeNet\nfrom predictors import binary_classificaion, multiclass_classification\n\nEXP_NAME = 'scale_ranking_bmm_short'\nEXP_NAME2 = 'bdm_bmm_short'\n\nDATA_FOLDER = EXP_NAME + '_data'\nDATA_FOLDER2 = EXP_NAME2 + '_data'\n\nPICKLE_FILE = EXP_NAME + '.pik'\nPICKLE_FILE2 = EXP_NAME2 + '.pik'\n\nUSE_EXP1 = True\nUSE_EXP2 = False\n\nRUN_PARSER = False\nRUN_PLOTTER = False\n\nBINARY = True\nMULTI = False\nNUM_OF_CLASSES = 4\n\nPIXEL_DATA = True\nHEATMAP_DATA = False\n\ndef parse_and_save(exp, data, pik):\n if exp[0]=='s':\n experiment = 
experiment_parser.Experiment(exp, data)\n else:\n experiment = bdm_experiment_parser.Experiment(exp, data)\n with open(pik, 'wb') as f:\n print('Saving file...')\n pickle.dump(experiment, f, -1)\n print('finished saving')\n return experiment\n\nif USE_EXP1:\n try:\n with open(PICKLE_FILE, 'rb') as f:\n if RUN_PARSER:\n experiment = parse_and_save(EXP_NAME, DATA_FOLDER, PICKLE_FILE)\n else:\n print('Opening file: ' + PICKLE_FILE)\n experiment = pickle.load(f)\n except FileNotFoundError:\n experiment = parse_and_save(EXP_NAME, DATA_FOLDER, PICKLE_FILE)\n\n\nif USE_EXP2:\n try:\n with open(PICKLE_FILE2, 'rb') as f:\n if RUN_PARSER:\n experiment2 = parse_and_save(EXP_NAME2, DATA_FOLDER2, PICKLE_FILE2)\n else:\n print('Opening file: ' + PICKLE_FILE2)\n experiment2 = pickle.load(f)\n except FileNotFoundError:\n experiment2 = parse_and_save(EXP_NAME2, DATA_FOLDER2, PICKLE_FILE2)\n\n\nif RUN_PLOTTER:\n trials = experiment2.get_trials(list_type='all')\n real_labels = [trial.stim_type_ind for trial in trials]\n labels = heatmap_plotter.run_ploter(trials, save=True, plot=False)\n for i in range(len(trials)):\n if real_labels[i]!=labels[i]:\n print('i = %d, real = %d, fake = %d' % (i, real_labels[i], labels[i]))\n\n\n\n# Hyperparameters\nLR = 1e-3\nEPOCHS = 1000000\nTRAIN_RATIO = 0.8\nIMG_DIM = 400\n\ntrials = experiment.get_trials(list_type='all') #+ experiment2.get_trials(list_type='all')\n#trials = [trial for trial in trials if trial.stim_type_ind==2]\n\nif BINARY:\n label_set = [0,1]\n if PIXEL_DATA:\n X, Y = pixel_data.create_dataset(trials, label_set, binary=True)\n if HEATMAP_DATA:\n X, Y = heatmap_data.create_dataset(trials, binary=True)\n print(X.shape, Y.shape)\n print(\"Dataset distribution: Y==0: %.2f, Y==1: %.2f\" % (len(Y[Y==0])/len(Y), len(Y[Y==1])/len(Y)))\n net = LeNet.LeNet_binary(LR=LR)\n print(net)\n print('BINARY')\n binary_classificaion.train(X, Y, net, LR, EPOCHS, TRAIN_RATIO, IMG_DIM, heatmap=HEATMAP_DATA)\n\nif MULTI:\n if PIXEL_DATA:\n X, Y = pixel_data.create_dataset(trials, binary=False)\n if HEATMAP_DATA:\n X, Y = heatmap_data.create_dataset(trials, binary=False)\n print(X.shape, Y.shape)\n net = LeNet.LeNet_multiclass(LR=LR, num_of_classes=NUM_OF_CLASSES)\n print(net)\n print('MULTICLASS')\n multiclass_classification.train(X, Y, net, LR, EPOCHS, TRAIN_RATIO, IMG_DIM, heatmap=HEATMAP_DATA)\n\n\n\n\nprint('Opening shell...')\nprint('')\n\n","sub_path":"run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"505217066","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n\ndef getReactantsFrom(rule):\n # assumes bimolecular reaction\n lhs = rule.split('<->')[0]\n reactants = lhs.split('+')\n mol1 = reactants[0].strip()\n mol2 = reactants[1].strip()\n return (mol1[:mol1.index('(')], mol2[:mol2.index('(')])\n\ndef getAllReactantsFrom(file_):\n reactantPairs = []\n with open(file_, 'r') as ruleFile:\n for line in ruleFile:\n line = line.strip()\n if not line.startswith('#') and len(line) > 0:\n #print line\n reactantPairs.append(getReactantsFrom(line))\n \n return reactantPairs\n \ndef genSifRxnsFrom(reactantPairs, intr):\n sifRxns = []\n for rPair in reactantPairs:\n sif = '%s %s %s' % (rPair[0], intr, rPair[1])\n sifRxns.append(sif)\n return sifRxns\n\ndef writeSifRxnsToFile(sifRxns, filename):\n with open(filename, 'w') as sifFile:\n for line in sifRxns:\n sifFile.write('%s\\n' % line)\n \n\ndef writeCSV(filename):\n with 
open(filename, 'w') as csvFile:\n head = 'MOL,TYPE\\n'\n csvFile.write(head)\n for i in range(1, 11):\n csvFile.write('ceRNA%d,red\\n' % i)\n for i in range(1, 11):\n csvFile.write('miRNA%d,blue\\n' % i)\n\ndef processRuleFile(ruleFile, sifFile):\n reactantPairs = getAllReactantsFrom(ruleFile)\n sifRxns = genSifRxnsFrom(reactantPairs, 'binds')\n writeSifRxnsToFile(sifRxns, sifFile)\n\n## testing ##\nif __name__ == '__main__':\n #rule = 'miRNA1(c) + ceRNA1(m) <-> miRNA1(c!1).ceRNA1(m!1) kf, kb'\n #print getReactantsFromRule(rule)\n #reactantPairs = getAllReactantsFrom('./testdata/bnglrules')\n #sifRxns = genSifRxnsFrom(reactantPairs, 'binds')\n #writeSifRxnsToFile(sifRxns, './testdata/sifrules.sif')\n #writeCSV('./testdata/sifrulesdata.csv')\n os.chdir('testdata')\n processRuleFile('AlaFigS5', 'AlaFigS5.sif')\n processRuleFile('BosiaFig4', 'BosiaFig4.sif')\n \n","sub_path":"bngl2sif.py","file_name":"bngl2sif.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"393005099","text":"import numpy as np\nimport time\nimport numpy.matlib\nfrom function import *\nfrom model import model as mod\nfrom l2_weights import *\n\n\ndef MRVFLtrain(trainX,trainY,option,Weighted_Matrix):\n \n rand_seed= np.random.RandomState(option.seed)\n\n [Nsample,Nfea] = trainX.shape\n N = option.N\n L = option.L\n C = option.C\n s = option.scale #scaling factor\n\n\n weights = []\n biases = []\n mu = []\n sigma = []\n\n\n A1 = trainX\n A_merge = trainX\n\n\n time_start=time.time()\n\n\n for i in range(L):\n\n if i==0:\n w = s*2*rand_seed.rand(Nfea,N)-1\n\n else:\n w = s*2*rand_seed.rand(N,N)-1\n\n b = s*rand_seed.rand(1,N)\n weights.append(w)\n biases.append(b)\n\n A1 = np.matmul(A1,w)\n # layer normalization\n A1_mean = np.mean(A1,axis=0)\n A1_std = np.std(A1,axis=0)\n A1 = (A1-A1_mean)/A1_std\n mu.append(A1_mean)\n sigma.append(A1_std)\n\n A1 = A1+np.matlib.repmat(b,Nsample,1)\n A1 = relu(A1)\n A_merge = np.concatenate([A_merge,A1],axis=1)\n\n\n\n A_merge = np.concatenate([A_merge,np.ones((Nsample,1))],axis=1)\n\n beta=l2_weights(A_merge,trainY,C,Nsample,Weighted_Matrix)\n\n time_end = time.time()\n Training_time = time_end-time_start\n\n\n ## Calculate the training accuracy\n trainY_temp = np.matmul(A_merge,beta)\n\n\n #softmax to generate probabilites\n index_max=np.argmax(trainY_temp,axis=1)\n max = trainY_temp[range(trainY_temp.shape[0]), index_max].reshape(-1,1)\n trainY_temp_max=np.matlib.repmat(max,1,trainY_temp.shape[1])\n trainY_temp1=trainY_temp-trainY_temp_max\n \n prob_scores=softmax(trainY_temp1)\n indx=np.argmax(prob_scores,axis=1)\n ind_corrClass=np.argmax(trainY,axis=1)\n fault=np.array(indx != ind_corrClass)\n TrainingAccuracy = np.mean(indx == ind_corrClass)\n model = mod(L,weights,biases,beta,mu,sigma)\n\n return model,TrainingAccuracy,Training_time,fault\n\n","sub_path":"Weighted_dRVFL/MRVFLtrain.py","file_name":"MRVFLtrain.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"520662918","text":"def checkio(game_result):\n if check(game_result):\n return 'O'\n elif check(game_result, 'X'):\n return 'X'\n else:\n return \"D\"\n\n\ndef check(game_result, player='O'):\n r = rows(game_result, player * 3)\n c = columns(game_result, player * 3)\n d = diagonal(game_result, player * 3)\n if r or c or d:\n return True\n return False\n\n\ndef columns(result, player):\n cols = ['', '', '']\n for row in result:\n cols[0] 
+= row[0]\n cols[1] += row[1]\n cols[2] += row[2]\n return True if player in cols else False\n\n\ndef rows(result, player):\n return True if player in result else False\n\n\ndef diagonal(result, player):\n position = (\n ((0, 0), (1, 1), (2, 2)),\n ((2, 0), (1, 1), (0, 2)),\n )\n for pos in position:\n diag = ''\n for p in pos:\n diag += result[p[0]][p[1]]\n\n if player == diag:\n return True\n return False\n\n\n# These \"asserts\" using only for self-checking and\n# not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio([\n u\"X.O\",\n u\"XX.\",\n u\"XOO\"]) == \"X\", \"Xs wins\"\n assert checkio([\n u\"OO.\",\n u\"XOX\",\n u\"XOX\"]) == \"O\", \"Os wins\"\n assert checkio([\n u\"OOX\",\n u\"XXO\",\n u\"OXX\"]) == \"D\", \"Draw\"\n assert checkio([\n u\"O.X\",\n u\"XX.\",\n u\"XOO\"]) == \"X\", \"Xs wins again\"\n\n","sub_path":"home/x-o-referee/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"422684447","text":"# This functionality is not needed; see mp_shared_mem.py\n\n# Test for a simple inter-process buffer that might be used to communicate between \n# the virtual environment sim and the main simbox program. Uses a lock to ensure\n# reading and writing don't conflict. The test below creates two process that \n# try to use the same buffer at the same time (every 10 sec). The test doesn't work.\n\nfrom types import BuiltinFunctionType\n\nclass Buffer:\n \"\"\"Simple one value buffer that prevents concurrency conflicts\"\"\"\n def __init__(self, lock):\n self.value = None\n if isinstance(lock, BuiltinFunctionType):\n self._lock = lock()\n else:\n self._lock = lock\n \n def put(self, value):\n with self._lock:\n self.value = value\n\n def get(self):\n with self._lock:\n return self.value\n\n\nif __name__ == \"__main__\":\n import threading as th\n import multiprocessing as mp \n import time\n from math import floor\n\n def f(buff, method):\n i = 0\n while i <= 100:\n if not floor(time.time()) % 10:\n if method == \"put\":\n buff.put(i)\n else:\n print(buff.get(i))\n i += 1\n time.sleep(1)\n \n buff = Buffer(th.Lock())\n putter = mp.Process(target=f, args=(buff, \"put\"))\n getter = mp.Process(target=f, args=(buff, \"get\"))\n putter.start()\n getter.start()\n","sub_path":"Pi testing/buff_test.py","file_name":"buff_test.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"352871004","text":"#! 
Python3\n# encoding: utf8\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\nfrom tkinter import ttk\nimport tkinter as tk\nfrom functools import partial\n\ndef DocumentsFrame(parent, items, display_method, download_method):\n\n if len(items) == 0:\n item_frame = ttk.Frame(parent, borderwidth=5)\n ttk.Label(parent, text=\"Geen resultaten gevonden.\").pack(fill=tk.BOTH, padx=20, pady=20)\n else:\n item_idx = 0\n for item in items:\n item_frame = ttk.Frame(parent, borderwidth=5)\n\n ttk.Separator(item_frame, orient=tk.HORIZONTAL).grid(row=0, column=0, sticky=\"new\", columnspan=3, pady=2)\n ttk.Button(item_frame, text=\"Meer Info\", command=partial(display_method, item_idx)).grid(row=1, column=2, rowspan=2, padx=10)\n ttk.Button(item_frame, text=\"Download\", command=partial(download_method, item_idx)).grid(row=3, column=2, padx=10)\n\n i = 1\n for key, value in item.items():\n ttk.Label(item_frame, text=key).grid(row=i, column=0, padx=5)\n ttk.Label(item_frame, text=value, width=50, wraplength=300).grid(row=i, column=1)\n i += 1\n\n item_frame.pack(fill=tk.X, padx=20)\n item_idx += 1\n","sub_path":"documents_frame.py","file_name":"documents_frame.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"360722449","text":"import PIL\nfrom PIL import Image\nim = Image.open (\"dog.jpg\")\nim.show()\npix = im.load()\nprint (im.size)\n\n\n\ndarkBlue = (0, 51, 76)\nred = (217, 26, 33)\nlightBlue = (112, 150, 158)\nyellow = (252, 227, 166)\nnew_image=[]\n#for j in range (1200):\n #for i in range (630):\n #print (pix [i,j])\n#x=1200\n#y=630\n#width=1200\n#height=630\n#for x in width:\n #for y in height:\n #current_color = im.getpixel((x, y))\n #new_color= (0, 51, 76)\n #im.putpixel((x,y), new_color)\n\nfor i in range(1200):\n for j in range(630): \n r=(pix[i, j][0])\n g=(pix[i, j][1])\n b=(pix[i, j][2])\n total=(r+b+g)\n \n if total<182:\n new_image.append((0, 51, 76))\n if total<364 or total>182:\n new_image.append((217, 26, 33))\n \n if total<546 or total>364:\n new_image.append((112, 150, 158))\n if total>546:\n new_image.append((252, 227, 127))\n \n\nim.putdata(new_image)\nim.show()\n","sub_path":"week2-python/obamicontest.py","file_name":"obamicontest.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"53383595","text":"\"\"\"\nscrapeMaps.py\nRetrieve map data automatically\n\nnote that the selenium webdriver for mozilla is required to be in the project folder.\n\nDownload a copy at https://github.com/mozilla/geckodriver/releases\n\"\"\"\n\n# Selenium is a web driver wrapper\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n# to check for alerts:\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\n# some essential python libraries to make it chooch\nimport os\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport random\nimport time\n\n\ndef draw_map():\n \"\"\"\n returns world_map: a Basemap object that contains the entire world. 
This will tell us whether or not a random GPS coordinate is on land\n \"\"\"\n bottomlat = -89.0\n toplat = 89.0\n bottomlong = -170.0\n toplong = 170.0\n gridsize = 0.1\n world_map = Basemap(projection=\"merc\", resolution = 'c', area_thresh = 0.1,llcrnrlon=bottomlong, llcrnrlat=bottomlat, urcrnrlon=toplong, urcrnrlat=toplat)\n world_map.drawcoastlines(color='black')\n return world_map\n\n\ndef main():\n # instantiate a chrome options object so you can set the size and headless preference\n # chrome_options = Options()\n # chrome_options.add_argument(\"--headless\")\n # chrome_options.add_argument(\"--window-size=1920x1080\")\n\n # download the chrome driver from https://sites.google.com/a/chromium.org/chromedriver/downloads and put it in the\n # current directory\n # chrome_driver = os.getcwd() +\"\\\\chromedriver.exe\"\n\n\n # To prevent download dialog\n profile = webdriver.FirefoxProfile()\n profile.set_preference('browser.download.folderList', 2) # custom location\n profile.set_preference('browser.download.manager.showWhenStarting', False)\n profile.set_preference('browser.download.dir', '/tmp')\n profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')\n profile.set_preference(\"browser.download.dir\", \"C:\\\\Users\\\\Bret Nestor\\\\Downloads\\\\\");\n profile.set_preference(\"browser.download.useDownloadDir\", True);\n profile.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \"zip\");\n\n\n\n driver=webdriver.Firefox(profile)\n # driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_driver)\n\n # capture the screen\n # driver.get_screenshot_as_file(\"capture.png\")\n\n # navigate to the page you want to acquire data from\n driver.get(\"http://terrain.party/\")\n search_button = driver.find_element_by_css_selector(\"[title=Search]\")\n save_button = driver.find_element_by_css_selector(\"[title=Export]\")\n smaller_scale = driver.find_element_by_css_selector(\"[title=Contract]\")\n\n # make the scale smaller untilit is 8km large\n while True:\n scale=driver.find_element_by_css_selector(\"[class=scale]\").text.split(\"\\n\")[0]\n print(scale+\" km\")\n if int(float(scale))==8:\n break\n smaller_scale.click()\n\n # instantiate map object\n world_map=draw_map()\n print(\"map is drawn\")\n\n # try to acquire 50000 images\n for i in range(50000):\n while True:\n # # generate a random point that is on land\n lon, lat = random.uniform(-179,179), random.uniform(-89, 89)\n xpt, ypt = world_map( lon, lat ) # convert to projection map\n\n # Check if that point is on the map\n if world_map.is_land(xpt,ypt):\n # if it is on the map, print the name and break\n name=\"map_lon{:4.2f}_lat{:4.2f}\".format(lon, lat) # note that the precision can be changed here.\n print(name)\n print(\"\\n\")\n break\n\n try:\n search_button.click() # click the search button on terrain.party\n search_alert = driver.switch_to_alert()\n print(search_alert.text)\n # enter gps coordinate string of land coordinates\n search_alert.send_keys(\"{}, {}\".format(lat, lon))\n # click ok\n search_alert.accept()\n time.sleep(random.uniform(1,3)) # data should be pulled slowly\n\n # check if it can find that location\n try:\n WebDriverWait(driver, 1).until(EC.alert_is_present(),\n 'Timed out waiting for PA creation ' +\n 'confirmation popup to appear.')\n alert = driver.switch_to.alert\n alert.accept()\n continue\n except TimeoutException:\n # if it doesn't find the location we can continue\n pass\n\n # save the data to a zip folder by clicking the save button\n 
save_button.click()\n save_alert = driver.switch_to_alert()\n\n # enter name to save to in the popup\n name=\"map_lon{:4.2f}_lat{:4.2f}\".format(lat, lon) # the precision can be saved here\n # click ok\n save_alert.send_keys(name)\n save_alert.accept()\n time.sleep(random.uniform(1,7)) # again.... download time is the holdup in this script\n\n # check if there is a problem saving\n try:\n WebDriverWait(driver, 1).until(EC.alert_is_present(),\n 'Timed out waiting for PA creation ' +\n 'confirmation popup to appear.')\n alert = driver.switch_to.alert\n # print(alert.text)\n alert.accept()\n continue\n except TimeoutException:\n # print(\"no alert\")\n pass\n except:\n input(\"did not work\") # break the script and try again\n\n\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"useful_scripts/scrapeMaps.py","file_name":"scrapeMaps.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"624065537","text":"import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n\nclass Feedforward(nn.Module):\n def __init__(self, input_size, hidden_size=32):\n super(Feedforward, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n \n # hidden layers\n self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)\n self.fc2 = torch.nn.Linear(self.hidden_size, self.hidden_size)\n self.fc3 = torch.nn.Linear(self.hidden_size, 1)\n \n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = torch.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n \n \nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2(x), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n \n \nclass FFN(nn.Module):\n def __init__(self):\n super(FFN, self).__init__()\n self.fc1 = nn.Linear(784, 500)\n self.fc2 = nn.Linear(500, 500)\n self.fc3 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = x.view(-1, 784)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x ","sub_path":"src/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"337417515","text":"# Borrowed from https://github.com/divamgupta/image-segmentation-keras\n# Thanks dude.\n# Please use this file in a Jupyter Notebook and uncomment matplotlib inline magic\nimport glob\nimport numpy as np\nimport cv2\nimport random\n# import argparse\nimport matplotlib.pyplot as plt\n# import sys\nimport os\n# %matplotlib inline\n\ndef imageSegmentationGenerator( images_path, GT_path, segs_path, n_classes, n_visualize = 4):\n assert images_path[-1] == '/'\n assert segs_path[-1] == '/'\n assert GT_path[-1] == '/'\n\n segmentations = glob.glob( segs_path + \"*.png\" )\n #segmentations.sort()\n random.seed(12)\n random.shuffle(segmentations)\n colors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255) ) for _ in range(n_classes) ]\n\n #assert len( images ) == len(segmentations)\n\n rows, columns = (min(n_visualize, len(segmentations)), 3)\n fig=plt.figure(figsize=(20,20))\n for idx, seg_fn in 
enumerate(segmentations):\n seg_fn_handle = os.path.splitext(os.path.basename(seg_fn))[0]\n im_fn = os.path.join(images_path, seg_fn_handle+'.jpg')\n gt_fn = os.path.join(GT_path, seg_fn_handle+'.png')\n print(idx, '/', len(segmentations), \":\", seg_fn_handle)\n\n img = cv2.imread( im_fn )\n gt = cv2.imread( gt_fn )\n seg = cv2.imread( seg_fn )\n print(img.shape, gt.shape, seg.shape)\n print(np.unique(seg))\n\n seg_img = np.zeros_like( img )\n gt_img = np.zeros_like( img )\n\n for c in range(n_classes):\n seg_img[:,:,0] += ( (seg[:,:,0] == c )*( colors[c][0] )).astype('uint8')\n seg_img[:,:,1] += ((seg[:,:,0] == c )*( colors[c][1] )).astype('uint8')\n seg_img[:,:,2] += ((seg[:,:,0] == c )*( colors[c][2] )).astype('uint8')\n gt_img[:,:,0] += ( (gt[:,:,0] == c )*( colors[c][0] )).astype('uint8')\n gt_img[:,:,1] += ((gt[:,:,0] == c )*( colors[c][1] )).astype('uint8')\n gt_img[:,:,2] += ((gt[:,:,0] == c )*( colors[c][2] )).astype('uint8')\n\n fig.add_subplot(rows, columns, 3*idx+1)\n plt.imshow( img )\n fig.add_subplot(rows, columns, 3*idx+2)\n plt.imshow( gt_img )\n fig.add_subplot(rows, columns, 3*idx+3)\n plt.imshow( seg_img )\n if idx >= n_visualize-1:\n break\n plt.show()\n\nif __name__ == '__main__':\n img_path = '/home/niu/Liang_Niu3/IIT_Affordances_2017/rgb/'\n pred_path = '/home/niu/src/Keras-FCN/Models/AtrousFCN_Resnet50_16s/res/' # predicted result\n GT_path = '/home/niu/Liang_Niu3/IIT_Affordances_2017/affordances_labels_png/' # Ground Truth\n n_classes = 10\n print(f\"img_path:{img_path}, pred_path:{pred_path}, GT_path:{GT_path}, n_classes:{n_classes}\")\n imageSegmentationGenerator(img_path, GT_path, pred_path, n_classes)\n","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"23838031","text":"# coding: utf8\n\nfrom celery import Celery\nfrom config.config import DevelopmentConfig\n\nbroker = DevelopmentConfig.CELERY_BROKER\nbackend = DevelopmentConfig.CELERY_BACKEND\n\ncelery_app = Celery(\n 'my-celery',\n broker=broker,\n backend=backend,\n include=['app.job.job_tasks', ]\n)\n\ncelery_app.conf.timezone = 'Asia/Shanghai'\ncelery_app.conf.enable_utc = False\n\n\nif __name__ == '__main__':\n celery_app.start()\n\n","sub_path":"scheduler/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"300264947","text":"# imediately after the sandbox scan finishes without breaking build, execute this as a post step to promote the latest sandbox scan\n#\nimport os\nimport subprocess\nimport sys\nimport json\nimport xml.etree.ElementTree as ET\n#\nJavaWrapperVersion=\"21.6.8.0\" # capture latest version https://search.maven.org/search?q=a:vosp-api-wrappers-java \n#\ndef promote_sandbox():\n \n try:\n # GET APP LIST\n # applist_xml = os.system(\"java -jar VeracodeJavaAPI.jar -action getapplist\")\n # print(applist_xml)\n \n # DOWNLOAD API WRAPPER \n os.system('curl -sSo VeracodeJavaAPI.jar https://repo1.maven.org/maven2/com/veracode/vosp/api/wrappers/vosp-api-wrappers-java/'+str(JavaWrapperVersion)+'/vosp-api-wrappers-java-'+str(JavaWrapperVersion)+'.jar')\n \n #obtain the app id from the command line argument\n appid = str(sys.argv[1])\n \n #run the java call as a subprocess and store results in buildinfo_xml\n buildinfo_xml = subprocess.run([\"java\", \"-jar\", \"VeracodeJavaAPI.jar\", \"-action\", \"getbuildinfo\" , 
\"-appid\", appid], stdout=subprocess.PIPE, text=True)\n \n # write xml response to buildinfo as a dict\n buildinfo = ET.fromstring(buildinfo_xml.stdout)\n \n for x in buildinfo:\n # set variables\n build_id = str(x.attrib['build_id'])\n results_ready = x.attrib['results_ready']\n rules_status = x.attrib['rules_status']\n # if results are ready and policy evaluattion passed proceed with promoting scan\n if results_ready=='true' and rules_status=='Pass':\n # execute sandbox promotion\n promote_sandbox_cmd = subprocess.run([\"java\", \"-jar\", \"VeracodeJavaAPI.jar\", \"-action\", \"promotesandbox\" , \"-buildid\", build_id], stdout=subprocess.PIPE, text=True)\n print(promote_sandbox_cmd.stdout)\n\n else:\n # break\n print(\"Alert, build didn't promote\")\n except:\n sys.exit(0)\n\n\ndef main():\n #\n promote_sandbox()\n #\n\nmain()","sub_path":"veracode-promote-sandbox.py","file_name":"veracode-promote-sandbox.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"414269118","text":"import os\n\nfrom charismatic.config import ConfigManager\n\nfrom fabric.api import puts, local\nfrom fabric.colors import green\n\ndef hostsfile():\n etchosts = os.environ[\"HOSTSFILE\"]\n metadata = ConfigManager(os.environ[\"CONFIGFILE\"])\n\n puts(green(\"opening [%s] for writing.\" % etchosts))\n\n with open(etchosts, 'w') as f:\n f.write(\"\"\"\n127.0.0.1 localhost.localdomain localhost\n\"\"\")\n for server in sorted(metadata.servers):\n fip = metadata.servers[server][\"fip\"]\n ip = metadata.servers[server][\"ip\"]\n f.write(\"\"\"\n{} {}.external {}\n{} {}.internal\n\"\"\".format(fip, server, server, ip, server, server))\n\n","sub_path":"rounds/round001/hostsfile/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"124505974","text":"import matplotlib\nmatplotlib.use('Agg')\nfrom locker import analysis as ana\nimport datajoint as dj\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlock = dj.U('cell_id', 'contrast', 'delta_f') & (ana.Cells() * ana.Runs() * ana.SecondOrderSignificantPeaks() \\\n & dict(cell_type='p-unit', stimulus_coeff=1, baseline_coeff=0,\n eod_coeff=0, am=0, n_harmonics=0) \\\n & 'MOD(delta_f, 100) = 0 and contrast >= 10')\n\ntested = dj.U('cell_id', 'contrast', 'delta_f') & (ana.Cells() * ana.Runs() \\\n & dict(cell_type='p-unit', stimulus_coeff=1, baseline_coeff=0,\n eod_coeff=0, am=0, n_harmonics=0) \\\n & 'MOD(delta_f, 100) = 0 and contrast >= 10')\n\ndf_num = pd.DataFrame(lock.fetch())\ndf_denom = pd.DataFrame(tested.fetch())\n# df_num['locking'] = 1\n\ngr = ['contrast', 'delta_f']\ndf_num = df_num.groupby(gr).count()\ndf_denom = df_denom.groupby(gr).count()\nperc = df_num / df_denom * 100\nperc = perc.reset_index()\nperc.columns = ['contrast', 'delta_f', 'lock']\nperc['delta_f'] = ['%.0f' % c for c in perc['delta_f']]\nperc['contrast'] = ['%.0f%%' % c for c in perc['contrast']]\nsns.set_context('paper')\n\ndfn = np.arange(-500, 600,100)\ndf = ['%.0f' % c for c in dfn]\nwith sns.axes_style('ticks'):\n fig, ax = plt.subplots(2, 1, sharex=True, figsize=(4,4))\nperc = perc.set_index(['contrast','delta_f'])\n\ndf_num = df_num.reset_index()\ndf_num['contrast'] = ['%.0f%%' % c for c in df_num['contrast']]\ndf_num = df_num.set_index(['contrast','delta_f'])\nw = 30\nfor contrast, shift, color in zip(['10%', '20%'], 
[+1,-1], ['lightgrey','grey']):\n\n ax[0].bar(dfn - shift*w/2, df_num.ix[contrast].ix[dfn,'cell_id'], align='center', width=w, lw=0, color=color,\n label=contrast)\n ax[1].bar(dfn - shift*w/2, perc.ix[contrast].ix[df,'lock'], align='center', width=w, lw=0, color=color)\n\nax[0].legend(title='contrast', ncol=2, bbox_to_anchor=(.5, 0.95), frameon=False)\nax[1].set_xticks(dfn)\nax[1].set_xticklabels(df, rotation='vertical')\nax[1].set_xlabel(r'$\\Delta f$ [Hz]')\nax[0].yaxis.set_label_coords(-0.1, 0.5)\nax[1].yaxis.set_label_coords(-0.1, 0.5)\nax[0].set_ylabel('locking cells')\nax[1].set_ylabel('locking cells [% tested]')\nsns.despine(fig, trim=True)\nfor a in ax:\n a.tick_params('both', length=3, width=1, which='both')\n for axis in ['top', 'bottom', 'left', 'right']:\n a.spines[axis].set_linewidth(1)\n\nfig.tight_layout()\nfig.subplots_adjust(top=.9)\nplt.savefig('figures/summary.pdf')\n","sub_path":"scripts/fig4_summary.py","file_name":"fig4_summary.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"38660249","text":"# This function is used to collect the metadata of the GSV panoramas based on the sample point shapefile\n\n# Copyright(C) Xiaojiang Li, Ian Seiferling, Marwa Abdulhai, Senseable City Lab, MIT \n\ndef GSVpanoMetadataCollector(samplesFeatureClass,num,ouputTextFolder, key):\n '''\n This function is used to call the Google API url to collect the metadata of\n Google Street View Panoramas. The input of the function is the shpfile of the create sample site, the output\n is the generate panoinfo matrics stored in the text file\n \n Parameters: \n samplesFeatureClass: the shapefile of the create sample sites\n num: the number of sites proced every time\n ouputTextFolder: the output folder for the panoinfo\n \n '''\n \n import xmltodict\n import ogr, osr\n import time\n import os,os.path\n import sys\n import json\n\n\n if not os.path.exists(ouputTextFolder):\n os.makedirs(ouputTextFolder)\n \n driver = ogr.GetDriverByName('ESRI Shapefile')\n \n\n # change the projection of shapefile to the WGS84\n dataset = driver.Open(samplesFeatureClass)\n layer = dataset.GetLayer()\n \n sourceProj = layer.GetSpatialRef()\n targetProj = osr.SpatialReference()\n targetProj.ImportFromEPSG(4326)\n transform = osr.CoordinateTransformation(sourceProj, targetProj)\n \n # loop all the features in the featureclass\n feature = layer.GetNextFeature()\n featureNum = layer.GetFeatureCount()\n # batch = featureNum/num \n batch = int(featureNum/num + 0.5)\n\n print(batch)\n for b in range(batch):\n # for each batch process num GSV site\n start = b*num\n end = (b+1)*num\n if end > featureNum:\n end = featureNum\n \n ouputTextFile = 'Pnt_start%s_end%s.txt'%(start,end)\n ouputGSVinfoFile = os.path.join(ouputTextFolder,ouputTextFile)\n \n # skip over those existing txt files\n if os.path.exists(ouputGSVinfoFile):\n continue\n \n time.sleep(1)\n \n with open(ouputGSVinfoFile, 'w') as panoInfoText:\n # process num feature each time\n for i in range(start, end):\n feature = layer.GetFeature(i) \n geom = feature.GetGeometryRef()\n \n # trasform the current projection of input shapefile to WGS84\n #WGS84 is Earth centered, earth fixed terrestrial ref system\n geom.Transform(transform)\n lon = geom.GetX()\n lat = geom.GetY()\n \n\n # get the meta data of panoramas \n # urlAddress = r'http://maps.google.com/cbk?output=xml&ll=%s,%s&key=%s'%(lat,lon,key)\n urlAddress = 
r'https://maps.googleapis.com/maps/api/streetview/metadata?size=600x300&location=%s,%s&heading=-45&pitch=42&fov=110&key=%s'%(lon, lat, key)\n time.sleep(0.1)\n \n\n # using different url reading method in python2 and python3\n if sys.version_info[0] == 2:\n # from urllib2 import urlopen\n import urllib\n \n metaData = urllib.urlopen(urlAddress).read()\n \n if sys.version_info[0] == 3:\n import urllib.request\n \n request = urllib.request.Request(urlAddress)\n metaData = urllib.request.urlopen(request).read()\n\n \n data = json.loads(metaData)\n panoDate = data['date']\n panoId = data['pano_id']\n panoLat = data['location']['lat']\n panoLon = data['location']['lng']\n\n # print ('The coordinate (%s,%s), panoId is: %s, panoDate is: %s')%(panoLon,panoLat,panoId, panoDate)\n lineTxt = 'panoID: %s panoDate: %s longitude: %s latitude: %s\\n'%(panoId, panoDate, panoLon, panoLat)\n panoInfoText.write(lineTxt)\n \n panoInfoText.close()\n\n\n# ------------Main Function ------------------- \nif __name__ == \"__main__\":\n\n import os, os.path\n import sys\n import argparse\n\n\n parser = argparse.ArgumentParser(\n description=\"parameters\"\n )\n\n parser.add_argument(\n \"--inputshp\",\n required=True,\n type=str,\n help=\"the path of the shapefile\"\n )\n\n parser.add_argument(\n \"--outdir\",\n default=\"mosaic-mrt.tif\",\n help=\"the output dir for the meta txt file\",\n type=str,\n )\n\n parser.add_argument(\n \"--key\",\n default=\"\",\n help=\"Google Street View key\",\n type=str,\n )\n\n\n args = parser.parse_args()\n inputshp = args.inputshp\n outdir = args.outdir\n key = args.key\n\n print(inputshp, outdir, key)\n GSVpanoMetadataCollector(inputshp, 1000, outdir, key)\n \n\n ## call example\n # python metadataCollector.py \\\n # --inputshp='/Users/senseablecity/Dropbox (MIT)/ResearchProj/treepedia/cities-proj/Oakland/OaklandSlowStreets/SlowStreets_points/SS_20m.shp' \\\n # --outdir='/Users/senseablecity/Dropbox (MIT)/ResearchProj/treepedia/cities-proj/Oakland/OaklandSlowStreets/SlowStreets_points' \\\n # --key=''\n\n","sub_path":"Treepedia/metadataCollector.py","file_name":"metadataCollector.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"91330929","text":"import os\nimport sys\nimport numpy as np\nimport torch\nimport utils\nimport logging\nimport argparse\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom model_search import Network\n\n\nclass Architect(object):\n \"\"\"\n Base class for the architect, which is just an optimizer (different from\n the one used to update the model parameters) for the architectural parameters.\n \"\"\"\n def __init__(self, model):\n \"\"\"\n :model: nn.Module; search model\n \"\"\"\n self.model = model\n self.criterion = nn.CrossEntropyLoss()\n self.arch_optimizer = torch.optim.Adam(\n self.model.arch_parameters,\n lr=3e-4,\n betas=(0.5, 0.999),\n weight_decay=1e-3\n )\n\n def step(self, input_valid, target_valid):\n \"\"\"\n This method computes a gradient step in the architecture space, i.e.\n updates the self.model.alphas_normal and self.model.alphas_reduce by\n the gradient of the validation loss with respect to these alpha\n parameters.\n :input_valid: torch.Tensor; validation mini-batch\n :target_valid: torch.Tensor: ground truth labels of this mini-batch\n \"\"\"\n self.arch_optimizer.zero_grad()\n #TODO: do a forward pass using the validation mini-batch input\n outputs = self.model(input_valid)\n 
#TODO: compute the loss using self.criterion and backpropagate to\n # compute the gradients w.r.t. the alphas\n loss = self.criterion(outputs,target_valid)\n loss.backward()\n\n #TODO: do a step in the architecture space using the\n # self.arch_optimizer\n self.arch_optimizer.step()\n \ndef train(train_loader, valid_loader, model, architect, criterion,\n optimizer, device):\n \"\"\"\n Training loop. This function computes the DARTS loop, i.e. it takes one\n step in the architecture space and one in the weight space in an\n interleaving manner. For the architectural updates we use the validation\n set and for the search model parameters updates the training set. In DARTS\n these two sets have equal sizes, which in the case of MNIST it is 30k\n examples per each.\n \"\"\"\n objs = utils.AvgrageMeter()\n accr = utils.AvgrageMeter()\n model.train()\n\n for step, (input, target) in enumerate(train_loader):\n input, target = input.to(device), target.to(device)\n\n # get a random minibatch from the search queue with replacement and\n # update the architectural parameters with the validation loss gradient\n # NOTE: The model parameters are kept fixed here, just the alphas are\n # updated\n input_search, target_search = next(iter(valid_loader))\n architect.step(input_search.to(device), target_search.to(device))\n\n # update the search model parameters with the updated architecture\n # NOTE: The architecture is kept fixed here, just the search model\n # weights/parameters are updated\n optimizer.zero_grad()\n logits = model(input)\n loss = criterion(logits, target)\n loss.backward()\n optimizer.step()\n\n _accr = utils.accuracy(logits, target)\n objs.update(loss.item(), input.size(0))\n accr.update(_accr.item(), input.size(0))\n\n logging.info('train mini-batch %03d, loss=%e accuracy=%f', step,\n objs.avg, accr.avg)\n\n return accr.avg, objs.avg\n\n\ndef infer(valid_loader, model, criterion, device):\n \"\"\"\n Compute the accuracy on the validation set (the same used for updating the\n architecture).\n \"\"\"\n objs = utils.AvgrageMeter()\n accr = utils.AvgrageMeter()\n model.eval()\n\n with torch.no_grad():\n for step, (input, target) in enumerate(valid_loader):\n input, target = input.to(device), target.to(device)\n\n logits = model(input)\n loss = criterion(logits, target)\n\n _accr= utils.accuracy(logits, target)\n objs.update(loss.item(), input.size(0))\n accr.update(_accr.item(), input.size(0))\n\n logging.info('valid mini-batch %03d, loss=%e accuracy=%f', step,\n objs.avg, accr.avg)\n\n return accr.avg, objs.avg\n\n\ndef main(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n logging.info(\"args = %s\", args)\n\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader, valid_loader = utils.search_dataloader(args, kwargs)\n\n criterion = nn.CrossEntropyLoss().to(device)\n model = Network(device, nodes=2).to(device)\n\n logging.info(\"param size = %fMB\",\n np.sum(np.prod(v.size()) for name, v in model.named_parameters())/1e6)\n\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=0.9,\n weight_decay=args.weight_decay)\n\n architect = Architect(model)\n\n for epoch in range(args.epochs):\n logging.info(\"Starting epoch %d/%d\", epoch+1, args.epochs)\n\n # training\n train_acc, train_obj = train(train_loader, valid_loader, model,\n architect, criterion, optimizer, device)\n logging.info('train_acc %f', train_acc)\n\n # 
validation\n valid_acc, valid_obj = infer(valid_loader, model, criterion, device)\n logging.info('valid_acc %f', valid_acc)\n\n # compute the discrete architecture from the current alphas\n genotype = model.genotype()\n logging.info('genotype = %s', genotype)\n\n print(F.softmax(model.alphas_normal, dim=-1))\n print(F.softmax(model.alphas_reduce, dim=-1))\n\n with open(args.save + '/architecture', 'w') as f:\n f.write(str(genotype))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"mnist\")\n parser.add_argument('--data', type=str, default='./data', help='location of the data corpus')\n parser.add_argument('--batch_size', type=int, default=64, help='batch size')\n parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')\n parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')\n parser.add_argument('--epochs', type=int, default=5, help='num of training epochs')\n parser.add_argument('--save', type=str, default='logs', help='path to logs')\n parser.add_argument('--seed', type=int, default=2, help='random seed')\n args = parser.parse_args()\n\n # logging utilities\n os.makedirs(args.save, exist_ok=True)\n log_format = '%(asctime)s %(message)s'\n logging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\n fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\n fh.setFormatter(logging.Formatter(log_format))\n logging.getLogger().addHandler(fh)\n\n main(args)\n\n","sub_path":"ex07/src/train_search.py","file_name":"train_search.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"118238624","text":"import sys, serial, os\nfrom PyQt5.QtWidgets import QApplication,QMainWindow, QDesktopWidget,QMessageBox\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtGui import QPixmap\n\n# ------------------------------------------------------------------------------------------------------------------------------------------\nclass ventanaOsciloscopio(QMainWindow):\n def __init__(self):\n super().__init__()\n loadUi(\"osciloscopio1.ui\", self)\n self.vDivision=0\n self.sDivision=0\n self.verificacionComSerial = 0\n self.tiempoEntreMuestras = 10 #tiempoEnms\n self.fn_alinearVentanaIzquierda()\n self.setWindowTitle(' Osciloscopio')\n self.labelvDivision.setText(str(self.vDivision))\n self.labelsDivision.setText(str(self.sDivision))\n self.labelLogo.setPixmap(QPixmap('logo.png'))\n# ------------------------------------------------------------------------------------------------------------------------------------------\n self.botonIniciarSerial.clicked.connect(self.fn_iniciarSerial)\n self.botonIniciarGrafica.clicked.connect(self.fn_IniciarGrafica)\n self.slidervDvision.valueChanged.connect(self.fn_slidervDivision)\n self.slidersDvision.valueChanged.connect(self.fn_slidersDivision)\n# ------------------------------------------------------------------------------------------------------------------------------------------\n def fn_alinearVentanaIzquierda(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveBottomRight(cp)\n self.move(qr.topLeft())\n# ------------------------------------------------------------------------------------------------------------------------------------------\n def fn_iniciarSerial(self):\n try:\n self.comSerial = serial.Serial(self.nombrePuertoSerial.text(), 9600)\n self.comSerial.close()\n QMessageBox.about(self, 
\" \", \"La comunicación serial funciona correctamente\")\n self.verificacionComSerial = 1\n except:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"Error\")\n msg.setInformativeText('Existe un error, verifique la conexión y los parámetros de la comunicación serial')\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n self.verificacionComSerial = 2\n# ------------------------------------------------------------------------------------------------------------------------------------------\n def fn_IniciarGrafica(self):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"Error\")\n if self.sDivision == 0 or self.vDivision == 0:\n msg.setInformativeText('V/División y S/División deben ser diferentes de cero')\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n elif self.verificacionComSerial == 0:\n msg.setInformativeText('No se ha verificado la comunicación serial')\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n elif self.verificacionComSerial == 2:\n msg.setInformativeText('Existe un error, verifique la conexión y los parámetros de la comunicación serial')\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n else:\n archivo = open(\"parametros.txt\",\"w\")\n archivo.write(\"10\\n\") #limite eje x\n archivo.write(str(int(10*self.sDivision/(self.tiempoEntreMuestras/1000)))+'\\n') #numero de muestras\n archivo.write(str(self.tiempoEntreMuestras)+'\\n') #tiempo entre muestras para generar la gráfica\n archivo.write(str(self.comSerial.port)+'\\n') #com\n archivo.write(str(self.comSerial.baudrate)+'\\n') #velocidad com\n archivo.write(str(self.vDivision))\n archivo.close()\n os.system('python pruebaGraficaConObjeto.py')\n# ------------------------------------------------------------------------------------------------------------------------------------------\n def fn_slidervDivision(self,value):\n self.vDivision = value/10\n self.labelvDivision.setText(str(self.vDivision))\n# ------------------------------------------------------------------------------------------------------------------------------------------\n def fn_slidersDivision(self,value):\n self.sDivision = value\n self.labelsDivision.setText(str(value))\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n guiOsciloscopio = ventanaOsciloscopio()\n guiOsciloscopio.show()\n sys.exit(app.exec_())\n","sub_path":"osciloscopio1.py","file_name":"osciloscopio1.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"47599045","text":"from os import error\nimport numpy as np\nfrom numpy.core.fromnumeric import shape\nfrom scipy import sparse\nimport scipy.linalg\nimport math\nimport scipy.io as scio\nimport osqp\nimport matplotlib.pyplot as plt\nimport time\n\n# def t_n_list(t,n_order):\n# t_array_p = []\n# for i in range(n_order+1):\n# t_array_p.append(pow(t,i))\n# return t_array_p\n\n# def plot_xy_traj(x_traj,y_traj,ref_x,ref_y):\n# fig = plt.figure()\n# plt.scatter(x_traj, y_traj,marker = 'x',color = 'blue', s = 2 ,label = 'state')\n# plt.scatter(ref_x,ref_y,marker = 'x',color = 'red', s = 2 ,label = 'state')\n# plt.show()\n\n# def gen_traj(Matrix_x,Matrix_y,time_set,n_order):\n# p_x_traj = []\n# p_y_traj = []\n# k = 0\n# i = 0\n# for t in range(0,math.floor(time_set[-1]*100)+1):\n# while True:\n# if t/100>=time_set[i] and t/100<=time_set[i+1]:\n# break\n# else:\n# k =k+1\n# i = i+1\n# break\n# p_x_traj.append(np.dot(np.array(t_n_list(t/100,n_order)),Matrix_x[ : , k]))\n# 
p_y_traj.append(np.dot(np.array(t_n_list(t/100,n_order)),Matrix_y[ : , k]))\n# return p_x_traj,p_y_traj\n\nclass MinSnap:\n def __init__(self) -> None:\n pass\n\n def minsnap_trajectory_single_axis(self, way_points, time_set, n_order, n_obj, v_i, a_i, v_e, a_e):\n # Set init and end point\n p_i = way_points[0]\n p_e = way_points[-1]\n \n # Set poly num and coeffient num\n n_poly = len(way_points) - 1\n n_coef = n_order + 1\n\n # Compute QP cost function matrix\n q_i_cost = []\n for i in range(n_poly):\n q_i_cost.append(self.compute_q(n_order+1, n_obj, time_set[i], time_set[i+1]))\n q_cost = scipy.linalg.block_diag(*q_i_cost)\n p_cost = np.zeros(q_cost.shape[0])\n\n # Set equality constraints\n A_eq = np.zeros((4*n_poly + 2, n_coef*n_poly))\n b_eq = np.zeros((4*n_poly + 2, 1))\n # Init and End constraints: 6\n A_eq[0:3,0:n_coef] = [self.compute_t_vec(time_set[0], n_order, 0), \\\n self.compute_t_vec(time_set[0], n_order, 1), \\\n self.compute_t_vec(time_set[0], n_order, 2)]\n A_eq[3:6,n_coef*(n_poly-1):n_coef*n_poly] = \\\n [self.compute_t_vec(time_set[-1], n_order, 0), \\\n self.compute_t_vec(time_set[-1], n_order, 1), \\\n self.compute_t_vec(time_set[-1], n_order, 2)]\n b_eq[0:6] = np.transpose(np.array([[p_i, v_i, a_i, p_e, v_e, a_e] ]))\n # Points constraints: n_poly - 1\n n_eq = 6\n for i in range(0, n_poly-1):\n A_eq[n_eq, n_coef*(i+1):n_coef*(i+2)] = self.compute_t_vec(time_set[i+1], n_order, 0)\n b_eq[n_eq] = np.array([[way_points[i+1]]])\n n_eq = n_eq+1\n # Continous constraints: (n_poly - 1)*3\n for i in range(0, n_poly-1):\n t_vec_p = self.compute_t_vec(time_set[i+1], n_order, 0)\n t_vec_v = self.compute_t_vec(time_set[i+1], n_order, 1)\n t_vec_a = self.compute_t_vec(time_set[i+1], n_order, 2)\n A_eq[n_eq,n_coef*i:n_coef*(i+1)]=t_vec_p\n A_eq[n_eq,n_coef*(i+1):n_coef*(i+2)]=-t_vec_p\n n_eq=n_eq+1\n A_eq[n_eq,n_coef*i:n_coef*(i+1)]=t_vec_v\n A_eq[n_eq,n_coef*(i+1):n_coef*(i+2)]=-t_vec_v\n n_eq=n_eq+1\n A_eq[n_eq,n_coef*i:n_coef*(i+1)]=t_vec_a\n A_eq[n_eq,n_coef*(i+1):n_coef*(i+2)]=-t_vec_a\n n_eq=n_eq+1\n \n # Set inequality constraints\n A_ieq = np.zeros((0, n_coef*n_poly))\n b_ieq = np.zeros((0, 1))\n\n # Convert equality constraints to inequality constraints\n A_eq_ieq = np.vstack((A_eq, -1 * A_eq))\n b_eq_ieq = np.vstack((b_eq, -1 * b_eq))\n # A_ieq = np.vstack((A_ieq, A_eq_ieq))\n # b_ieq = np.vstack((b_ieq, b_eq_ieq))\n A_ieq = A_eq_ieq\n b_ieq = b_eq_ieq\n\n # Solve qp(sqp) problem\n m = osqp.OSQP()\n m.setup(P=sparse.csc_matrix(q_cost), q=None, l=None, A=sparse.csc_matrix(A_ieq), u=b_ieq, verbose=False)\n results = m.solve()\n x = results.x\n return x\n\n def compute_q(self, n, r , t_i, t_e):\n q = np.zeros((n,n))\n for i in range(r, n):\n for j in range(i, n):\n k1 = i - r \n k2 = j - r \n k = k1 + k2 +1\n q[i, j] = (math.factorial(i)/math.factorial(k1)) * (math.factorial(j)/math.factorial(k2)) * (pow(t_e,k)-pow(t_i,k)) / k\n q[j,i] = q[i,j]\n return q\n\n def compute_t_vec(self, t, n_order, k):\n t_vector = np.zeros(n_order+1)\n for i in range(k, n_order+1):\n t_vector[i] = (math.factorial(i)/math.factorial(i-k)) * pow(t, i-k)\n return t_vector\n\n def time_arrange(self, way_points, T):\n dist_vec = way_points[:, 1:] - way_points[:, :-1]\n dist = []\n for i in range(dist_vec.shape[1]):\n dist_i = 0\n for j in range(dist_vec.shape[0]):\n dist_i += dist_vec[j][i] **2\n dist_i = dist_i **0.5\n dist.append(dist_i)\n k = T/sum(dist)\n time_set = [0]\n time_i = np.array(dist)*k\n for i in range(1,len(time_i)):\n time_i[i] = time_i[i] + time_i[i-1]\n time_set.extend(time_i)\n 
return time_set\n\ndef minimum_snap_traj(way_points):\n start_t = time.time()\n traj = MinSnap()\n # Reshape waypoints matrix\n way_points_n = np.array(way_points)[:,0:3]\n # Manual adjustment \n way_points_n[1:-1,2] = way_points_n[1:-1,2] + np.ones((1,way_points_n.shape[0]-2))*1\n way_points_n[-1,-1] = way_points_n[-2,-1] \n way_points_n = np.transpose(way_points_n)\n # Total time (s)\n T = 40\n # Poly order\n n_order = 8\n # Object order: 1-minimum vel 2-mimimum acc 3-minimum jerk 4-minimum snap\n n_obj = 3\n # Poly num\n n_poly = len(way_points) - 1\n v_i = [0,0,0,0]\n a_i = [0,0,0,0]\n v_e = [0,0,0,0]\n a_e = [0,0,0,0]\n # Arrange time roughly\n time_set = traj.time_arrange(way_points_n,T)\n # Adjust time manually\n # for i in range(1,len(time_set)):\n # time_set[i] = time_set[i] + 1\n p_x = traj.minsnap_trajectory_single_axis(way_points_n[0,:], time_set, n_order, n_obj, v_i[0], a_i[0], v_e[0], a_e[0])\n p_y = traj.minsnap_trajectory_single_axis(way_points_n[1,:], time_set, n_order, n_obj, v_i[1], a_i[1], v_e[1], a_e[1])\n p_z = traj.minsnap_trajectory_single_axis(way_points_n[2,:], time_set, n_order, n_obj, v_i[2], a_i[2], v_e[2], a_e[2])\n Matrix_x = np.transpose(p_x.reshape(n_poly,n_order+1))\n Matrix_y = np.transpose(p_y.reshape(n_poly,n_order+1))\n Matrix_z = np.transpose(p_z.reshape(n_poly,n_order+1))\n end_t = time.time()\n # print(end_t-start_t)\n return time_set, Matrix_x, Matrix_y, Matrix_z\n\ndef minimum_snap_traj_p2p(traj, way_points, time_set, n_order, n_obj, v_i, a_i, v_e, a_e):\n start_t = time.time()\n # Poly num\n n_poly = way_points.shape[1] - 1\n p_x = traj.minsnap_trajectory_single_axis(way_points[0,:], time_set, n_order, n_obj, v_i[0], a_i[0], v_e[0], a_e[0])\n p_y = traj.minsnap_trajectory_single_axis(way_points[1,:], time_set, n_order, n_obj, v_i[1], a_i[1], v_e[1], a_e[1])\n p_z = traj.minsnap_trajectory_single_axis(way_points[2,:], time_set, n_order, n_obj, v_i[2], a_i[2], v_e[2], a_e[2])\n Matrix_x = np.transpose(p_x.reshape(n_poly,n_order+1))\n Matrix_y = np.transpose(p_y.reshape(n_poly,n_order+1))\n Matrix_z = np.transpose(p_z.reshape(n_poly,n_order+1))\n end_t = time.time()\n print(end_t-start_t)\n return Matrix_x, Matrix_y, Matrix_z\n\ndef mimimum_snap_traj_p2p_id(way_points, point_id, p_i, v_i, a_i):\n traj = MinSnap()\n # Reshape waypoints matrix\n way_points_n = np.array(way_points)[:,0:3]\n # Manual adjustment \n way_points_n[1:-1,2] = way_points_n[1:-1,2] + np.ones((1,way_points_n.shape[0]-2))*1\n way_points_n[-1,-1] = way_points_n[-2,-1] # land point Z set equ to last circle\n way_points_n[1,1] = way_points_n[1,1] + 10# first circle Y forward\n way_points_n = np.transpose(way_points_n)\n # Total time (s)\n T = 40\n # Poly order\n n_order = 6\n # Object order: 1-minimum vel 2-mimimum acc 3-minimum jerk 4-minimum snap\n n_obj = 3\n # Set end speed \n speed = 4\n v_e = []\n a_e = []\n for i in range(len(way_points)):\n v_e.append([speed*np.cos(way_points[i][3]), speed*np.sin(way_points[i][3]), 0, 0])\n a_e.append([0, 0, 0, 0])\n # Arrange time roughly\n time_set = traj.time_arrange(way_points_n,T)\n # Adjust time manually\n # for i in range(1,len(time_set)):\n # time_set[i] = time_set[i] + 1\n p2p = np.zeros((3,2))\n p2p[:,0] = p_i\n p2p[:,1] = way_points_n[:,point_id+1]\n time_p2p = np.array(time_set[point_id:point_id+2])-np.array([time_set[point_id], time_set[point_id]])\n return minimum_snap_traj_p2p(traj, p2p, time_p2p, \\
 n_order, n_obj, v_i, a_i, v_e[point_id+1], 
a_e[point_id+1])","sub_path":"minsnap_traj.py","file_name":"minsnap_traj.py","file_ext":"py","file_size_in_byte":9259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"7123326","text":"'''\r\nCreated on 30 Jan 2018\r\n\r\n@author: neeraj.mahajan\r\n'''\r\nfrom flask import jsonify\r\nfrom flask.globals import request\r\nfrom flask_restful import reqparse, Resource\r\nfrom delivery_db_api.exception import ObjectNotFound\r\nfrom delivery_db_api.models.system import SystemModel\r\nfrom delivery_db_api.models.system_element import SystemElementModel\r\nfrom delivery_db_api.models.system_element_component import SystemElementComponentModel\r\nfrom delivery_db_api.models.system_version import SystemVersionModel\r\nfrom delivery_db_api.resources.abstract_resource import AbstractResource\r\nfrom delivery_db_api.security import authenticate\r\n\r\n\r\nclass SystemElement(AbstractResource):\r\n '''\r\n This method creates a new System Element in the delivery database\r\n '''\r\n\r\n def add_argument_for_parsing(self, parser):\r\n '''Method to add arguments for validating the request'''\r\n parser.add_argument('system_element_id', type=int, required=False)\r\n parser.add_argument('system_element_name', type=str, required=False)\r\n parser.add_argument(\r\n 'system_element_short_name',\r\n type=str,\r\n required=False)\r\n parser.add_argument('system_element_type_id', type=int, required=False)\r\n parser.add_argument('system_id', type=int, required=False)\r\n\r\n def get_model(self):\r\n ''' Returns the model class used'''\r\n return SystemElementModel\r\n\r\n @authenticate\r\n def post(self):\r\n '''\r\n This post method will creates a new System Element in the delivery database\r\n '''\r\n request_data = request.get_json()\r\n system_id = request_data[\"system_id\"]\r\n system_element_name = request_data[\"system_element_name\"]\r\n system_element_short_name = request_data[\"system_element_short_name\"]\r\n system_element_type_id = request_data[\"system_element_type_id\"]\r\n\r\n search_keys = {\"system_id\": system_id}\r\n\r\n try:\r\n systems_from_db = SystemModel.find_generic(\r\n **search_keys)\r\n systems_from_db = systems_from_db[0]\r\n for system_element in systems_from_db.system_elements:\r\n if system_element.system_element_name == system_element_name:\r\n return {\"message\": \"System Element with name \" +\r\n system_element_name + \" already exist\"}, 400\r\n\r\n new_system_element = SystemElementModel(\r\n system_element_name=system_element_name,\r\n system_element_short_name=system_element_short_name,\r\n system_element_type_id=system_element_type_id)\r\n\r\n systems_from_db.system_elements.append(new_system_element)\r\n systems_from_db.save_to_db()\r\n return {\"system_element_id\": new_system_element.system_element_id}\r\n\r\n except ObjectNotFound as exception:\r\n return exception.message, exception.status_code\r\n\r\nimport ast\r\nclass SystemElementDetailsByEnvID(Resource):\r\n '''\r\n This method get System Element, System version in the delivery database\r\n '''\r\n\r\n def add_argument_for_parsing(self, parser):\r\n '''Method to add arguments for validating the request'''\r\n parser.add_argument('system_id', type=int, required=False)\r\n parser.add_argument('env_id', type=int, required=False)\r\n\r\n def get(self):\r\n '''\r\n This method retrieves system_element deployed on a specific environment and returns the output in JSON format.\r\n '''\r\n parser = reqparse.RequestParser()\r\n 
self.add_argument_for_parsing(parser)\r\n args = parser.parse_args(strict=True)\r\n env_id = args['env_id']\r\n system_id = args['system_id']\r\n resp_data = {}\r\n sys_ele_obj = SystemElementModel.query.filter_by(system_id=system_id)\r\n obj = []\r\n for sys_ele in sys_ele_obj: \r\n system_element_component_obj = SystemElementComponentModel.query.filter_by(system_element_id=sys_ele.system_element_id).group_by(SystemElementComponentModel.system_version_id).order_by(\"system_element_component_id desc\").first()\r\n data = {}\r\n data['system_element_name'] = sys_ele.system_element_name\r\n data['system_element_type_name'] = sys_ele.system_element_type.system_element_type_name \r\n data['system_version_id'] = system_element_component_obj.system_version_id\r\n data['system_element_id'] = sys_ele.system_element_id\r\n data['system_id'] = int(system_id)\r\n data['env_id'] = int(env_id)\r\n \r\n instance_set = set()\r\n for deployment in sys_ele.deployments:\r\n instance_data = {}\r\n instance = deployment.instance\r\n if (instance is not None and (int(deployment.environment_id) == int(env_id)) ):\r\n system_version = sys_ele.get_latest_version_deployed(deployment.instance.instance_id,sys_ele.system_element_id,env_id)\r\n if system_version['system_version_id'] :\r\n system_version_known_obj = SystemVersionModel.query.filter_by(system_version_id=system_version['system_version_id']).first()\r\n data['system_version_name'] = system_version_known_obj.system_version_name\r\n else: \r\n data['system_version_name'] = 'Unknown'\r\n data['instance_id'] = instance.instance_id\r\n data['instance_name'] = instance.instance_name\r\n data['assigned_ip'] = instance.assigned_ip\r\n data['instance_state'] = instance.instance_state\r\n instance_set.add(str(data))\r\n instances = [ast.literal_eval(instance)\r\n for instance in instance_set]\r\n if len(instances) > 0:\r\n for ints in instances: \r\n obj.append(ints)\r\n else:\r\n obj.append(data)\r\n return jsonify(obj)\r\n","sub_path":"flask/delivery_db_api/resources/system_element_resource.py","file_name":"system_element_resource.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"638616084","text":"import logging\nimport sys\n\n\nclass Log(object):\n level_set = False\n\n @staticmethod\n def set_loglevel(loglevel):\n global logger\n Log.get_logger().setLevel(loglevel)\n Log.get_logger().info(\"Set loglevel to %d\" % loglevel)\n logger = Log.get_logger()\n Log.level_set = True\n\n @staticmethod\n def get_logger():\n return logging.getLogger(\"kernel_exp_family\")\n\n\nif not Log.level_set:\n level = logging.INFO\n logging.basicConfig(format='KERNEL_EXP_FAMILY: %(levelname)s: %(asctime)s: %(module)s.%(funcName)s(): %(message)s',\n level=level)\n Log.get_logger().info(\"Global logger initialised with loglevel %d\" % level)\n Log.level_set = True\n\n\nclass SimpleLogger(object):\n def __init__(self, path):\n self.terminal = sys.stdout\n self.log = open(path, 'a')\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # needed for python 3 compatibility\n pass\n","sub_path":"kernel_exp_family/tools/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"303048435","text":"# -*- coding: utf-8 -*-\n\n# Copyright © 2014-2018 GWHAT Project Contributors\n# https://github.com/jnsebgosselin/gwhat\n#\n# This 
file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).\n# Licensed under the terms of the GNU General Public License.\n\n\n# ---- Standard Library Imports\nimport csv\nimport os\n\n# ---- Third Party Imports\nfrom matplotlib.figure import Figure as MplFigure\nfrom matplotlib.transforms import ScaledTranslation\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\nimport numpy as np\nfrom scipy.stats._continuous_distns import gamma\n\nfrom xlrd.xldate import xldate_from_date_tuple\n\n# ---- Local Library Imports\nfrom gwhat.common.utils import save_content_to_csv\n\n\nclass PostProcessErr(object):\n\n SUPPORTED_FIG_FORMATS = ['pdf', 'svg']\n SUPPORTED_LANGUAGES = ['English', 'French']\n\n def __init__(self, fname):\n self.Yp = None # Predicted value at target station\n self.Ym = None # Measured value at target station\n self.Time = None\n self.Date = None\n\n self.staName = None\n self.climID = None\n self.set_fig_format(self.SUPPORTED_FIG_FORMATS[0])\n self.set_fig_language(self.SUPPORTED_LANGUAGES[0])\n\n self.fname = fname\n self.dirname = os.path.dirname(self.fname)\n\n self.load_err_file()\n\n # ---- Figure extension and format\n\n @property\n def fig_ext(self):\n \"\"\"Figure extension to use for saving the figures.\"\"\"\n return '.' + self.fig_format\n\n @property\n def fig_format(self):\n \"\"\"Figure format to use for saving the figures.\"\"\"\n return self.__fig_format\n\n def set_fig_format(self, fig_format):\n \"\"\"Set the format that will be used for saving the figures.\"\"\"\n if fig_format in self.SUPPORTED_FIG_FORMATS:\n self.__fig_format = fig_format\n else:\n print(\"Supported figure formats are:\", self.SUPPORTED_FIG_FORMATS)\n raise ValueError\n\n # ---- Language\n\n @property\n def fig_language(self):\n \"\"\"Language of the figure labels.\"\"\"\n return self.__fig_language\n\n def set_fig_language(self, language):\n \"\"\"Set the language of the figure labels.\"\"\"\n if language in self.SUPPORTED_LANGUAGES:\n self.__fig_language = language\n else:\n print(\"Supported language:\", self.SUPPORTED_LANGUAGES)\n raise ValueError\n\n # ---- Open and Load files\n\n def open_err_file(self, filename):\n \"\"\"Open .err file and return None if it fails.\"\"\"\n for dlm in [',', '\\t']:\n with open(self.fname) as f:\n reader = list(csv.reader(f, delimiter=dlm))\n for line in reader:\n try:\n if line[0] == 'VARIABLE':\n return reader\n except IndexError:\n continue\n else:\n print('The format of the .err file is wrong.')\n return None\n\n def load_err_file(self):\n \"\"\"Read .err file and return None if it fails.\"\"\"\n reader = self.open_err_file(self.fname)\n if reader is None:\n return\n\n for row, line in enumerate(reader):\n try:\n if line[0] == 'VARIABLE':\n break\n elif line[0] == 'Station Name':\n self.staName = reader[row][1]\n elif line[0] == 'Climate Identifier':\n self.climID = reader[row][1]\n except IndexError:\n continue\n row += 1\n\n # ------------------------------------------------ Re-Organizes Data --\n\n # Get unique weather variable names\n\n DATA = np.array(reader[row:])\n self.varNames = np.unique(DATA[:, 0])\n self.varTypes = ['continuous'] * (len(self.varNames))\n\n # Splits data acoording to the weather variables found.\n\n self.Yp, self.Ym, self.Time, self.Date = [], [], [], []\n for i, var in enumerate(self.varNames):\n indx = np.where(DATA[:, 0] == var)[0]\n\n self.Yp.append(DATA[indx, 7].astype(float))\n self.Ym.append(DATA[indx, 8].astype(float))\n\n y = DATA[indx, 1].astype(int)\n m = DATA[indx, 
2].astype(int)\n d = DATA[indx, 3].astype(int)\n\n # ---- Time ----\n\n t = np.zeros(len(y))\n for date in range(len(y)):\n t[date] = (xldate_from_date_tuple((y[date],\n m[date],\n d[date]), 0)\n - xldate_from_date_tuple((y[date], 1, 1), 0))\n\n self.Time.append(t)\n self.Date.append([y, m, d])\n\n # ---- Weather Variable Type ----\n\n # If the proportion of zeros in the data series is higher\n # than 25%, the data type is set as an event-based weather\n # variable. Otherwise, default value is kept and variable is\n # considered to be continuous in time.\n #\n # The precipitation (solid, liquid or total) is a good example of\n # an event-based variable, while air temperature (min, max or mean)\n # is a good example of a continuous variable.\n\n pc0 = len(np.where(self.Ym[i] == 0)[0]) / float(len(self.Ym[i]))\n if pc0 > 0.25:\n self.varTypes[i] = 'event-based'\n\n return\n\n def generates_graphs(self):\n \"\"\"Generates all the graphs from the err file.\"\"\"\n for i in range(len(self.Yp)):\n name = self.varNames[i]\n name = name.lower()\n name = name.replace(\" \", \"_\")\n name = name.replace(\"(\", \"\")\n name = name.replace(\")\", \"\")\n print(name)\n fname = '%s/%s%s' % (self.dirname, name, self.fig_ext)\n print('------------------------')\n print('Generating %s.' % (os.path.basename(fname)))\n print('------------------------')\n plot_est_err(self.Ym[i], self.Yp[i], self.varNames[i],\n fname, self.fig_language)\n\n if self.varNames[i] == 'Total Precip (mm)':\n fname = '%s/%s%s' % (self.dirname, 'precip_PDF', self.fig_ext)\n plot_gamma_dist(self.Ym[i], self.Yp[i],\n fname, self.fig_language)\n print('Generating %s.' % (os.path.basename(fname)))\n\n def generates_summary(self):\n\n Ypre = self.Yp\n Ymes = self.Ym\n\n for i in range(len(Ypre)):\n\n RMSE = (np.mean((Ypre[i] - Ymes[i]) ** 2)) ** 0.5\n MAE = np.mean(np.abs(Ypre[i] - Ymes[i]))\n ME = np.mean(Ypre[i] - Ymes[i])\n r = np.corrcoef(Ypre[i], Ymes[i])[1, 0]\n\n Emax = np.min(Ypre[i] - Ymes[i])\n Emin = np.max(Ypre[i] - Ymes[i])\n\n dirname = 'summary/'\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n filename = dirname + self.varNames[i] + '.csv'\n\n # ---- Generate File ----\n\n if not os.path.exists(filename):\n header = [['Station', 'RMSE', 'MAE', 'ME',\n 'r', 'Emax', 'Emin']]\n save_content_to_csv(filename, header)\n\n # ---- Write Stats to File ----\n\n rowcontent = [[self.staName, '%0.1f' % RMSE, '%0.1f' % MAE,\n '%0.2f' % ME, '%0.3f' % r, '%0.1f' % Emax,\n '%0.1f' % Emin]]\n save_content_to_csv(filename, rowcontent, mode='a')\n\n\ndef plot_est_err(Ymes, Ypre, varName, fname, language='English'):\n\n Ymax = np.ceil(np.max(Ymes)/10)*10\n Ymin = np.floor(np.min(Ymes)/10)*10\n\n fw, fh = 6, 6\n fig = MplFigure(figsize=(fw, fh))\n canvas = FigureCanvas(fig)\n\n # ---- Create Axes\n\n leftMargin = 1. 
/ fw\n rightMargin = 0.25 / fw\n bottomMargin = 0.8 / fh\n topMargin = 0.25 / fh\n\n x0 = leftMargin\n y0 = bottomMargin\n w0 = 1 - (leftMargin + rightMargin)\n h0 = 1 - (bottomMargin + topMargin)\n\n ax0 = fig.add_axes([x0, y0, w0, h0])\n ax0.set_axisbelow(True)\n ax0.grid(axis='both', color='0.', linestyle='--', linewidth=0.5,\n dashes=[0.5, 3])\n\n # ---- Plot\n\n # Estimation Error\n hscat, = ax0.plot(Ymes, Ypre, '.', mec='k', mfc='k', ms=12, alpha=0.35)\n hscat.set_rasterized(True)\n\n # 1:1 Line\n dl = 12 # dashes length\n ds = 6 # spacing between dashes\n dew = 0.5 # dashes edge width\n dlw = 1.5 # dashes line width\n\n # Plot a white contour line\n ax0.plot([Ymin, Ymax], [Ymin, Ymax], '-w', lw=dlw + 2 * dew, alpha=1)\n\n # Plot a black dashed line\n hbl, = ax0.plot([Ymin, Ymax], [Ymin, Ymax], 'k', lw=dlw,\n dashes=[dl, ds], dash_capstyle='butt')\n\n # ---- Text\n\n # Calculate Statistics\n\n RMSE = (np.mean((Ypre - Ymes) ** 2)) ** 0.5\n MAE = np.mean(np.abs(Ypre - Ymes))\n ME = np.mean(Ypre - Ymes)\n r = np.corrcoef(Ypre, Ymes)[1, 0]\n print('RMSE=%0.1f ; MAE=%0.1f ; ME=%0.2f ; r=%0.3f' %\n (RMSE, MAE, ME, r))\n\n Emax = np.min(Ypre - Ymes)\n Emin = np.max(Ypre - Ymes)\n\n print('Emax=%0.1f ; Emin=%0.1f' % (Emax, Emin))\n\n # Generate and Plot Labels\n\n if varName in ['Max Temp (deg C)', 'Mean Temp (deg C)',\n 'Min Temp (deg C)']:\n units = u'°C'\n elif varName in ['Total Precip (mm)']:\n units = 'mm'\n else:\n units = ''\n\n tcontent = [u'RMSE = %0.1f %s' % (RMSE, units),\n u'MAE = %0.1f %s' % (MAE, units),\n u'ME = %0.2f %s' % (ME, units),\n u'r = %0.3f' % (r)]\n tcontent = list(reversed(tcontent))\n for i in range(len(tcontent)):\n dx, dy = -10 / 72., 10 * (i+1) / 72.\n padding = ScaledTranslation(dx, dy, fig.dpi_scale_trans)\n transform = ax0.transAxes + padding\n ax0.text(0, 0, tcontent[i], ha='left', va='bottom', fontsize=16,\n transform=transform)\n\n # ---- Get Labels Win. 
Extents\n\n hext, vext = np.array([]), np.array([])\n renderer = canvas.get_renderer()\n for text in ax0.texts:\n bbox = text.get_window_extent(renderer)\n bbox = bbox.transformed(ax0.transAxes.inverted())\n hext = np.append(hext, bbox.width)\n vext = np.append(vext, bbox.height)\n\n # ---- Position Labels in Axes\n\n x0 = 1 - np.max(hext)\n y0 = 0\n for i, text in enumerate(ax0.texts):\n text.set_position((x0, y0))\n y0 += vext[i]\n\n # ----- Labels\n\n ax0.xaxis.set_ticks_position('bottom')\n ax0.yaxis.set_ticks_position('left')\n ax0.tick_params(axis='both', direction='out', labelsize=14)\n\n if varName == 'Max Temp (deg C)':\n if language == 'French':\n var = u'Températures maximales journalières %s (°C)'\n else:\n var = u'%s Daily Max Temperature (°C)'\n elif varName == 'Mean Temp (deg C)':\n if language == 'French':\n var = u'Températures moyennes journalières %s (°C)'\n else:\n var = u'%s Daily Mean Temperature (°C)'\n elif varName == 'Min Temp (deg C)':\n if language == 'French':\n var = u'Températures minimales journalières %s (°C)'\n else:\n var = u'%s Daily Min Temperature (°C)'\n elif varName == 'Total Precip (mm)':\n if language == 'French':\n var = u'Précipitations totales journalières %s (mm)'\n else:\n var = '%s Daily Total Precipitation (mm)'\n else:\n var = ''\n\n if language == 'French':\n ax0.set_xlabel(var % u'mesurées', fontsize=16, labelpad=15)\n ax0.set_ylabel(var % u'prédites', fontsize=16, labelpad=15)\n else:\n ax0.set_xlabel(var % 'Measured', fontsize=16, labelpad=15)\n ax0.set_ylabel(var % 'Predicted', fontsize=16, labelpad=15)\n\n # ---- Axis\n\n ax0.axis([Ymin, Ymax, Ymin, Ymax])\n\n # ---- Legend\n\n if language == 'French':\n lglabels = ['Données journalières', '1:1']\n else:\n lglabels = ['Daily weather data', '1:1']\n\n ax0.legend([hscat, hbl], lglabels,\n loc='upper left', numpoints=1, frameon=False, fontsize=16)\n\n # ---- Draw\n\n fig.savefig(fname, dpi=300)\n\n return canvas\n\n\ndef plot_gamma_dist(Ymes, Ypre, fname, language='English'):\n\n fw, fh = 6, 6\n fig = MplFigure(figsize=(fw, fh), facecolor='white')\n canvas = FigureCanvas(fig)\n\n # ---- Create Axes\n\n leftMargin = 1.1 / fw\n rightMargin = 0.25 / fw\n bottomMargin = 0.85 / fh\n topMargin = 0.25 / fh\n\n x0 = leftMargin\n y0 = bottomMargin\n w0 = 1 - (leftMargin + rightMargin)\n h0 = 1 - (bottomMargin + topMargin)\n\n ax0 = fig.add_axes([x0, y0, w0, h0])\n ax0.set_yscale('log', nonposy='clip')\n\n Xmax = max(np.ceil(np.max(Ymes)/10.) 
* 10, 80)\n\n # ---- Plots\n\n c1, c2 = '#6495ED', 'red'\n\n if language == 'French':\n lg_labels = [u'DP des données mesurées', u'FDP Gamma (mesurée)',\n u'FDP Gamma (estimée)']\n else:\n lg_labels = ['Measured data PDF', 'Gamma PDF (measured)',\n 'Gamma PDF (estimated)']\n\n # Histogram\n\n ax0.hist(Ymes, bins=20, color=c1, histtype='stepfilled', density=True,\n alpha=0.25, ec=c1, label=lg_labels[0])\n\n # Measured Gamma PDF\n\n alpha, loc, beta = gamma.fit(Ymes)\n x = np.arange(0.5, Xmax, 0.1)\n ax0.plot(x, gamma.pdf(x, alpha, loc=loc, scale=beta), '-', lw=2,\n alpha=1., color=c1, label=lg_labels[1])\n\n # Predicted Gamma PDF\n\n alpha, loc, beta = gamma.fit(Ypre)\n x = np.arange(0.5, Xmax, 0.1)\n ax0.plot(x, gamma.pdf(x, alpha, loc=loc, scale=beta), '--r',\n lw=2, alpha=0.85, color=c2, label=lg_labels[2])\n\n # ---- Axis Limits\n\n ax0.axis(xmin=0, xmax=Xmax, ymax=1)\n\n # ---- Labels\n\n # Setup axis labels\n\n if language == 'French':\n ax0.set_xlabel(u'Précipitations totales journalières (mm)',\n fontsize=18, labelpad=15)\n ax0.set_ylabel('Probabilité', fontsize=18, labelpad=15)\n else:\n ax0.set_xlabel('Daily Total Precipitation (mm)', fontsize=18,\n labelpad=15)\n ax0.set_ylabel('Probability', fontsize=18, labelpad=15)\n\n # Setup yticks labels\n\n ax0.xaxis.set_ticks_position('bottom')\n ax0.yaxis.set_ticks_position('left')\n ax0.tick_params(axis='both', direction='out', labelsize=14)\n ax0.tick_params(axis='both', which='minor', direction='out',\n labelsize=14)\n\n canvas.draw()\n ylabels = []\n for i, label in enumerate(ax0.get_yticks()):\n if label >= 1:\n ylabels.append('%d' % label)\n elif label <= 10**-3:\n ylabels.append('$\\\\mathdefault{10^{%d}}$' % np.log10(label))\n else:\n ylabels.append(str(label))\n ax0.set_yticklabels(ylabels)\n\n # ---- Legend\n\n lg = ax0.legend(loc='upper right', frameon=False)\n\n # ---- Wet Days Comparison --\n\n # ---- Generate text\n\n preWetDays = np.where(Ypre > 0)[0]\n mesWetDays = np.where(Ymes > 0)[0]\n\n f = len(preWetDays) / float(len(mesWetDays)) * 100\n\n if f > 100:\n if language == 'French':\n msg = 'Nombre de jours pluvieux surestimé de %0.1f%%' % (f - 100)\n else:\n msg = 'Number of wet days overestimated by %0.1f%%' % (f - 100)\n else:\n if language == 'French':\n msg = 'Nombre de jours pluvieux sous-estimé de %0.1f%%' % (100 - f)\n else:\n msg = 'Number of wet days underestimated by %0.1f%%' % (100 - f)\n\n # ---- Get Legend Box Position and Extent\n\n canvas.draw()\n bbox = lg.get_window_extent(canvas.get_renderer())\n bbox = bbox.transformed(ax0.transAxes.inverted())\n\n dx, dy = 5/72., 5/72.\n padding = ScaledTranslation(dx, dy, fig.dpi_scale_trans)\n transform = ax0.transAxes + padding\n\n ax0.text(0., 0., msg, transform=transform, va='bottom', ha='left')\n\n # ---- Draw\n\n fig.savefig(fname) # A canvas.draw() is included with this.\n return canvas\n\n\ndef plot_rmse_vs_time(Ymes, Ypre, Time, Date, name):\n\n fw, fh = 6, 6\n fig = MplFigure(figsize=(fw, fh), facecolor='white')\n canvas = FigureCanvas(fig)\n\n # ---- Create Axes\n\n leftMargin = 0.75 / fw\n rightMargin = 0.75 / fw\n bottomMargin = 0.75 / fh\n topMargin = 0.75 / fh\n\n x0, y0 = leftMargin, bottomMargin\n w0 = 1 - (leftMargin + rightMargin)\n h0 = 1 - (bottomMargin + topMargin)\n\n ax0 = fig.add_axes([x0, y0, w0, h0], polar=True)\n\n # ---- Plot Data\n\n # Estimation Error\n\n Yerr = np.abs(Ypre - Ymes)\n Time *= 2 * np.pi / 365.\n\n c = '0.4'\n ax0.plot(Time, Yerr, '.', mec=c, mfc=c, ms=15, alpha=0.5)\n\n # RMSE Polygon\n\n Months = Date[1]\n RMSE = 
np.zeros(12)\n mfd = np.zeros(12)\n for m in range(12):\n mfd[m] = (xldate_from_date_tuple((2000, m+1, 1), 0) -\n xldate_from_date_tuple((2000, 1, 1), 0))\n indx = np.where(Months == m+1)[0]\n RMSE[m] = (np.mean(Yerr[indx] ** 2)) ** 0.5\n\n # Transform first day of the month to radians\n mfd = mfd * 2 * np.pi / 365.\n\n # Add first point at the end to close the polygon\n mfd = np.append(mfd, mfd[0])\n RMSE = np.append(RMSE, RMSE[0])\n ax0.plot(mfd, RMSE * 5, ls='--', c='red', lw=2, mec='b', mew=3, mfc='b',\n ms=10, dash_capstyle='round', dash_joinstyle='round')\n\n # ---- Labels\n\n ax0.tick_params(axis='both', direction='out', labelsize=16)\n ax0.set_xticklabels(['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',\n 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'])\n ax0.set_xticks(mfd)\n\n ax0.set_yticklabels([])\n ax0.set_yticks([])\n ax0.set_rmax(1.1 * np.max(Yerr))\n # ax0.set_rgrids([10,20,30,40,50,60,70,80,90], angle=345.)\n\n # ---- Draw\n\n fig.savefig(name + '_polar_error.pdf')\n canvas.show()\n\n\ndef compute_wet_days_LatexTable(dirname):\n fname = 'wet_days_0.5mm.csv'\n fcontent = [['station', 'Meas. wet days', 'Pred. wet days', 'Err.(days)',\n 'Err.(%)']]\n\n for root, directories, filenames in os.walk(dirname):\n for filename in filenames:\n if os.path.splitext(filename)[1] == '.err':\n print('---- %s ----' % os.path.basename(root))\n pperr = PostProcessErr(os.path.join(root, filename))\n\n preWetDays = np.where(pperr.Yp[3] > 0.5)[0]\n mesWetDays = np.where(pperr.Ym[3] > 0.5)[0]\n\n Npre = len(preWetDays)\n Nmes = len(mesWetDays)\n f = (Npre - Nmes) / float(Nmes) * 100\n\n print('Averaged nbr. of meas. wet days per year = %0.1f days'\n % (Nmes/30.))\n print('Averaged nbr. of pred. wet days per year = %0.1f days'\n % (Npre/30.))\n print('Estimation Error = %0.1f days' % ((Npre-Nmes)/30.))\n print('Estimation Error = %0.1f %%' % (f))\n\n MI = np.mean(pperr.Ym[3][mesWetDays])\n SD = np.std(pperr.Ym[3][mesWetDays])\n print('Precipitation intensity = %0.1f mm/day' % MI)\n print('Precipitation sdt = %0.1f mm/day' % SD)\n\n fcontent.append([pperr.staName,\n '%d' % (Nmes/30.),\n '%d' % (Npre/30.),\n '%d' % ((Npre-Nmes)/30.),\n '%0.1f' % f])\n save_content_to_csv(fname, fcontent, mode='a')\n\n\ndef compute_err_boxplot(dirname):\n\n Ym_tot = []\n Yp_tot = []\n\n for root, directories, filenames in os.walk(dirname):\n for filename in filenames:\n if os.path.splitext(filename)[1] == '.err':\n print('---- %s ----' % os.path.basename(root))\n pperr = PostProcessErr(os.path.join(root, filename))\n\n Ym_tot.extend(pperr.Ym)\n Yp_tot.extend(pperr.Yp)\n\n\n# ---- if __name__ == '__main__'\n\nif __name__ == '__main__':\n dirname = (\"C:\\\\Users\\\\jsgosselin\\\\GWHAT\\\\Projects\\\\\"\n \"Example\\\\Meteo\\\\Output\\\\FARNHAM (7022320)\"\n )\n filename = os.path.join(dirname, \"FARNHAM (7022320)_2005-2010.err\")\n post_worker = PostProcessErr(filename)\n post_worker.set_fig_format(\"pdf\")\n","sub_path":"pygwd/gapfill_weather_postprocess.py","file_name":"gapfill_weather_postprocess.py","file_ext":"py","file_size_in_byte":20257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"602335996","text":"# BenchExec is a framework for reliable benchmarking.\n# This file is part of BenchExec.\n#\n# Copyright (C) 2007-2015 Dirk Beyer\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains some useful functions for Strings, Files and Lists.\n\"\"\"\n\n# prepare for Python 3\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom decimal import Decimal\nimport glob\nimport json\nimport logging\nimport os\n\nimport re\nimport tempita\n\nfrom benchexec import model\n\nDEFAULT_TIME_PRECISION = 3\nDEFAULT_TOOLTIP_PRECISION = 2\nREGEX_SIGNIFICANT_DIGITS = re.compile('([-\\+])?(\\d+)\\.?(0*(\\d+))?([eE]([-\\+])(\\d+))?') # compile regular expression only once for later uses\nGROUP_SIGN = 1\nGROUP_INT_PART = 2\nGROUP_DEC_PART = 3\nGROUP_SIG_DEC_DIGITS = 4\nGROUP_EXP = 5\nGROUP_EXP_SIGN = 6\nGROUP_EXP_VAL = 7\nPOSSIBLE_FORMAT_TARGETS = ['html', 'html_cell', 'tooltip_stochastic', 'csv']\n\n\ndef enum(**enums):\n return type('Enum', (), enums)\n\n\nclass ColumnEnumType(object):\n\n def __init__(self, type, name):\n self._type = type\n self.name = name\n\n @property\n def type(self):\n return self\n\n def __str__(self):\n return self.name\n\n def __eq__(self, other):\n try:\n return self._type == other._type\n except:\n return False\n\n\nclass ColumnType(object):\n column_types = enum(text=1, count=2, measure=3, status=4, main_status=5)\n text = ColumnEnumType(column_types.text, 'text')\n count = ColumnEnumType(column_types.count, 'count')\n measure = ColumnEnumType(column_types.measure, 'measure')\n status = ColumnEnumType(column_types.status, 'status')\n main_status = ColumnEnumType(column_types.main_status, 'main_status')\n\n\nclass ColumnMeasureType(object):\n \"\"\"\n Column type 'Measure', contains the column's unit and the largest amount of digits after the decimal point.\n \"\"\"\n def __init__(self, max_decimal_digits):\n self._type = ColumnType.measure\n self._max_decimal_digits = max_decimal_digits\n\n @property\n def type(self):\n return self._type\n\n @property\n def max_decimal_digits(self):\n return self._max_decimal_digits\n\n\nclass Column(object):\n \"\"\"\n The class Column contains title, pattern (to identify a line in log_file),\n number_of_significant_digits of a column, the type of the column's values,\n their unit, a scale factor to apply to all values of the column (mostly to fit the unit)\n and href (to create a link to a resource).\n It does NOT contain the value of a column.\n \"\"\"\n def __init__(self, title, pattern, num_of_digits, href, col_type=None, unit=None, scale_factor=1):\n self.title = title\n self.pattern = pattern\n self.number_of_significant_digits = num_of_digits\n self.type = col_type\n self.unit = unit\n self.scale_factor = float(scale_factor) if scale_factor else 1\n if int(self.scale_factor) == self.scale_factor:\n self.scale_factor = int(self.scale_factor)\n self.href = href\n\n\ndef get_file_list(shortFile):\n \"\"\"\n The function get_file_list expands a short filename to a sorted list\n of filenames. 
The short filename can contain variables and wildcards.\n    \"\"\"\n\n    # expand tilde and variables\n    expandedFile = os.path.expandvars(os.path.expanduser(shortFile))\n\n    # expand wildcards\n    fileList = glob.glob(expandedFile)\n\n    # sort alphabetically,\n    # if list is empty, sorting returns None, so better do not sort\n    if len(fileList) != 0:\n        fileList.sort()\n    else:\n        logging.warning(\"No file matches '%s'.\", shortFile)\n\n    return fileList\n\n\ndef extend_file_list(filelist):\n    '''\n    This function takes a list of files, expands wildcards\n    and returns a new list of files.\n    '''\n    return [file for wildcardFile in filelist for file in get_file_list(wildcardFile)]\n\n\ndef split_number_and_unit(s):\n    \"\"\"\n    Split a string into two parts: a number prefix and an arbitrary suffix.\n    Splitting is done from the end, so the split is where the last digit\n    in the string is (that means the prefix may include non-digit characters,\n    if they are followed by at least one digit).\n    \"\"\"\n    if not s:\n        return (s, '')\n    pos = len(s)\n    while pos and not s[pos-1].isdigit():\n        pos -= 1\n    return (s[:pos], s[pos:])\n\n\ndef remove_unit(s):\n    \"\"\"\n    Remove a unit from a number string, or return the full string if it is not a number.\n    \"\"\"\n    (prefix, suffix) = split_number_and_unit(s)\n    return suffix if prefix == '' else prefix\n\n\ndef create_link(runResult, base_dir, column):\n    if not column.href:\n        return os.path.relpath(runResult.log_file, base_dir)\n    source_file = runResult.task_id[0]\n    href = model.substitute_vars([column.href], None, source_file)[0]\n    if href.startswith(\"http://\") or href.startswith(\"https://\"):\n        return href\n    return os.path.relpath(href, base_dir)\n\n\ndef format_options(options):\n    '''Helper function for formatting the content of the options line'''\n    # split on one of the following tokens: ' -' or '[[' or ']]'\n    lines = ['']\n    for token in re.split('( -|\[\[|\]\])', options):\n        if token in ['[[',']]']:\n            lines.append(token)\n            lines.append('')\n        elif token == ' -':\n            lines.append(token)\n        else:\n            lines[-1] += token\n    # join all non-empty lines and wrap them into 'span'-tags\n    return '<span>' + '</span><span>'.join(line for line in lines if line.strip()) + '</span>'\n\n\ndef format_number_align(formattedValue, max_number_of_dec_digits):\n    alignment = max_number_of_dec_digits\n    if formattedValue.find('.') >= 0:\n        # Subtract spaces for digits after the decimal point.\n        alignment -= len(formattedValue) - formattedValue.find('.') - 1\n    elif max_number_of_dec_digits > 0:\n        # Add punctuation space.\n        formattedValue += ' '\n    formattedValue += \"\".join([' '] * alignment)\n    return formattedValue\n\n\ndef _get_significant_digits(value):\n    # Regular expression returns multiple groups:\n    #\n    # Group GROUP_SIGN: Optional sign of value\n    # Group GROUP_INT_PART: Digits in front of decimal point\n    # Group GROUP_DEC_PART: Optional digits after decimal point\n    # Group GROUP_SIG_DEC_DIGITS: Digits after decimal point, starting at the first value not 0\n    # Group GROUP_EXP: Optional exponent part (e.g. 'e-5')\n    # Group GROUP_EXP_SIGN: Optional sign of exponent part\n    # Group GROUP_EXP_VAL: Value of exponent part (e.g. 
'5' for 'e-5')\n # Use these groups to compute the number of zeros that have to be added to the current number's\n # decimal positions.\n match = REGEX_SIGNIFICANT_DIGITS.match(value)\n\n if int(match.group(GROUP_INT_PART)) == 0 and float(value) != 0:\n sig_digits = len(match.group(GROUP_SIG_DEC_DIGITS))\n\n else:\n sig_digits = len(match.group(GROUP_INT_PART))\n if match.group(GROUP_DEC_PART):\n sig_digits += len(match.group(GROUP_DEC_PART))\n\n return sig_digits\n\n\ndef format_number(value, number_of_significant_digits, max_digits_after_decimal, isToAlign=False, format_target='html'):\n \"\"\"\n If the value is a number (or number followed by a unit),\n this function returns a string-representation of the number\n with the specified number of significant digits,\n optionally aligned at the decimal point.\n\n If the value is not a number, it is returned unchanged.\n \"\"\"\n if format_target not in POSSIBLE_FORMAT_TARGETS:\n raise ValueError('Unknown format target')\n\n if value is None:\n return ''\n\n try:\n # Round to the given amount of significant digits\n # (unfortunately this keeps the '.0' for large numbers and removes too many zeros from the end).\n float_value = float(\"{value:.{digits}g}\".format(digits=number_of_significant_digits, value=float(value)))\n formatted_value = str(float_value)\n\n # Get the number of intended significant digits and the number of current significant digits.\n # If we have not enough digits due to rounding, 0's have to be re-added.\n # If we have too many digits due to conversion of integers to float (e.g. 1234.0), the decimals have to be cut\n initial_value_sig_digits = _get_significant_digits(value)\n current_sig_digits = _get_significant_digits(formatted_value)\n\n intended_digits = min(initial_value_sig_digits, number_of_significant_digits)\n digits_to_add = intended_digits - current_sig_digits\n\n if digits_to_add > 0:\n assert '.' in formatted_value\n formatted_value += \"\".join(['0'] * digits_to_add)\n elif digits_to_add < 0:\n assert round(float_value) == float_value # check that the number has no decimal values\n formatted_value = str(round(float_value))\n\n # Cut the 0 in front of the decimal point for values < 1.\n # Example: 0.002 => .002\n if format_target == \"html_cell\" and '.' 
in formatted_value and 1 > float(formatted_value) >= 0:\n            assert formatted_value[0] == '0'\n            formatted_value = formatted_value[1:]\n\n        # Alignment\n        if isToAlign:\n            formatted_value = format_number_align(formatted_value, max_digits_after_decimal)\n        return formatted_value\n    except ValueError: # If value is no float, don't format it.\n        return value\n\n\ndef format_value(value, column, isToAlign=False, format_target=\"html\"):\n    \"\"\"\n    Format a value nicely for human-readable output (including rounding).\n\n    @param value: the value to format\n    @param column: a Column object describing the column the value is a part of.\n        This given Column is used to derive information about proper formatting.\n    @param isToAlign: if True, spaces will be added to the returned String representation\n        to align it correctly with all other values in this column\n    @param format_target: the target the value should be formatted for\n    @return: a formatted String representation of the given value.\n    \"\"\"\n    if format_target not in POSSIBLE_FORMAT_TARGETS:\n        raise ValueError('Unknown format target')\n\n    if value is None:\n        return ''\n\n    # If the number ends with \"s\" or another unit, remove it.\n    # Units should not occur in table cells, but in the table head.\n    value = remove_unit(str(value).strip())\n\n    # Apply the scale factor to the value\n    try:\n        if column.scale_factor != 1:\n            value = float(value) * column.scale_factor\n            if int(value) == value:\n                value = int(value)\n            value = str(value)\n    except ValueError:\n        pass\n\n    number_of_significant_digits = column.number_of_significant_digits\n    max_dec_digits = 0\n    # compare strings with == / !=, not with the identity operator 'is'\n    if number_of_significant_digits is None and format_target == \"tooltip_stochastic\":\n        return str(round(float(value), DEFAULT_TOOLTIP_PRECISION))\n\n    elif column.type.type == ColumnType.measure:\n        if number_of_significant_digits is None and format_target != \"csv\":\n            number_of_significant_digits = DEFAULT_TIME_PRECISION\n        max_dec_digits = column.type.max_decimal_digits\n\n    if number_of_significant_digits is not None:\n        return format_number(value, int(number_of_significant_digits), int(max_dec_digits), isToAlign, format_target)\n    else:\n        return value\n\n\ndef to_decimal(s):\n    # remove whitespaces and trailing units (e.g., in '1.23s')\n    if s:\n        s, _ = split_number_and_unit(s.strip())\n        return Decimal(s) if s else None\n    else:\n        return None\n\n\ndef collapse_equal_values(values, counts):\n    \"\"\"\n    Take a tuple (values, counts), remove consecutive values and increment their count instead.\n    \"\"\"\n    assert len(values) == len(counts)\n    previousValue = values[0]\n    previousCount = 0\n\n    for value, count in zip(values, counts):\n        if value != previousValue:\n            yield (previousValue, previousCount)\n            previousCount = 0\n            previousValue = value\n        previousCount += count\n\n    yield (previousValue, previousCount)\n\n\ndef get_column_value(sourcefileTag, columnTitle, default=None):\n    for column in sourcefileTag.findall('column'):\n        if column.get('title') == columnTitle:\n            return column.get('value')\n    return default\n\n\ndef flatten(list_):\n    return [value for sublist in list_ for value in sublist]\n\n\ndef to_json(obj):\n    return tempita.html(json.dumps(obj, sort_keys=True))\n\n\ndef prettylist(list_):\n    if not list_:\n        return ''\n\n    # Filter out duplicate values while keeping order\n    values = set()\n    uniqueList = []\n    for entry in list_:\n        if entry not in values:\n            values.add(entry)\n            uniqueList.append(entry)\n\n    return uniqueList[0] if len(uniqueList) == 1 \\\n        else '[' + '; '.join(uniqueList) + 
']'\n","sub_path":"benchexec/tablegenerator/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":13391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"265714513","text":"#!/usr/bin/env python3\n\nimport sys\nimport datetime\nimport dateutil.tz\nimport psycopg2\nimport psycopg2.extras\n\nsys.exit()\n\nif __name__ == \"__main__\":\n    transactions = []\n    transactions.append({'tz': datetime.datetime(2018,12,23, 5,10,40, tzinfo=dateutil.tz.tzlocal()), 'v': 1500.00})\n    transactions.append({'tz': datetime.datetime(2018, 4,21,18,26,21, tzinfo=dateutil.tz.tzlocal()), 'v': 1500.00})\n    transactions.append({'tz': datetime.datetime(2018,11,27, 8, 3,42, tzinfo=dateutil.tz.tzlocal()), 'v': 1500.00})\n    transactions.append({'tz': datetime.datetime(2018, 2, 6,10, 5,47, tzinfo=dateutil.tz.tzlocal()), 'v': 1500.00})\n\n    # postgres db insert\n    conn = psycopg2.connect(dbname='tutorial', user='postgres', password='password', host='localhost')\n    cur = conn.cursor()\n\n    for F in transactions:\n        print(F)\n        cur.execute(\"\"\"\n            INSERT INTO degiro_test (\n                time,\n                fund_name,\n                fund_currency,\n                fund_isin,\n                totVal,\n                size\n            )\n            VALUES (\n                %(time)s,\n                %(fund_name)s,\n                %(fund_currency)s,\n                %(fund_isin)s,\n                %(totVal)s,\n                %(size)s\n            );\"\"\", {\n                'time':F['tz'],\n                'fund_name':'Deposit',\n                'fund_currency':'EUR',\n                'fund_isin':'',\n                'totVal':F['v'],\n                'size':F['v']\n            }\n        )\n    # conn.commit()\n","sub_path":"deposits.py","file_name":"deposits.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"559456453","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing SettingsDlg.\n\"\"\"\nimport configparser\n\nfrom PyQt4.QtGui import QDialog\nfrom PyQt4.QtCore import pyqtSignature, QTime\n\nfrom ui.Ui_settings import Ui_settingsDlg\n\nDEFAULT_SETTINGS = {'server address':'localhost:9696', 'show tooltips':str(True), 'update time':'60'}\n\nclass SettingsDlg(QDialog, Ui_settingsDlg):\n    \"\"\"\n    Class documentation goes here.\n    \"\"\"\n    def __init__(self, settings, parent = None):\n        \"\"\"\n        Constructor\n        \"\"\"\n        QDialog.__init__(self, parent)\n        self.setupUi(self)\n        self.settings = settings\n        self.settings_ = configparser.SafeConfigParser(DEFAULT_SETTINGS)\n        self.settings_.read('settings.cfg')\n        self.enable_tooltips = self.settings_.getboolean('main', 'show tooltips')\n        self.tooltipsCheckBox.setChecked(self.enable_tooltips)\n        self.address = self.settings_.get('main', 'server address')\n        self.address = self.address.replace('localhost', '127.0.0.1')\n        self.ipLineEdit.setText(self.address.split(':')[0])\n        self.portLineEdit.setText(self.address.split(':')[1])\n        self.update_time = self.settings_.getint('main', 'update time')\n        minutes, seconds = divmod(self.update_time, 60)\n        try:\n            time = QTime.fromString('{0}:{1}'.format(minutes, seconds), \"m:ss\")\n            self.updateTimeEdit.setTime(time)\n        except Exception:\n            self.updateTimeEdit.setTime(QTime(0, 1, 0))\n    \n    @pyqtSignature(\"\")\n    def on_buttonBox_accepted(self):\n        \"\"\"\n        Slot documentation goes here.\n        \"\"\"\n        try:\n            new_port = int(str(self.portLineEdit.text()))\n            new_ip = str(self.ipLineEdit.text())\n            new_address = '{0}:{1}'.format(new_ip, new_port)\n        except ValueError:\n            new_address = self.address\n            new_ip = self.address.split(':')[0]\n            new_port = int(self.address.split(':')[1])  # the port is the second component, not the IP\n        if not self.tooltipsCheckBox.isChecked():\n            for k in self.settings['tooltips']:\n                self.settings['tooltips'][k] = ''\n        if self.enable_tooltips != self.tooltipsCheckBox.isChecked():\n            self.settings_.set('main','show tooltips', str(self.tooltipsCheckBox.isChecked()))\n        print(new_address, self.address)\n        if new_address != self.address:\n            if new_ip == '127.0.0.1':\n                new_ip = 'localhost'\n            self.settings['address'] = [new_ip, new_port]\n            self.settings_.set('main','server address', new_address)\n        new_update_time = self.updateTimeEdit.time()\n        new_update_time = (new_update_time.minute() * 60 + \n                           new_update_time.second())\n        if new_update_time != self.update_time:\n            self.settings['update_time'] = new_update_time * 1000\n            self.settings_.set('main','update time', str(new_update_time))\n        with open('settings.cfg', 'w') as configfile:\n            self.settings_.write(configfile)\n        self.accept()\n    \n    \n    @pyqtSignature(\"\")\n    def on_buttonBox_rejected(self):\n        \"\"\"\n        Slot documentation goes here.\n        \"\"\"\n        self.reject()\n","sub_path":"CarNumbersActiveClient/ui/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"73024197","text":"ErrorMSGShown = (\"Failed To Read .txt\")\nNext = 0\n\ndef LaunchError():\n    print(\"\"\"\n\n\n____________________________________________________________\n\n===============================================\n                 Error Occurred\n===============================================\n\n\"\"\")\n    print (\"We have encountered an Error :(\")\n    print (\" ----------------------------------\")\n    print (\"Error MSG : \")\n    TXT = open(\"ErrorMSG.txt\")\n    content = TXT.read()  # read the file once; a second read() would return an empty string\n    print (content)\n    print (\"-----------------------------------\")\n    print (\"Error Level : \")\n    if \"Fatal\" in content:\n        Type = \"FATAL\"\n\n    else:\n        Type = \"Non Urgent\"\n\n    print (Type)\n    print(\"\"\"\n\nPress 'Enter' to close\n\"\"\")\n    Next = input(\">> \")\n\ndef GetTXT():\n    ErrorMSGShownTXT = open(\"ErrorMSG.txt\")\n    ErrorMSGShown = ErrorMSGShownTXT.read()\n    \n    if ErrorMSGShown == (\"\"):\n        ErrorMSGShown = (\"ErrorMSG.txt Is Empty!\")\n        LaunchError()\n\n    else:\n        LaunchError()\n\nGetTXT()\n","sub_path":"Email/Error.py","file_name":"Error.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"415717469","text":"import turtle\nimport time\nstyle = ('Courier', 30, 'italic')\nturtle.penup()\nturtle.goto(400,-100)\nturtle.pendown()\nturtle.write('Velkommen til OL 2034 Tromse', font=style, align='right')\n \nt = turtle.Turtle()\nt.pensize(6) \nfirstRowColors = [\"blue\", \"black\", \"red\"]\nfor i in range(3):\n    t.penup()\n    t.pencolor(firstRowColors[i])\n    t.goto(i*110, 0)\n    t.pendown()\n    t.circle(50)\n    \nsecondRowColors = [\"\", \"yellow\", \"\", \"green\"]\nfor i in range(1, 4, 2):\n    t.penup()\n    t.pencolor(secondRowColors[i])\n    t.goto(i*55, -50)\n    t.pendown()\n    t.circle(50)\n\n\nt.speed(10)\nturtle.color('black')\nturtle.hideturtle()","sub_path":"Python/Oppgave 2.py","file_name":"Oppgave 2.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"256335796","text":"import sys\n# insert at 1, 0 is the script path (or '' in REPL)\n\nsys.path.insert(1, '/home/crottondi/PIRISI_TESI/MSD_Environment/PYTHON_PROJ')\nimport argparse\nfrom primary.data_io import save_data, load_data\nimport gc\nimport pandas as pd\ndef getCurrentMemoryUsage():\n    ''' Memory usage in kB '''\n\n    with open('/proc/self/status') as f:\n        memusage = 
f.read().split('VmRSS:')[1].split('\\n')[0][:-3]\n\n    return int(memusage.strip())\n\n\ndef main(args):\n    n_chunks = args.n_chunks\n    chunk_folder = args.chunk_folder\n    if chunk_folder[-1] != '/':\n        chunk_folder += '/'\n\n    #group all chunk level ranking in a single ranking file\n    dictionary = dict()\n    for i in range(n_chunks):\n        chunk_filename = 'chunk_' + str(i) + '_OUT.pkl'\n        chunk_pathname = chunk_folder+chunk_filename\n        chunk_out = load_data(filename=chunk_pathname)\n\n        for k,v in chunk_out.items():\n            dictionary[k]=v\n        del chunk_out\n\n        print('chunk ', str(i), 'Memory (GB) : ', getCurrentMemoryUsage()/(2**20))\n    final_pathname = chunk_folder+'merged_OUT.pkl'\n    print('before gc Memory (GB) : ', getCurrentMemoryUsage() / (2 ** 20))\n    gc.collect()\n    print('after gc Memory (GB) : ', getCurrentMemoryUsage() / (2 ** 20))\n    df = pd.DataFrame.from_dict(dictionary)\n    save_data(dict=df, filename=final_pathname)\n    print('chunk ', str(i), 'Memory (GB) : ', getCurrentMemoryUsage() / (2 ** 20))\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--n_chunks', '-n', required=False, type=int, default=1,\n                        help='number of chunk lists to create')\n    parser.add_argument('--chunk_folder', '-c', required=False, type=str, default='.',\n                        help='folder where _OUT chunks are')\n\n    args = parser.parse_args()\n    main(args)","sub_path":"PYTHON_PROJ/4_TO_PKLS/2_merge_chunks.py","file_name":"2_merge_chunks.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"481551365","text":"import pprint\n\nfrom airflow.models import BaseOperator\nfrom airflow import utils as airflow_utils, AirflowException\n\nfrom execution import runScorecard\nfrom InformaticaPlugin.operators import available_arguments\nimport os\n\nclass ExecuteScorecard(BaseOperator):\n\n    @airflow_utils.apply_defaults\n    def __init__(self, **kwargs):\n        self.infa_arguments = []\n        self.pre_command = None\n        for key, value in kwargs.items():\n            if key == 'target':\n                self.pre_command = '. ' + os.environ.get('configDir', '.') + '/scheduler_env.' 
+ value + '.sh'\n else:\n if key in available_arguments:\n self.infa_arguments.append(available_arguments[key] + \" \" + value)\n\n super(ExecuteScorecard, self).__init__(\n **kwargs)\n\n def execute(self, context):\n print(\"dag: \" + self.dag.full_filepath)\n print(\"dag_id: \" + self.dag_id)\n print(\"task_type: \" + self.task_type)\n print(\"task id: \" + self.task_id)\n print(\"infa_arguments: \" + ' '.join(self.infa_arguments))\n if self.pre_command is None:\n print(\"no pre_command provided.\")\n else:\n print(\"pre_command: \" + self.pre_command)\n\n infa = runScorecard.ExecuteInformaticaScorecard(self.infa_arguments, log_on_console=True,\n pre_command=self.pre_command)\n\n result = infa.runit(infa.arguments)\n if result.rc != 0:\n raise AirflowException(\"RunScorecard failed: \" + result.message)\n","sub_path":"plugins/InformaticaPlugin/operators/execute_scorecard.py","file_name":"execute_scorecard.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"310637622","text":"# Import Library\nimport tensorflow\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, concatenate\nfrom utils.connect_db import *\n\n\n# Fit modelo no dataset\ndef fit_model(x_train, y_train, n_features=23):\n model = Sequential()\n # Initial layer\n model.add(Dense(units=100, input_shape=(n_features,), activation='relu'))\n model.add(Dropout(0.2))\n # Hidden layer\n model.add(Dense(units=100, activation='relu'))\n model.add(Dropout(0.2))\n # Hidden layer\n model.add(Dense(units=n_features, activation='relu'))\n # Output layer\n model.add(Dense(units=3, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n # Fit model\n model.fit(x_train, y_train, epochs=500, verbose=0)\n\n return model\n\n\ndef load_all_models_members(n_models, n_features=23):\n all_models = []\n for i in range(n_models):\n model = Sequential()\n # Initial layer\n model.add(Dense(units=100, input_shape=(n_features,), activation='relu'))\n model.add(Dropout(0.2))\n # Hidden layer\n model.add(Dense(units=100, activation='relu'))\n model.add(Dropout(0.2))\n # Hidden layer\n model.add(Dense(units=n_features, activation='relu'))\n # Output layer\n model.add(Dense(3, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n # Define filename for this ensemble\n try:\n filename = '/home/projetoia/service-linux/inferencia-service/classificador_fichas/model/modelo_rede_especialista_{}.h5'.format(i+1)\n model.load_weights(filename)\n except Exception as e:\n ds_msg_error = 'ERRO AO CARREGAR O MODELO ESPECIALISTA'\n ds_error = msg_exception('Error: {}'.format(e))\n ds_method = 'load_all_models_members'\n insert_msg_error('', ds_msg_error, ds_error, ds_method)\n\n # Add to list of members\n all_models.append(model)\n\n return all_models\n\n\n# Defines the final model using the members' models as input\ndef define_decisor_model(members):\n # Blocks all layers on all models from being trained\n for model in members:\n for layer in model.layers:\n # Make not trainable\n layer.trainable = False\n # Rename to avoid 'unique layer name' issue\n # layer.name = 'ensemble_' + str(i+1) + '_' + layer.name\n # Define multi-headed input\n ensemble_visible = [model.input for model in members]\n # Concatenate merge output from each model\n ensemble_outputs 
= [model.output for model in members]\n merge = concatenate(ensemble_outputs)\n hidden = Dense(100, activation='relu')(merge)\n output = Dense(3, activation='softmax')(hidden)\n model = Model(inputs=ensemble_visible, outputs=output)\n # model.summary()\n # Plot graph of ensemble\n # plot_model(model, show_shapes=True, to_file='model_graph.png')\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model\n\n\n# Fit the final model\ndef fit_decisor_model(model, X_input, Y_input):\n # Prepare input data\n X = [X_input for _ in range(len(model.input))]\n # Encode output data\n Y_input_cat = to_categorical(Y_input)\n # Fit model\n model.fit(X, Y_input_cat, epochs=100, verbose=0)\n\n return model\n\n\n# Make a prediction with a stacked model\ndef predict_decisor_model(model, X_input):\n # Prepare input data\n X = [X_input for _ in range(len(model.input))]\n # Make prediction\n return model.predict(X, verbose=0)\n","sub_path":"service-linux/retreinamento-service/retreinamento_modelo/utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"275001845","text":"from pyftpdlib.authorizers import DummyAuthorizer\t\t\t#Python FTP server library\nfrom pyftpdlib.handlers import FTPHandler\nfrom pyftpdlib.servers import FTPServer\t\t\t\t\t\t\n\nauthorizer = DummyAuthorizer()\t\t\t\t\t\t#dummy authorizer\n\nauthorizer.add_anonymous(\"/home/sree/Desktop/\",perm=\"elradfmw\")\t\t#adding a user(userid,password) here as anonymous to the server with \t\t\t\t\t\t\t\t\t\tread , write etc permission\n\nhandler = FTPHandler\t\t\t\t\t\t\t\nhandler.authorizer = authorizer\n\nconnection= ('192.168.1.5' , 21)\t\t\t\t\t#host address and port number\n\nserver = FTPServer(connection, handler)\t\t\t\t\t\n\nserver.serve_forever()\n","sub_path":"lsserver.py","file_name":"lsserver.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"157206689","text":"import requests\nurl = 'http://www.webscrapingfordatascience.com/usercheck/'\nr = requests.get(url)\nprint(r.text)\n# Shows: It seems you are using a scraper\nprint(r.request.headers)\n\nmy_headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' + '(KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'\n}\nr = requests.get(url, headers=my_headers)\nprint(r.text)\n# Note that the headers argument does not completely overwrite the default headers completely, but updates it instead, keeping around the default entries too.\nprint(r.request.headers)\n\n\n\nurl = 'http://www.webscrapingfordatascience.com/referercheck/secret.php'\nr = requests.get(url)\n# Shows: Sorry, you seem to come from another web page\nprint(r.text)\n# When encountering such checks in requests, we can simply spoof the “Referer” header as well:\nmy_headers = {\n 'Referer': 'http://www.webscrapingfordatascience.com/referercheck/'\n}\nr = requests.get(url, headers=my_headers)\nprint(r.text)\n\n\n# redirect\nurl = 'http://www.webscrapingfordatascience.com/redirect/'\nr = requests.get(url, allow_redirects=False)\nprint(r.text)\n# You’ll then have to retrieve the “Location” header manually to perform the next requests.get call.\nprint(r.headers)\n\n\n# Unauthorized 401\n# Indeed, the HTTP standard includes a number of authentication mechanisms, one of which can be seen by accessing the URL 
http://www.webscrapingfordatascience.com/ authentication/. You’ll note that this site requests a username and password through your browser. If you press “Cancel,” you’ll note that the website responds with a 401 (“Unauthorized”) result.\n# to verify the username and password that were sent.If everything looks good, the server replies with a 200 page.\n# Otherwise, a 403 (“Forbidden”) is sent ( if the password was incorrect, for instance, or the user doesn’t have access to this page).\n\n# In requests, performing a request with a basic authentication is as simple as including an “Authorization” header, so we still need to figure out how to encrypt the username and password. Instead of doing this ourselves, requests provides another means to do so, using the auth argument:\nurl = 'http://www.webscrapingfordatascience.com/authentication/'\nr = requests.get(url, auth=('myusername', 'mypassword'))\nprint(r.text)\nprint(r.request.headers)\n\n","sub_path":"src/webScraping/withRequests/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"268350847","text":"import requests\nimport json\nimport time\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\nclass Skorozvon:\n\n def __init__(self):\n self.grant_type = \"password\"\n self.username = \"zakaz@rt-voice.com\"\n self.api_key = \"ed122f83c633b8f7bc799577c55e6d31e6fb37d1e96ef8826668b23abac94926\"\n self.client_id = \"29055bf486467ffb99159edf3c21881d8ec4349ee1eb61c0b172364bbcc623b7\"\n self.client_secret = \"172f48c27f7eb1c2322526b8f92d5b25dcc9cbc8785f137a428795b3f4a4cb2a\"\n\n auth_url = \"https://app.skorozvon.ru/oauth/token\"\n auth = {\n \"grant_type\": self.grant_type,\n \"username\": self.username,\n \"api_key\": self.api_key,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret\n }\n\n r = requests.post(auth_url, params=auth)\n res = json.loads(r.text)\n\n self.auth_token = res['access_token']\n self.project_id = 30000000022\n self.project_ids = {\"Тинькофф\": 30000000022, \"Сбербанк\": 30000000024, \"ВТБ24\": 30000000023, \"МТС\": 30000000025, \"ТЕСТ\": 20000000574}\n\n def re_auth(self):\n self.grant_type = \"password\"\n self.username = \"zakaz@rt-voice.com\"\n self.api_key = \"ed122f83c633b8f7bc799577c55e6d31e6fb37d1e96ef8826668b23abac94926\"\n self.client_id = \"29055bf486467ffb99159edf3c21881d8ec4349ee1eb61c0b172364bbcc623b7\"\n self.client_secret = \"172f48c27f7eb1c2322526b8f92d5b25dcc9cbc8785f137a428795b3f4a4cb2a\"\n\n auth_url = \"https://app.skorozvon.ru/oauth/token\"\n auth = {\n \"grant_type\": self.grant_type,\n \"username\": self.username,\n \"api_key\": self.api_key,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret\n }\n\n r = requests.post(auth_url, params=auth)\n res = json.loads(r.text)\n\n self.auth_token = res['access_token']\n self.project_id = 30000000022\n self.project_ids = {\"Тинькофф\": 30000000022, \"Сбербанк\": 30000000024, \"ВТБ24\": 30000000023, \"МТС\": 30000000025,\n \"ТЕСТ\": 20000000574}\n\n def set_odp_state(self, contact):\n\n test = open(\"del_number.txt\", \"a\")\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n url = \"https://app.skorozvon.ru/api/v2/leads/{}\".format(contact[\"ExtID\"])\n\n test.write(datetime.strftime(datetime.now(), \"%Y.%m.%d %H.%M.%S\") + \"\\n\" + url + \"\\n\")\n\n r = requests.delete(url, headers=headers)\n\n test.write(\"{}\\n{}\\n\\n\".format(r.text, r))\n\n return r.text\n\n 
def get_org_data(self, id):\n url = \"https://app.skorozvon.ru/api/v2/leads/{}\".format(id)\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.get(url, headers=headers)\n print(r.text)\n\n def send_data(self, org_list):\n\n #org_data = []\n #for org in org_list:\n # org_data.append({\n # \"id\": org[\"id\"],\n # \"name\": org[\"name\"],\n # #\"post\": org[\"post\"],\n # \"phones\": [org[\"phone\"]],\n # #\"address\": org[\"address\"],\n # #\"external_id\": org[\"external_id\"],\n # \"business\": org[\"business\"]\n # })\n\n url = \"https://app.skorozvon.ru/api/v2/leads/import\"\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n data = {\n \"call_project_id\": 20000003128,\n \"data\": org_list\n }\n\n r = requests.post(url, json=data, headers=headers)\n print(r.text)\n return r.text\n\n def append_to_project(self, id, project):\n request = json.loads(\n \"\"\"{\"limit\":\"100\",\"page\":0,\"ids\":[20003881696],\"all\":false,\"order\":{\"column\":\"name\",\"direction\":0},\"project_id\":20000000574,\"excluded_ids\":[],\"filter\":{\"tags_condition\":\"or\",\"managers\":\"all\",\"scenario_id\":\"\",\"call_result\":\"\",\"business\":\"\",\"firm_name\":\"\",\"file_id\":\"\",\"city\":\"\",\"tags\":\"all\",\"used_by\":\"user\",\"term\":\"СУШАНЛО\",\"project_id\":20000000574,\"result_by\":\"client\",\"tags_exclusion\":null},\"authenticity_token\":\"f4zWKL3f1NnbuNLuOZby9txkj876dmIPe+Q4NxgUI1c=\",\"utf8\":\"✓\"}\"\"\")\n url = \"https://shard3-lb1.skorozvon.ru/settings/contacts\"\n\n request[\"ids\"] = [id]\n request[\"project_id\"] = project\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.put(url, json=request, headers=headers)\n print(r.text)\n\n def mass_append_to_project(self, ids, project):\n request = json.loads(\n \"\"\"{\"limit\":\"30\",\"page\":0,\"ids\":[30000716161],\"all\":false,\"order\":{\"column\":\"name\",\"direction\":1},\"project_id\":30000000022,\"excluded_ids\":[],\"project_mode\":\"save_and_transfer\",\"slices\":[],\"filter\":{\"tags_condition\":\"or\",\"managers\":\"all\",\"scenario_id\":\"\",\"call_result\":\"\",\"business\":\"\",\"firm_name\":\"\",\"file_id\":\"\",\"city\":\"\",\"tags\":[30000000346],\"used_by\":\"user\",\"result_by\":\"client\",\"tags_exclusion\":null,\"event_type\":null},\"authenticity_token\":\"AKOKwdsZeOczvqpfEMW4/8W4eC9s5c4wUrNfbHIl4As=\",\"utf8\":\"✓\"}\"\"\")\n url = \"https://shard3-lb1.skorozvon.ru/settings/contacts\"\n\n request[\"ids\"] = ids\n request[\"project_id\"] = project\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.put(url, json=request, headers=headers)\n print(r.text)\n\n def is_contacts_in_project(self, project, downloads):\n\n url = \"https://shard3-lb1.skorozvon.ru/bootstrap\"\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.get(url, headers=headers)\n res = json.loads(r.text)\n\n tag_ids = []\n\n for tag in res[\"tags\"]:\n\n for download in downloads:\n if tag[\"title\"] == \"Загрузка {}\".format(download):\n tag_ids.append(tag[\"id\"])\n\n for tag_id in tag_ids:\n url = 
\"https://shard3-lb1.skorozvon.ru/settings/contacts?limit=30&page=0&order%5Bcolumn%5D=name&order%5Bdirection%5D=1&filter%5Btags_condition%5D=or&filter%5Bmanagers%5D=all&filter%5Bscenario_id%5D=&filter%5Bcall_result%5D=&filter%5Bbusiness%5D=&filter%5Bfirm_name%5D=&filter%5Bfile_id%5D=&filter%5Bcity%5D=&filter%5Btags%5D%5B%5D={}&filter%5Bused_by%5D=user&filter%5Bproject_id%5D={}&filter%5Bresult_by%5D=client&filter%5Btags_exclusion%5D=&filter%5Bevent_type%5D=\"\n url = url.format(tag_id, project)\n\n r = requests.get(url, headers=headers)\n res = json.loads(r.text)\n\n if len( res[\"data\"][\"leads\"] ) > 0:\n return True\n\n return False\n\n print(r.text)\n\n def send_org(self, org, project, id, pl):\n\n pl.start_operation(\"Загрузка данных\")\n\n org_data = {\n \"name\": org[\"name\"],\n \"post\": org[\"post\"],\n \"phones\": [org[\"phone\"]],\n \"address\": org[\"address\"],\n \"external_id\": org[\"external_id\"],\n \"city\": org[\"business\"],\n \"tags\": [\n \"Загрузка \" + str(id)\n ]\n }\n\n url = \"https://app.skorozvon.ru/api/v2/leads\"\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.post(url, json=org_data, headers=headers)\n print(r.text)\n\n is_parse_error = False\n\n try:\n res = json.loads(r.text)\n except:\n is_parse_error = True\n\n if is_parse_error or r.text == \"Internal Server Error\":\n self.re_auth()\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.post(url, json=org_data, headers=headers)\n print(r.text)\n\n res = json.loads(r.text)\n\n org_id = res[\"id\"]\n\n pl.end_operation()\n #pl.start_operation(\"Добавление в проект\")\n\n #self.append_to_project(org_id, self.project_ids[project])\n\n #pl.end_operation()\n\n return org_id\n\n def get_contractor_info(self, ext_id):\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n url = \"https://shard3-lb1.skorozvon.ru/leads/{}?history_limit=1\".format(ext_id)\n\n r = requests.get(url, headers=headers)\n\n return json.loads(r.text)\n\n def get_list(self, start_date, end_date, results_ids, scenario_ids):\n end_date = start_date\n start_date = end_date[6:] + \"-\" + end_date[3:5] + \"-\" + end_date[:2] + \"T00:00:00.000+05:00\"\n\n end_date = datetime.strptime(end_date, \"%d.%m.%Y\")\n #end_date = end_date - timedelta(days=1)\n end_date = end_date.strftime(\"%Y-%m-%d\")\n end_date = end_date + \"T23:59:59.999+05:00\"\n\n url = \"https://shard3-lb1.skorozvon.ru/reports.json\"\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n res = []\n\n for scenario_id in scenario_ids:\n is_complete = False\n offset = 0\n limit = 100\n\n while not is_complete:\n data = {\"order\": {\n \"column\": \"start\",\n \"direction\": \"1\"\n },\n \"name\": \"calls_results\",\n \"filter\": {\n \"range\": [start_date, end_date],\n \"results_ids\": results_ids,\n \"scenario_id\": scenario_id\n },\n \"settings\": {\n \"filter\": {\n \"range\": [start_date, end_date]}\n },\n \"limit\": limit,\n \"page\": offset}\n\n r = requests.post(url, json=data, headers=headers)\n res_info = json.loads(r.text)\n\n res += res_info['reports'][0]['data']['calls']\n\n if len(res_info['reports'][0]['data']['calls']) < limit:\n is_complete = True\n else:\n offset += 1\n\n return res\n\n def get_scenario_list(self):\n\n url = \"https://app.skorozvon.ru/api/v2/scenarios\"\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.get(url, headers=headers)\n print(r.text)\n res = json.loads(r.text)\n\n return res\n\n def get_scenario_attribs(self, scenario_id):\n\n url = 
\"https://app.skorozvon.ru/api/v2/scenarios/{}/results\".format(scenario_id)\n\n headers = {'Authorization': 'Bearer ' + self.auth_token}\n\n r = requests.get(url, headers=headers)\n print(r.text)\n res = json.loads(r.text)\n\n return res\n\n\ndef send_to_partner(data):\n client = Skorozvon()\n\n client.send_data(data)\n\n\nif __name__ == \"__main__\":\n\n client = Skorozvon()\n res = client.get_scenario_attribs(30000002080)\n\n i = 0\n","sub_path":"PartnerSender.py","file_name":"PartnerSender.py","file_ext":"py","file_size_in_byte":10823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"494615834","text":"from random import *\n\n##Exercice 1\nN = randrange(100)\nn = -1\n\nwhile(n!=N):\n n = int(input('Quelle est la valeur du nombre ?'))\n if(n < N):\n print('C\\'est plus !')\n elif(n > N):\n print('C\\'est moins !')\n else:\n print('C\\'est gagné !')\n \n##Exercice 2\nsquare = u\"\\u25A1\"\ngrille = [[square,square,square],[square,square,square],[square,square,square]]\n\ndef afficherGrille():\n print('----------------------------')\n for line in grille:\n print(line)\n print('----------------------------')\n\n \ndef tourOrdi():\n while(True):\n l = randrange(3)\n c = randrange(3)\n if(grille[l][c] == square):\n break\n grille[l][c] = 'O'\n \ndef tourJoueur():\n while(True):\n l = int(input('Dans quelle ligne voulez-vous placer une croix ? '))-1\n c = int(input('Quelle colonne ? '))-1\n if(grille[l][c] == square):\n break\n grille[l][c] = 'X'\n\ndef testerVictoire():\n #Teste les 3 lignes et les 3 colonnes\n for i in range(3):\n if(grille[i][0] == grille[i][1] == grille[i][2] == ('X' or 'O')):\n victoire(grille[i][0])\n if(grille[0][i] == grille[1][i] == grille[2][i] == ('X' or 'O')):\n victoire(grille[0][i])\n\n #1ère diagonale\n if(grille[0][0] == grille[1][1] == grille[2][2] == ('X' or 'O')):\n victoire(grille[0][0])\n \n #2ème diagonale\n if(grille[0][2] == grille[1][1] == grille[2][0] == ('X' or 'O')):\n victoire(grille[0][2])\n\ndef victoire(gagnant):\n global fini\n if(gagnant == 'O'):\n print('L\\'ordinateur a gagné !')\n elif(gagnant == 'X'):\n print('Vous avez gagné !')\n fini = True\n \nafficherGrille()\n\nfini = False\nwhile not(fini):\n print('A vous de jouer !')\n tourJoueur()\n afficherGrille()\n testerVictoire()\n \n if(fini):\n break\n \n print('Lordinateur joue')\n tourOrdi()\n afficherGrille()\n testerVictoire()\n\nprint('\\nFin de la partie.')","sub_path":"tp6.py","file_name":"tp6.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"27714597","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\n\nimport numpy as np\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm import tqdm\n\nfrom utils import CoauthorDataset, query_collate_fn, load_data, load_checkpoint\nfrom models import GCN, RNN\n\n\n# Training settings\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\n\nparser.add_argument('-b', '--batch_size', type=int, default=1)\nparser.add_argument('-j', '--workers', type=int, default=4)\nparser.add_argument('--pretrained_gcn', type=str, required=True)\nparser.add_argument('--pretrained_rnn', type=str, required=True)\n\nparser.add_argument('--dropout', 
type=float, default=0.5, help='Dropout rate (1 - keep probability).')\n\nparser.add_argument('--feature_node', type=int, default=256, help='node_2_vec_feature_dim')\nparser.add_argument('--hidden_gcn', type=int, default=200)\nparser.add_argument('--feature_gcn', type=int, default=128)\nparser.add_argument('--model', type=str, default='adj', help='Choosing between the adj and the node2vec')\n\n# node2vec setting\nparser.add_argument('--walk_length', type=int, default=80, help='node2vec walk_length')\nparser.add_argument('--num_walks', type=int, default=10, help='node2vec num_walks')\n\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\n# Load data\nadj, features = load_data(args=args)\n# bulid symmetric adj matrix\n\ntest_dataset = CoauthorDataset('query_private.txt')\ntest_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.workers, collate_fn=query_collate_fn)\n\n\n# Model and optimizer\nif args.model == 'adj':\n model = GCN(nfeat=features.size()[1],\n nhid=args.hidden_gcn,\n nclass=args.feature_gcn,\n dropout=args.dropout)\nelif args.model == 'node2vec':\n model = GCN(nfeat=args.feature_node,\n nhid=args.hidden_gcn,\n nclass=args.feature_gcn,\n dropout=args.dropout)\nrnn = RNN(args.feature_gcn)\n\nif args.cuda:\n model.cuda()\n rnn.cuda()\n features = features.cuda()\n adj = adj.cuda()\n\n\ndef test():\n model.eval()\n rnn.eval()\n\n with torch.no_grad():\n embedding = model(features, adj)\n embedding = F.pad(embedding, (0, 0, 1, 0), 'constant', 0)\n with open('./project_data/answer_private.txt', 'w') as f:\n for queries, labels in tqdm(test_loader):\n queries = queries.cuda()\n logits = rnn(queries, embedding)\n if logits > 0.0:\n f.write(\"True\\n\")\n else:\n f.write(\"False\\n\")\n\n\nif __name__ == '__main__':\n load_checkpoint(model, args.pretrained_gcn)\n load_checkpoint(rnn, args.pretrained_rnn)\n\n test()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"100164869","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 24 16:14:47 2017\n\n@author: vineeth\n\"\"\"\n\nimport numpy as np\nimport random\nimport math\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\nfrom copy import copy, deepcopy\nfrom IPython import get_ipython\n#get_ipython().run_line_magic('matplotlib', 'qt')\n\n\n\ndef ASI_Lattice(M,N,State):\n a = np.ones(N, dtype=int)\n a[::2]=0\n a = a.reshape(N)\n b = np.zeros(N, dtype=int)\n b[::2]=1\n L = deepcopy(a)\n S = np.zeros((M,N))\n for d in range(0,M-1):\n if d % 2 == 0: \n L = np.row_stack((L,b))\n else:\n L = np.row_stack((L,a))\n \n#===============Different intial states\n \n#Intialize the random ASI spin array\n if (State == 0):\n S = np.random.rand(M,N) \n S[S>0.5] = 1\n S[S<0.5] = -1\n\n#Intialize spin array to +M\n elif (State == 1):\n S = np.ones((M,N)) #Intialize spin array to +M\n\n# True antiferromagnetic state \n else: \n for i in range (0,N):\n if(i%2==0 and (i//2)%2==0):\n for j in range (0,M): \n if(j%2==0):\n S[i,j] = 0 \n elif(j%2 ==1):\n if((j//2)%2 == 0 or j==1):# and (i//2)%2==0):\n S[i,j] = 1\n else:\n S[i,j] = -1\n \n elif(i%2==0 and (i//2)%2==1):\n for j in range (0,M): \n if(j%2==0):\n S[j,i] = 0 \n elif(j%2 ==1):\n if((j//2)%2 == 0 or j==1):# and (i//2)%2==0):\n S[i,j] = -1\n 
else:\n S[i,j] = 1\n \n elif(i%2==1 and (i//2)%2==0):\n for j in range (0,M):\n if(j%2==1):\n S[j,i] = 0 \n elif(j%2==0):\n if((j//2)%2 == 0 or j==0):# and (i//2)%2==0):\n S[i,j] = 1\n else:\n S[i,j] = -1 \n else:\n for j in range (0,M):\n if(j%2==1):\n S[j,i] = 0 \n elif(j%2==0):\n if((j//2)%2 == 0 or j==0):# and (i//2)%2==0):\n S[i,j] = -1\n else:\n S[i,j] = 1\n\n \n S = S*L\n Sy = deepcopy(S)\n Sy[::2] = 0\n Sx = deepcopy(S)\n Sx[1::2] = 0\n Sxy = np.stack([Sx, Sy], axis=0)\n return (S,Sxy,Sx,Sy)\n###########################################################################################################################################\n\n\n\n\n\ndef sort_vertex_type(yii,xii,Sxy,M,N): ## *&* Index of the vertex and not of spin\n Vtype = 0\n if (10: # np.dot(mi,mi)!=0 or\n Ez = -np.dot(mi,H) # Zeeman energy # Calculate the energy of the single chosen spin\n\n if exchange_switch == 1: # Exchange interactions with 4 nearest neighbour\n if yi != 0:\n ExT = -np.dot(mi, Sxy[:,yi-2,xi]) # top\n else:\n ExT = 0\n if xi != 0:\n ExL = -np.dot(mi, Sxy[:,yi,xi-2]) # left\n else:\n ExL = 0\n if yi != M-1:\n ExB = -np.dot(mi, Sxy[:,yi+2,xi]) # bottom\n else:\n ExB = 0\n if xi != N-1:\n ExR = -np.dot(mi, Sxy[:,yi,xi+2]) # right\n else:\n ExR = 0 \n Ex = ExL + ExR + ExT + ExB \n \n#=====Calculate dipolar energy of spin and spin-flipped:: Next nearest neigbour======================== \n if dipolar_switch == 1: \n #==========4 Next nearest neighbour dipolar coupling=========================\n if (yi > 1): # mi= (mx,my); mx = Sxy[0,yi,xi] and my = Sx[1,yi,xi]; r = (x,y)\n EdT = C*np.dot(Sxy[:,yi-2,xi],mi)/8 - 3*C*np.dot(Sxy[:,yi-2,xi],(0,2))*np.dot(mi,(0,2))/32\n else:\n EdT = 0\n if (xi > 1):\n EdL = C*np.dot(Sxy[:,yi,xi-2],mi)/8 - 3*C*np.dot(Sxy[:,yi,xi-2],(-2,0))*np.dot(mi,(-2,0))/32\n else:\n EdL = 0\n if (yi < M-2):\n EdB = C*np.dot(Sxy[:,yi+2,xi],mi)/8 - 3*C*np.dot(Sxy[:,yi+2,xi],(0,-2))*np.dot(mi,(0,-2))/32\n else:\n EdB = 0\n if (xi < N-2):\n EdR = C*np.dot(Sxy[:,yi,xi+2],mi)/8 - 3*C*np.dot(Sxy[:,yi,xi+2],(2,0))*np.dot(mi,(2,0))/32\n else:\n EdR = 0\n\n#======Nearest neigbour r = (1,-1),(-1,1),(-1,-1) and (1,1)\n if (yi > 0 and xi < N-1):# mi= (mx,my); mx = Sxy[0,yi,xi] and my = Sx[1,yi,xi]\n Ed1 = -3*C*np.dot(Sxy[:,yi-1,xi+1],(1,1))*np.dot(mi,(1,1))/5.656854#2**(2.5)# + C*np.dot(Sxy[:,yi-1,xi+1],mi)/2**(1.5) # top right\n else:\n Ed1 = 0 # top right\n if (yi > 0 and xi > 0):\n Ed4 = -3*C*np.dot(Sxy[:,yi-1,xi-1],(-1,1))*np.dot(mi,(-1,1))/5.656854#2**(2.5)# + C*np.dot(Sxy[:,yi-1,xi-1],mi)/2**(1.5)\n else:\n Ed4 = 0 # top left\n if (yi < N-1 and xi > 0):\n Ed3 = -3*C*np.dot(Sxy[:,yi+1,xi-1],(-1,-1))*np.dot(mi,(-1,-1))/5.656854#2**(2.5)# + C*np.dot(Sxy[:,yi+1,xi-1],mi)/2**(1.5)\n else:\n Ed3 = 0 # left bottom\n if (yi < N-1 and xi < M-1):\n Ed2 = -3*C*np.dot(Sxy[:,yi+1,xi+1],(1,-1))*np.dot(mi,(1,-1))/5.656854#2**(2.5)# + C*np.dot(Sxy[:,yi+1,xi+1],mi)/2**(1.5)\n else:\n Ed2 = 0 # right bottom \n \n Ed = Ed1 + Ed2 + Ed3 + Ed4 # + EdL + EdR + EdT + EdB \n \n if (Exchange_Bias==1):\n Eeb = -1*np.dot(mi, Hxy_eb_arr[:,yi,xi])\n else:\n Eeb = 0\n#===========================================================================================\n E = Ed + Ez + Ex + Eeb\n return(E, Ed, Ez, Ex, Eeb) #+ Ex # Total energy","sub_path":"Function_ASI_Energy.py","file_name":"Function_ASI_Energy.py","file_ext":"py","file_size_in_byte":11466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"151509467","text":"# encoding: utf-8\nimport io\n\n\n\nclass 
DiffParser(object):\n\n    _DIFF_SYMBOLS = (\"diff\", \"---\", \"+++\", \"@@\")\n    _PARSED_DIFFS = {}\n    start_line = 0\n    diff_name = None\n    place = None\n\n    def __init__(self, diff):\n        if not isinstance(diff, unicode):\n            raise UnicodeError('No unicode diff found.')\n        self.diff = io.StringIO(diff)\n\n    def _line_nums(self, line):\n        start = line[line.find('-')+1:line.find(',')]\n        try:\n            int(start)\n        except ValueError:\n            start = line[line.find('-')+1:line.find('-')+1+line[line.find('-'):].find(' ')]\n        # self._PARSED_DIFFS[self.diff_name].setdefault('chunks', {}).setdefault('places', []).append(line[:line.rfind('@@')+2])\n        first = line.replace(line[:line.rfind('@@')+2], '')\n\n        if len(first) != 1:\n            res = {'number': int(start), 'line': first}\n            self._PARSED_DIFFS[self.diff_name].setdefault('chunks', {}).setdefault('content', []).append(res)\n        dogs = line[:line.rfind('@@')+2]\n\n        #self._PARSED_DIFFS[self.diff_name].setdefault('chunks', {}).setdefault('places', []).append(dogs)\n\n        removed = dogs[dogs.find('-')+1:dogs.find('+')].strip()\n        added = dogs[dogs.find('+')+1:].strip('@').strip()\n        removed = removed[:removed.find(',')]\n        added = added[:added.find(',')]\n        try:\n            removed = int(removed)\n            added = int(added)\n            if removed <= added:\n                self.start_line = int(removed)\n            else:\n                self.start_line = int(added)\n        except ValueError:\n            pass\n\n        self._PARSED_DIFFS[self.diff_name]['start_line'] = int(start)\n\n    def _handler(self, line):\n        try:\n            line_starts = line.split()[0]\n        except IndexError:\n            pass\n        else:\n            if line.startswith('diff'):\n                self._PARSED_DIFFS.setdefault(line, {})\n                self.diff_name = line\n\n            # Append extended diff content\n            if line[:1].isalpha() and line_starts not in self._DIFF_SYMBOLS:\n                self._PARSED_DIFFS[self.diff_name].setdefault('extended', []).append(line)\n\n            # Append filename\n            if line.startswith(\"---\") or line.startswith(\"+++\"):\n                if '/dev/null' not in line:\n                    filename = line.split()[1][1:]\n                    self._PARSED_DIFFS[self.diff_name].setdefault('filename', filename)\n\n            # And some chunks\n            if line.startswith(\"@@\"):\n                self._PARSED_DIFFS[self.diff_name].setdefault('chunks', {}).setdefault(line, {})\n                self.place = line\n                self._line_nums(line)\n\n            if line_starts not in self._DIFF_SYMBOLS and not line[:1].isalpha():\n                res = {'number': self.start_line, 'line': line}\n                self._PARSED_DIFFS[self.diff_name]['chunks'][self.place].setdefault('content', []).append(res)\n                self.start_line += 1\n\n    def _format_diff(self):\n        diff = \"\"\n        for line in self.diff.readlines():\n            if line.startswith(\"@@\") and not line.endswith(\"@@\\n\"):\n                self._handler(line[:line.rfind(\"@@\")+2] + \"\\n\")\n                self._handler(line[line.rfind(\"@@ \")+2:])\n            else:\n                self._handler(line)\n        return diff\n\n    def parse(self):\n        self._format_diff()\n        return self._PARSED_DIFFS","sub_path":"plainreview/apps/core/diff_utils.py","file_name":"diff_utils.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"334721369","text":"import os\nimport sys\nimport misc\n\nclass Merger:\n    scriptfile = None\n    enginefile = 'engine.js'\n    tempdir = None\n    logs = False\n\n    def __init__(self, scriptfile, tempdir, options,):\n        self.tempdir = tempdir\n        self.scriptfile = scriptfile\n        for av in options:\n            if av[0] == '-l' : self.logs = True\n\n    def getScript(self):\n        data = misc.loadFile(self.scriptfile)\n        config = []\n        for i in range(0, len(data)):\n            if data[i].replace('\\n', '').replace(' ', '') == \"varconfig={\":\n                for j in range(i, len(data)):\n                    config.append(data[j])\n                    if data[j].replace('\\n', '').replace(' ', '') == \"};\":\n                        break\n        if not config:\n            sys.stderr.write(\"No config found in script\\n\")\n            sys.exit(1)\n        for i in range(0, len(data)):\n            data[i] = data[i].replace(\"exit\", \"process.exit\")\n            if (self.logs):\n                data[i] = data[i].replace(\"log\", \"console.log\")\n            else:\n                data[i] = data[i].replace(\"log\", \"// console.log\")\n        return data, config\n\n    def merge(self):\n        print(\"Merging...\")\n        script, config = self.getScript()\n        engine = misc.loadFile(self.enginefile)\n        temp = engine[:1] + config + engine[:len(engine) - 18]\n        temp += script[len(config):] + [\"\\n\"] + engine[len(engine)-19:]\n        with open(os.getcwd() + \"/\" + self.tempdir + \"/script.js\", \"w\") as file:\n            for line in temp : file.write(line)\n        print(\"'\" + self.scriptfile + \"' merged\")","sub_path":"merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"274226762","text":"from models import Profile\n\n\ndef save_profile(backend, user=None, response={}, *args, **kwargs):\n\ttry:\n\t\tprofile = user.profile\n\texcept Profile.DoesNotExist:\n\t\tprofile = Profile()\n\t\tprofile.user = user\n\n\tif backend.name == 'google-oauth2' and not profile.avatar:\n\t\tif not response['image']['isDefault']:\n\t\t\tprofile.avatar = response['image']['url'].split('?')[0]\n\t\t\tprofile.save()\n","sub_path":"users/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"408932450","text":"import cv2\r\nimport dlib\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport _pickle as pkl\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense\r\nfrom keras.callbacks import EarlyStopping\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nx, y = pkl.load(open('F:/python/samples.pkl', 'rb'))\r\n\r\nprint(x.shape, y.shape)\r\n\r\nroll, pitch, yaw = y[:, 0], y[:, 1], y[:, 2]\r\n\r\nprint(roll.min(), roll.max(), roll.mean(), roll.std())\r\nprint(pitch.min(), pitch.max(), pitch.mean(), pitch.std())\r\nprint(yaw.min(), yaw.max(), yaw.mean(), yaw.std())\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)\r\nx_val, x_test, y_val, y_test = train_test_split(x_test, y_test, test_size=0.5, random_state=42)\r\n\r\nprint(x_train.shape, y_train.shape)\r\nprint(x_val.shape, y_val.shape)\r\nprint(x_test.shape, y_test.shape)\r\n\r\nstd = StandardScaler()\r\nstd.fit(x_train)\r\nx_train = std.transform(x_train)\r\nx_val = std.transform(x_val)\r\nx_test = std.transform(x_test)\r\n\r\nBATCH_SIZE = 64\r\nEPOCHS = 100\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(units=20, activation='relu', kernel_regularizer='l2', input_dim=x.shape[1]))\r\nmodel.add(Dense(units=10, activation='relu', kernel_regularizer='l2'))\r\nmodel.add(Dense(units=3, activation='linear'))\r\n\r\nprint(model.summary())\r\n\r\ncallback_list = [EarlyStopping(monitor='val_loss', patience=25)]\r\n\r\nmodel.compile(optimizer='adam', loss='mean_squared_error')\r\nhist = model.fit(x=x_train, y=y_train, validation_data=(x_val, y_val), batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=callback_list)\r\nmodel.save('F:/python/model.h5')\r\n\r\nprint()\r\nprint('Train loss:', model.evaluate(x_train, y_train, verbose=0))\r\nprint(' Val loss:', 
model.evaluate(x_val, y_val, verbose=0))\r\nprint(' Test loss:', model.evaluate(x_test, y_test, verbose=0))\r\n\r\nhistory = hist.history\r\nloss_train = history['loss']\r\nloss_val = history['val_loss']\r\n\r\nplt.figure()\r\nplt.plot(loss_train, label='train')\r\nplt.plot(loss_val, label='val_loss', color='red')\r\nplt.legend()\r\n\r\ny_pred = model.predict(x_test)\r\ndiff = y_test - y_pred\r\ndiff_roll = diff[:, 0]\r\ndiff_pitch = diff[:, 1]\r\ndiff_yaw = diff[:, 2]\r\n\r\nplt.figure(figsize=(16, 10))\r\n\r\nplt.subplot(3, 1, 1)\r\nplt.plot(diff_roll, color='red')\r\nplt.title('roll')\r\n\r\nplt.subplot(3, 1, 2)\r\nplt.plot(diff_pitch, color='red')\r\nplt.title('pitch')\r\n\r\nplt.subplot(3, 1, 3)\r\nplt.plot(diff_yaw, color='red')\r\nplt.title('yaw')\r\n\r\nplt.tight_layout()\r\n\r\ndef detect_face_points(image):\r\n detector = dlib.get_frontal_face_detector()\r\n predictor = dlib.shape_predictor(\"C:/Users/HP/~/openface/models/dlib/shape_predictor_68_face_landmarks.dat\")\r\n face_rect = detector(image, 1)\r\n if len(face_rect) != 1: return []\r\n \r\n dlib_points = predictor(image, face_rect[0])\r\n face_points = []\r\n for i in range(68):\r\n x, y = dlib_points.part(i).x, dlib_points.part(i).y\r\n face_points.append(np.array([x, y]))\r\n return face_points\r\n \r\ndef compute_features(face_points):\r\n #assert (len(face_points) == 68), \"len(face_points) must be 68\"\r\n face_points \r\n face_points = np.array(face_points)\r\n features = []\r\n for i in range(68):\r\n for j in range(i+1, 68):\r\n features.append(np.linalg.norm(face_points[i]-face_points[j]))\r\n \r\n return np.array(features).reshape(1, -1)\r\n\r\n\r\n#im = cv2.imread(\"G:\\photos\\pictures\\1.jpg\")#, cv2.IMREAD_COLOR)\r\n#im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\r\n##imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\r\n#face_points = detect_face_points(im)\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Loop once video is successfully loaded\r\nwhile True:\r\n \r\n # Read first frame\r\n ret, im = cap.read()\r\n im = cv2.resize(im, None,fx=0.3, fy=0.4, interpolation = cv2.INTER_LINEAR)\r\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\r\n face_points = detect_face_points(im)\r\n for x, y in face_points:\r\n cv2.circle(im, (x, y), 1, (0, 255, 0), -1)\r\n try:\r\n \r\n features = compute_features(face_points)\r\n features = std.transform(features)\r\n model = load_model('F:/python/model.h5')\r\n y_pred = model.predict(features)\r\n \r\n roll_pred, pitch_pred, yaw_pred = y_pred[0]\r\n print(' Roll: {:.2f}°'.format(roll_pred))\r\n print('Pitch: {:.2f}°'.format(pitch_pred))\r\n print(' Yaw: {:.2f}°'.format(yaw_pred))\r\n print('')\r\n except:\r\n print(\"Look at the camera\")\r\n \r\n \r\n \r\n \r\n \r\n# cv2.putText(im,'Roll',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2) \r\n# cv2.putText(im,'Pitch',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2) \r\n# cv2.putText(im,'Yaw',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2) \r\n# \r\n \r\n plt.figure(figsize=(10, 10))\r\n #cv2.imshow(im)\r\n cv2.imshow('img',im)\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 13:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"Head_Pose.py","file_name":"Head_Pose.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"262014408","text":"import os\nfrom setuptools import setup, find_packages\n\nversion = '1.2.8'\n\nwith open('README.md') as readme:\n long_description = readme.read()\n\ndef recursive_requirements(requirement_file, 
libs, links, path=''):\n if not requirement_file.startswith(path):\n requirement_file = os.path.join(path, requirement_file)\n with open(requirement_file) as requirements:\n for requirement in requirements.readlines():\n if requirement.startswith('-r'):\n requirement_file = requirement.split()[1]\n if not path:\n path = requirement_file.rsplit('/', 1)[0]\n recursive_requirements(requirement_file, libs, links,\n path=path)\n elif requirement.startswith('-f'):\n links.append(requirement.split()[1])\n elif requirement.startswith('--allow'):\n pass\n else:\n libs.append(requirement)\n\nlibraries, dependency_links = [], []\nrecursive_requirements('requirements.txt', libraries, dependency_links)\n\nsetup(name='django-cas-sso',\n version=version,\n install_requires=libraries,\n dependency_links=dependency_links,\n description=\"Django Cas SSO Client (inherited from django-cas)\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: BSD License\",\n ],\n keywords=['django', 'cas', 'sso'],\n author='di-dip-unistra',\n author_email='di-dip@unistra.fr',\n maintainer='di-dip-unistra',\n maintainer_email='di-dip@unistra.fr',\n url='http://github.com/unistra/django-cas/',\n download_url='http://pypi.python.org/pypi/django-cas-sso',\n license='MIT',\n entry_points={},\n packages=find_packages(),\n include_package_data=True,\n zip_safe=True,\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"490641596","text":"\"\"\"\nFile : MSOtput.py\n\nDescription: MSOutput.py class provides the whole logic behind\nthe Output data placement in WMCore MicroServices.\n\"\"\"\n\n# futures\nfrom __future__ import division, print_function\n\n# system modules\nfrom retry import retry\n\n# WMCore modules\nfrom WMCore.MicroService.DataStructs.DefaultStructs import OUTPUT_REPORT,\\\n OUTPUT_MONGO_DOC\nfrom WMCore.MicroService.DataStructs.Workflow import Workflow\nfrom WMCore.MicroService.Unified.MSCore import MSCore\nfrom WMCore.MicroService.Unified.RequestInfo import RequestInfo\nfrom WMCore.Services.DDM.DDM import DDM, DDMReqTemplate\nfrom Utils.EmailAlert import EmailAlert\n\n\nclass MSOutput(MSCore):\n \"\"\"\n MSOutput.py class provides the whole logic behind the Output data placement\n in MicroServices.\n \"\"\"\n\n def __init__(self, msConfig, logger=None):\n \"\"\"\n Runs the basic setup and initialization for the MSOutput module\n :param microConfig: microservice configuration\n \"\"\"\n super(MSOutput, self).__init__(msConfig, logger)\n\n self.msConfig.setdefault(\"limitRequestsPerCycle\", 500)\n self.msConfig.setdefault(\"verbose\", True)\n self.msConfig.setdefault(\"interval\", 600)\n self.msConfig.setdefault(\"services\", ['output'])\n self.msConfig.setdefault(\"defaultDataManSys\", \"DDM\")\n self.msConfig.setdefault(\"defaultGroup\", \"DataOps\")\n self.msConfig.setdefault(\"enableAggSubscr\", True)\n self.msConfig.setdefault(\"enableDataPlacement\", False)\n self.msConfig.setdefault(\"excludeDataTier\", ['NANOAOD'])\n 
self.msConfig.setdefault(\"rucioAccount\", 'wma_test')\n self.uConfig = {}\n self.emailAlert = EmailAlert(self.msConfig)\n\n @retry(tries=3, delay=2, jitter=2)\n def updateCaches(self):\n \"\"\"\n Fetch some data required for the output logic, e.g.:\n * unified configuration\n \"\"\"\n self.uConfig = self.unifiedConfig()\n if not self.uConfig:\n raise RuntimeWarning(\"Failed to fetch the unified configuration\")\n\n def execute(self, reqStatus):\n \"\"\"\n Executes the whole output data placement logic\n :return: summary\n \"\"\"\n # start threads in MSManager which should call this method\n # NOTE:\n # Here we should make the whole logic - like:\n # * Calling the system to fetch the workflows from;\n # * Creating the workflow objects;\n # * Pushing them into the back end database system we choose for bookkeeping\n # * Updating their status in that system, both MsStatus (subscribed,\n # processing, etc.) and also the Reqmgr status\n # * Associate and keep track of the requestID/subscriptionID/ruleID\n # returned by the Data Management System and the workflow\n # object (through the bookkeeping machinery we choose/develop)\n summary = dict(OUTPUT_REPORT)\n\n # We should decide here what variable name to use request vs. workflow\n try:\n requestRecords = self.getRequestRecords(reqStatus)\n self.updateReportDict(summary, \"total_num_requests\", len(requestRecords))\n msg = \" retrieved %s requests. \" % len(requestRecords)\n msg += \"Service set to process up to %s requests per cycle.\" % self.msConfig[\"limitRequestsPerCycle\"]\n self.logger.info(msg)\n except Exception as err: # general error\n msg = \"Unknown exception while fetching requests from ReqMgr2. Error: %s\", str(err)\n self.logger.exception(msg)\n self.updateReportDict(summary, \"error\", msg)\n\n try:\n self.updateCaches()\n except RuntimeWarning as ex:\n msg = \"All retries exhausted! Last error was: '%s'\" % str(ex)\n msg += \"\\nRetrying to update caches again in the next cycle.\"\n self.logger.error(msg)\n self.updateReportDict(summary, \"error\", msg)\n return summary\n except Exception as ex:\n msg = \"Unknown exception updating caches. Error: %s\" % str(ex)\n self.logger.exception(msg)\n self.updateReportDict(summary, \"error\", msg)\n return summary\n\n # this one is put here just for example.\n self.updateReportDict(summary, \"ddm_request_id\", 42)\n return summary\n\n def makeSubscriptions(self, workflows=[]):\n \"\"\"\n The common function to make the final subscriptions. It depends on the\n default Data Management System configured through msConfig. Based on that\n The relevant service wrapper is called.\n :return: A list of results from the REST interface of the DMS in question\n \"\"\"\n\n # NOTE:\n # Here is just an example construction of the function. None of the\n # data structures used to visualise it is correct. 
To Be Updated\n results = []\n if self.msConfig['defaultDataManSys'] == 'DDM':\n # TODO: Here to put the dryrun mode: True/False\n ddm = DDM(\n url=self.msConfig['ddmUrl'],\n logger=self.logger,\n enableDataPlacement=self.msConfig['enableDataPlacement'])\n\n ddmReqList = []\n for workflow in workflows:\n for output in workflow['output']:\n ddmReqList.append(DDMReqTemplate('copy', item=output))\n\n if self.msConfig['enableAggSubscr']:\n results = ddm.makeAggRequests(ddmReqList, aggKey='item')\n else:\n for ddmReq in ddmReqList:\n results.append(ddm.makeRequests(ddmReqList, aggKey='item'))\n\n elif self.msConfig['defaultDataManSys'] == 'PhEDEx':\n pass\n\n elif self.msConfig['defaultDataManSys'] == 'Rucio':\n pass\n\n return results\n\n def getRequestRecords(self, reqStatus):\n \"\"\"\n Queries ReqMgr2 for requests in a given status.\n NOTE: to be taken from MSTransferor with minor changes\n \"\"\"\n\n # NOTE:\n # If we are about to use an additional database for book keeping like\n # MongoDb, we can fetch up to 'limitRequestsPerCycle' and keep track\n # their status.\n\n # The following is taken from MSMonitor, just for an example.\n # get requests from ReqMgr2 data-service for given status\n # here with detail=False we get back list of records\n requests = self.reqmgr2.getRequestByStatus([reqStatus], detail=False)\n self.logger.info(' retrieved %s requests in status: %s', len(requests), reqStatus)\n\n return requests\n\n def _pushToMongo(self, reqStatus):\n \"\"\"\n An auxiliary function to push documents with workflow/request\n representation into mongoDB\n \"\"\"\n pass\n","sub_path":"src/python/WMCore/MicroService/Unified/MSOutput.py","file_name":"MSOutput.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"47535570","text":"#!/usr/bin/env python\n\n'''Plot bivariate graph and spearman coefficent (climate diff bet months)'''\n\n__author__ = 'Jia Le Lim'\n__version__ = '0.0.5'\n\nimport csv\nimport operator\nimport decimal\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as pl\nimport matplotlib.lines as mlines\nimport calendar\nimport sys\nimport os.path\nfrom math import log\nfrom scipy import stats\nfrom matplotlib import rc\n\nh = open('../../data/rearranged/new/AllTurnoverCorrectedNewCerrado.csv','rb')\ndata = csv.reader(h)\n\n\n########## Inputting data into lists ##########\n\n# copy and paste all headers in data twice\n[years, months, bints, beeturnovers, plantturnovers, specturnovers, osturnovers, stturnovers, \\\navgprecips, avgtemps, avgmaxtemps, avgtempranges, avghumids, \\\ndiffprecips, difftemps, diffmaxtemps, difftempranges, diffhumids] = ([] for i in range(len(next(data))))\n\nheaders2 = [years, months, bints, beeturnovers, plantturnovers, specturnovers, osturnovers, stturnovers, \\\navgprecips, avgtemps, avgmaxtemps, avgtempranges, avghumids, \\\ndiffprecips, difftemps, diffmaxtemps, difftempranges, diffhumids]\n\nfor column in data:\n for j, i in enumerate(headers2):\n i.append(column[j])\n\nh.close()\n\n\n########## Bivariates between turnover data ##########\n\ndef separateintoseasons(months, values, drymonths, wetmonths):\n 'Separate data into dry and wet seasons'\n dryvalues = []\n wetvalues = []\n for i in range(len(values)):\n if months[i] in drymonths:\n dryvalues.append(values[i])\n if months[i] in wetmonths:\n wetvalues.append(values[i])\n return dryvalues, wetvalues\n\ndef plotturnovers(a, b, drya, dryb, weta, wetb, xlabel_str, ylabel_str):\n 
'Calculate correlation and plot bivariate plot'\n \n # Calculate Spearman Correlation Coefficient\n dryr, dryp = stats.spearmanr(drya, dryb)\n wetr, wetp = stats.spearmanr(weta, wetb)\n allr, allp = stats.spearmanr(a, b)\n \n # plot\n pl.plot(drya, dryb, 'ro', label = 'Dry Season')\n pl.plot(weta, wetb, 'bo', label = 'Wet Season')\n \n # plot dimensions\n axes = pl.gca()\n ymin, ymax = axes.get_ylim()\n xmin, xmax = axes.get_xlim()\n pl.axis([ xmin - 0.01, xmax + 0.01, ymin - 0.01, ymax + 0.01])\n\n # labels\n pl.xlabel(xlabel_str, size=16)\n pl.ylabel(ylabel_str, size=16)\n pl.title('Climate in Cerrado (2008-2009)', size=16)\n\n # correlation text\n rc('text', usetex=True)\n pl.text(xmax - xmax/4 - 1, ymin, r' \\underline{Spearman`s coefficient}' + '\\n Dry Season: ' + str(round(dryr, 5)) + \\\n '\\n Wet Season: ' + str(round(wetr, 5)) + '\\n All: ' + str(round(allr, 5)) , size = 12)\n\n # legend\n legend = pl.legend(loc='best', frameon=True, numpoints=1)\n light_grey = np.array([float(248)/float(255)]*3)\n legend.get_frame().set_linewidth(0.0)\n legend.get_frame().set_color(light_grey)\n\n # remove borders\n pl.gca().spines['top'].set_visible(False)\n pl.gca().spines['right'].set_visible(False)\n pl.gca().xaxis.set_ticks_position('bottom')\n pl.gca().yaxis.set_ticks_position('left')\n\n # grid\n pl.grid(True)\n\n # save plot and show\n plotname = xlabel_str + '-' + ylabel_str\n plotpath = '../../results/CorrectedNewCerrado/Bivariateplots/monthlydiff/' + plotname + '.pdf'\n pl.savefig(plotpath)\n pl.close()\n \n # data to be printed\n return dryr, dryp, wetr, wetp, allr, allp\n\ndef writenewdata(filename_str, headers, values):\n 'inputting new data into csv file'\n headers = headers.split(', ')\n pathname = '../../results/CorrectedNewCerrado/Bivariateplots/monthlydiff/' + filename_str + '.csv'\n g = open(pathname, 'wb')\n\n csvwrite = csv.writer(g)\n csvwrite.writerow(headers)\n tobewritten = zip(*values)\n for row in tobewritten:\n csvwrite.writerow(row)\n g.close()\n\n# Plotting starts here\n# diffprecips, difftemps, diffmaxtemps, difftempranges, diffhumids\n\n# separate into seasons\ndrymonths = ['4', '5', '6', '7', '8', '9']\nwetmonths = ['10', '11', '12', '1', '2', '3']\n[drydiffprecips, wetdiffprecips] = separateintoseasons(months, diffprecips, drymonths, wetmonths)\n[drydifftemps, wetdifftemps] = separateintoseasons(months, difftemps, drymonths, wetmonths)\n[drydiffmaxtemps, wetdiffmaxtemps] = separateintoseasons(months, diffmaxtemps, drymonths, wetmonths)\n[drydifftempranges, wetdifftempranges] = separateintoseasons(months, difftempranges, drymonths, wetmonths)\n[drydiffhumids, wetdiffhumids] = separateintoseasons(months, diffhumids, drymonths, wetmonths)\n\n# turnoverset for y, turnoverxset for x needed to be plotted against y\nturnoverset = [diffprecips, difftemps, diffmaxtemps, difftempranges, diffhumids]\nturnoverxset = [[difftemps, diffmaxtemps, difftempranges, diffhumids], \\\n[diffmaxtemps, difftempranges, diffhumids], \\\n[difftempranges, diffhumids], \\\n[], []]\nwetturnoverset = [wetdiffprecips, wetdifftemps, wetdiffmaxtemps, wetdifftempranges, wetdiffhumids]\ndryturnoverset = [drydiffprecips, drydifftemps, drydiffmaxtemps, drydifftempranges, drydiffhumids]\nturnoversetlabel = ['PrecipsDiff', 'TempsDiff', 'MaxTempsDiff', 'TempRangesDiff', 'HumidsDiff']\n\n# for each y value find the x values needed to be plotted against, find x index in dry and wet and label sets\n# append the appropriate labels and values for exporting correlation values\n\n[xlabels, ylabels, dryrs, 
dryps, wetrs, wetps, allrs, allps] = ([] for i in range(8))\n\nfor i in range(len(turnoverset)):\n for xturnover in turnoverxset[i]:\n for turnover in turnoverset:\n if xturnover == turnover:\n h = turnoverset.index(turnover)\n\n dryr, dryp, wetr, wetp, allr, allp = plotturnovers(xturnover, turnoverset[i], \\\n dryturnoverset[h], dryturnoverset[i], wetturnoverset[h], wetturnoverset[i], turnoversetlabel[h], turnoversetlabel[i])\n\n xlabels.append(turnoversetlabel[h])\n ylabels.append(turnoversetlabel[i])\n dryrs.append(round(dryr, 5))\n dryps.append(round(dryp, 5))\n wetrs.append(round(wetr, 5))\n wetps.append(round(wetp, 5))\n allrs.append(round(allr, 5))\n allps.append(round(allp, 5))\n\n# output data\n\nnewheaders = 'x-axis, y-axis, DryCoefficient, DryP-value, WetCoefficient, WetP-value, AllCoefficient, AllP-value'\nnewvalues = [xlabels, ylabels, dryrs, dryps, wetrs, wetps, allrs, allps]\nwritenewdata('DiffClimateCoefficients(New)', newheaders, newvalues)\n","sub_path":"code/figuresCode/bivariatemonthdiff.py","file_name":"bivariatemonthdiff.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"459959877","text":"from random import randint\n\ndef alphabet_position(text):\n temp = []\n for i in range(len(text)):\n if text[i].isupper():\n temp.append(ord(text[i])-64)\n elif text[i].islower():\n temp.append(ord(text[i])-96)\n return ' '.join(map(str, temp))\n\n\ndef alpha_refactored(text):\n return ' '.join(str(ord(c) - 96) for c in text.lower() if c.isalpha())\n\ndef main():\n a = alphabet_position(\"The sunset sets at twelve o' clock.\")\n b = \"20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11\"\n print(a == b)\n print(alphabet_position(\"Bloop bip bingo boinger.\"), \"\\n2 12 15 15 16 2 9 16 2 9 14 7 15 2 15 9 14 7 5 18 \")\n\n number_test = \"\"\n for item in range(10):\n number_test += str(randint(1, 9))\n print(alphabet_position(number_test), \"\")\n\nmain()","sub_path":"ASCIIindex.py","file_name":"ASCIIindex.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"112982917","text":"# -*- coding: utf-8 -*-\nfrom openprocurement.api.utils import opresource\nfrom openprocurement.tender.openua.views.award_complaint import TenderUaAwardComplaintResource\n\n\n@opresource(name='Tender Two Stage Award Complaints',\n collection_path='/tenders/{tender_id}/awards/{award_id}/complaints',\n path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}',\n procurementMethodType='aboveThresholdTS',\n description=\"Tender TS award complaints\")\nclass TenderTSAwardComplaintResource(TenderUaAwardComplaintResource):\n\n def complaints_len(self, tender):\n return sum([len(i.complaints) for i in tender.awards], sum([len(i.complaints) for i in tender.qualifications], len(tender.complaints)))\n","sub_path":"openprocurement/tender/twostage/views/award_complaint.py","file_name":"award_complaint.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"384349871","text":"# While loop\n\n# Ch 45\ntotal = 0\nwhile total <= 50:\n num = int(input(\"Enter a number: \"))\n total += num\n print(\"The current total is: \", total)\nprint(\"The total is over 50.\")\n\n# Ch 46\nvalue = 0\nwhile value < 5:\n value += 1\n num = input(\"Enter a number: \")\nprint(\"The last number you've entered was a\", num)\n\n# Ch 
47\r\nnum1 = int(input(\"Enter your first number : \"))\r\nnum2 = int(input(\"Enter your second number: \"))\r\nanswer = num1 + num2\r\nchoice = \"y\"\r\nwhile choice != \"n\":\r\n    choice = input(\"Do you want to enter another number? (y/n): \")\r\n    if choice == \"y\":\r\n        newNum = int(input(\"Enter another number: \"))\r\n        answer += newNum\r\n    else:\r\n        input(\"Press Enter to see total.\")\r\nprint(\"The total is\", answer)\r\n\r\n# Ch 48\r\ncount = 0\r\nagain = \"y\"\r\nwhile again == \"y\":\r\n    name = input(\"Enter the name you want to invite: \")\r\n    print(name, \"has now been invited.\")\r\n    count += 1\r\n    again = input(\"Do you want to invite somebody else? (y/n): \")\r\nprint(\"A total of\", count, \"people have been invited.\")\r\n\r\n# Ch 49\r\ncompNum = 50\r\nnum = 0\r\ncount = 0\r\nwhile compNum != num:\r\n    num = int(input(\"Enter a number: \"))\r\n    count += 1\r\n    if compNum > num:\r\n        print(\"Number is too low!\")\r\n    elif compNum < num:\r\n        print(\"Number is too high!\")\r\nprint(\"\\nWell done, you took\", count, \"attempts.\")\r\n\r\n# Ch 50\r\nnum = int(input(\"Enter a number: \"))\r\nwhile num < 10 or num > 20:\r\n    if num < 10:\r\n        print(\"Too low!\")\r\n    elif num > 20:\r\n        print(\"Too high!\")\r\n    num = int(input(\"Try again!: \"))\r\nprint(\"Thank you!\")\r\n\r\n# Ch 51\r\nnum = 10\r\nwhile num > 0:\r\n    print(\"There are\", num, \"green bottles hanging on the wall.\")\r\n    print(num, \"green bottles hanging on the wall.\")\r\n    print(\"And if 1 green bottle should accidentally fall,\")\r\n    num -= 1\r\n    answer = int(input(\"How many green bottles will be hanging on the wall? \"))\r\n    if answer == num:\r\n        print(\"There will be\", num, \"green bottles hanging on the wall.\")\r\n    else:\r\n        while answer != num:\r\n            answer = int(input(\"No, try again!\"))\r\nprint(\"There are no more green bottles hanging on the wall.\")\r\n","sub_path":"Ch# 45-51.py","file_name":"Ch# 45-51.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"179491740","text":"import requests\r\nimport csv\r\nimport json\r\n\r\nAPI_KEY = \"2be95b32489bf42da7ff5af042b72247\"\r\nURL_PREFIX = \"http://ws.audioscrobbler.com/2.0/\"\r\nREQUEST_METHOD = \"?method=chart.gettopartists\"\r\nREQUEST_API = \"&api_key=\" + API_KEY\r\nREQUEST_FORMAT = \"&format=json\"\r\nREQUEST_LIMIT = \"&limit=1000\"\r\nOUTPUT_FILE = 'topArtists.json'\r\n\r\ndef createRequestURI():\r\n    return (URL_PREFIX + REQUEST_METHOD + REQUEST_API +\r\n            REQUEST_LIMIT + REQUEST_FORMAT)\r\n\r\ndef main():\r\n    try:\r\n        topArtists = (requests.get(createRequestURI())).json()\r\n        with open(OUTPUT_FILE, 'w') as outfile:\r\n            json.dump(topArtists, outfile)\r\n    except requests.exceptions.RequestException:\r\n        print(\"ERROR\")\r\n\r\nmain()\r\n","sub_path":"CSV/artists/getDataFromLastFM.py","file_name":"getDataFromLastFM.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"627825620","text":"from selenium import webdriver\nimport pandas as pd\nimport filepath\n\npath = filepath.getChromeDriverPath()\ndriver = webdriver.Chrome(path)\ndriver.get(\n    \"https://news.naver.com/main/read.nhn?mode=LSD&mid=shm&sid1=100&oid=014&aid=0004332161\"\n)\n\narticle_title_element = driver.find_element_by_id(\"articleTitle\")\narticle_body_element = driver.find_element_by_id(\"articleBodyContents\")\narticle_date_elements = driver.find_elements_by_class_name(\"t11\")  # a list is returned\n(article_input_date_element, article_last_modified_date_element) = article_date_elements\n\ndataframe = pd.DataFrame(\n    {\n        \"title\": [article_title_element.text],\n        \"body\": 
[article_body_element.text],\n \"input_date\": [article_input_date_element.text],\n \"last_modified_date\": [article_last_modified_date_element.text],\n }\n)\n\ncsv_save_path = filepath.getPath(\"../../csv/article_1.csv\")\ndataframe.to_csv(csv_save_path, mode=\"w\", index=False)\n\ndriver.close()\n\n","sub_path":"lecture-3/02.pandas/01.pandas-save.py","file_name":"01.pandas-save.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"208199219","text":"from typing import List\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\ndef conv3x3_bn_relu(in_planes, out_planes, stride=1):\n \"\"\"\n 3x3 convolution + BN + relu\n \"\"\"\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.ReLU(inplace=True),\n )\n\n\nclass UPerNet(nn.Module):\n def __init__(self, output_filters: List[int], num_classes=150, pool_scales=(1, 2, 3, 6), fpn_dim=256):\n super(UPerNet, self).__init__()\n\n last_fm_dim = output_filters[-1]\n\n # PPM Module\n self.ppm_pooling = []\n self.ppm_conv = []\n\n for scale in pool_scales:\n self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))\n self.ppm_conv.append(\n nn.Sequential(\n nn.Conv2d(last_fm_dim, 512, kernel_size=1, bias=False), nn.BatchNorm2d(512), nn.ReLU(inplace=True)\n )\n )\n self.ppm_pooling = nn.ModuleList(self.ppm_pooling)\n self.ppm_conv = nn.ModuleList(self.ppm_conv)\n self.ppm_last_conv = conv3x3_bn_relu(last_fm_dim + len(pool_scales) * 512, fpn_dim, 1)\n\n # FPN Module\n self.fpn_in = []\n for fpn_inplane in output_filters[:-1]: # skip the top layer\n self.fpn_in.append(\n nn.Sequential(\n nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),\n nn.BatchNorm2d(fpn_dim),\n nn.ReLU(inplace=True),\n )\n )\n self.fpn_in = nn.ModuleList(self.fpn_in)\n\n self.fpn_out = []\n for i in range(len(output_filters) - 1): # skip the top layer\n self.fpn_out.append(nn.Sequential(conv3x3_bn_relu(fpn_dim, fpn_dim, 1)))\n self.fpn_out = nn.ModuleList(self.fpn_out)\n\n self.conv_last = nn.Sequential(\n conv3x3_bn_relu(len(output_filters) * fpn_dim, fpn_dim, 1), nn.Conv2d(fpn_dim, num_classes, kernel_size=1)\n )\n\n def forward(self, feature_maps):\n last_fm = feature_maps[-1]\n\n input_size = last_fm.size()\n ppm_out = [last_fm]\n for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):\n ppm_out.append(\n pool_conv(\n F.interpolate(\n pool_scale(last_fm), (input_size[2], input_size[3]), mode=\"bilinear\", align_corners=False\n )\n )\n )\n ppm_out = torch.cat(ppm_out, 1)\n f = self.ppm_last_conv(ppm_out)\n\n fpn_feature_list = [f]\n for i in reversed(range(len(feature_maps) - 1)):\n conv_x = feature_maps[i]\n conv_x = self.fpn_in[i](conv_x) # lateral branch\n\n f = F.interpolate(f, size=conv_x.size()[2:], mode=\"bilinear\", align_corners=False) # top-down branch\n f = conv_x + f\n\n fpn_feature_list.append(self.fpn_out[i](f))\n\n fpn_feature_list.reverse() # [P2 - P5]\n output_size = fpn_feature_list[0].size()[2:]\n fusion_list = [fpn_feature_list[0]]\n for i in range(1, len(fpn_feature_list)):\n fusion_list.append(F.interpolate(fpn_feature_list[i], output_size, mode=\"bilinear\", align_corners=False))\n\n fusion_out = torch.cat(fusion_list, 1)\n x = self.conv_last(fusion_out)\n return 
x\n","sub_path":"pytorch_toolbelt/modules/decoders/upernet.py","file_name":"upernet.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"369299377","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/toil_lib/test/test_programs.py\n# Compiled at: 2017-05-10 17:49:05\nimport os\n\ndef test_docker_call(tmpdir):\n from toil_lib.programs import docker_call\n work_dir = str(tmpdir)\n parameter = ['--help']\n tool = 'quay.io/ucsc_cgl/samtools'\n docker_call(work_dir=work_dir, parameters=parameter, tool=tool)\n fpath = os.path.join(work_dir, 'test')\n with open(fpath, 'w') as (f):\n docker_call(tool='ubuntu', env=dict(foo='bar'), parameters=['printenv', 'foo'], outfile=f)\n assert open(fpath).read() == 'bar\\n'","sub_path":"pycfiles/toil_lib-1.1.8-py2.7/test_programs.py","file_name":"test_programs.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"405383181","text":"import pandas as pd\nimport numpy as np\nimport xarray as xr\nimport ipdb\nimport matplotlib.pyplot as plt\nimport cartopy\nfrom utils import u_plot as up\nimport cartopy.crs as ccrs\nimport os\nimport matplotlib as mpl\nfrom utils import constants as cnst\nfrom scipy.ndimage.measurements import label\n\n\n\ndef monthly(years):\n\n for y in years:\n y = str(y)\n da = xr.open_dataset(cnst+'gridsat_WA_' + y + '.nc')\n da = da['t']\n da = da.where(da <= -80)\n\n month = da.groupby('time.month').mean(dim='time')\n\n simple = month.plot(x='lon', y='lat', col='month', col_wrap=3, cmap='viridis_r', transform=ccrs.PlateCarree(),\n subplot_kws={'projection': ccrs.PlateCarree()}, vmin=-85)\n\n for ax in simple.axes.flat:\n ax.coastlines()\n ax.gridlines()\n ax.set_extent([-17.5, 30, -6, 20])\n ax.set_aspect('equal', 'box-forced')\n\n plt.suptitle(y)\n plt.savefig('/users/global/cornkle/figs/CLOVER/GRIDSAT_cold_clouds/tests/monthly_'+y+'.png')\n plt.close('all')\n\n\n\ndef climatology_month():\n years = np.arange(2005,2016)#2017)\n\n msg_folder = cnst\n fname='aggs/gridsat_WA_cold_climatology_mean.nc'\n\n if not os.path.isfile(msg_folder + fname):\n da = xr.open_dataset(cnst+'gridsat_WA_' + str(2004) + '.nc')\n da = da['t']\n da = da.where(da <= -60)\n\n month = da.groupby('time.month').mean(dim='time')\n for y in years:\n y = str(y)\n da = xr.open_dataset(cnst+'gridsat_WA_' + y + '.nc')\n da = da['t']\n da = da.where(da <= -60)\n\n month = month + da.groupby('time.month').mean(dim='time')\n\n month = month / (len(years)+1)\n\n enc = {'t': {'complevel': 5, 'zlib': True}}\n month.to_netcdf(msg_folder + fname, encoding=enc)\n\n\n else:\n ds = xr.open_dataset(msg_folder + fname)\n month = ds['t']\n month.values[month.values==0]=np.nan\n\n simple = month.plot(x='lon', y='lat', col='month', col_wrap=4, cmap='inferno', transform=ccrs.PlateCarree(),\n subplot_kws={'projection': ccrs.PlateCarree()}, vmax=-65, vmin=-75, levels = 6)\n\n for ax in simple.axes.flat:\n ax.coastlines()\n ax.gridlines()\n ax.set_extent([-17.5, 30, -6, 20])\n ax.set_aspect('equal', 'box-forced')\n\n\n plt.savefig('/users/global/cornkle/figs/CLOVER/GRIDSAT_cold_clouds/tests/mean_t.png', dpi=300)\n\n\ndef month():\n y1 = 1982\n y2 =2017#2017\n years = list(range(1983,1985)) #+ list(range(2004,2014))\n\n msg_folder = cnst.GRIDSAT\n 
fname = 'aggs/gridsat_WA_-70_monthly.nc'\n\n    if not os.path.isfile(msg_folder + fname):\n        da = None\n        for y in years:\n            y = str(y)\n            da1 = xr.open_dataset(cnst.GRIDSAT + 'gridsat_WA_' + y + '.nc')\n            print('Doing ' + y)\n            da1['tir'] = da1['tir'].where(da1['tir'] <= -70)\n\n            da1 = da1.resample(time='m').mean('time')\n            try:\n                da = xr.concat([da, da1], 'time')\n            except TypeError:  # first year: da is still None\n                da = da1.copy()\n\n        enc = {'tir': {'complevel': 5, 'zlib': True}}\n        da.to_netcdf(msg_folder + fname, encoding=enc)\n\n    else:\n        ds = xr.open_dataset(msg_folder + fname)\n        da = ds['tir']\n        da.values[da.values == 0] = np.nan\n        da = da.sel(lat=slice(11, 20))\n        mean = da.mean(dim=['lat', 'lon'])\n\n        mean.plot()\n\n        plt.savefig('/users/global/cornkle/figs/CLOVER/GRIDSAT_cold_clouds/tests/trend_mcs.png', dpi=300)\n\n\ndef month_count():\n    y1 = 1982\n    y2 = 2017  # 2017\n    years = list(range(1983, 2003)) + list(range(2004, 2014))\n\n    msg_folder = cnst.GRIDSAT\n    fname = 'aggs/gridsat_WA_-70_monthly_count.nc'\n\n    if not os.path.isfile(msg_folder + fname):\n        da = None\n        for y in years:\n            y = str(y)\n            da1 = xr.open_dataset(cnst.GRIDSAT + 'gridsat_WA_' + y + '.nc')\n            print('Doing ' + y)\n            da1['tir'] = da1['tir'].where((da1['tir'] <= -70) & (da1['tir'] >= -108))\n            da1['tir'].values[da1['tir'].values < -70] = 1\n\n            da1 = da1.resample(time='m').sum('time')\n            try:\n                da = xr.concat([da, da1], 'time')\n            except TypeError:  # first year: da is still None\n                da = da1.copy()\n\n        enc = {'tir': {'complevel': 5, 'zlib': True}}\n        da.to_netcdf(msg_folder + fname, encoding=enc)\n\n    else:\n        ds = xr.open_dataset(msg_folder + fname)\n        da = ds['tir']\n        # ipdb.set_trace()  # debug leftover (the module imports ipdb, not pdb)\n        da.values[da.values == 0] = np.nan\n        da = da.sel(lat=slice(11, 20))\n        mean = da.mean(dim=['lat', 'lon'])\n\n        mean.plot()\n\n        plt.savefig('/users/global/cornkle/figs/CLOVER/GRIDSAT_cold_clouds/tests/trend_mcs.png', dpi=300)\n\n\ndef hourly_count():\n    y1 = 1982\n    y2 = 1984  # 2017\n    years = np.arange(y1 + 1, y2)  # 2017)\n\n    msg_folder = '/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/yearly_files/'\n    fname = 'gridsat_WA_-70_hourly_count.nc'\n\n    if not os.path.isfile(msg_folder + fname):\n        da = xr.open_dataset(msg_folder + 'gridsat_WA_' + str(y1) + '.nc')\n\n        da['t'] = da['t'].where(da['t'] <= -70)\n        da['t'].values[da['t'].values <= -70] = 1\n        da = da['t']\n\n        da = da[(da['time.month'] >= 6) & (da['time.month'] <= 9)]\n\n        da = da.groupby('time.hour').sum('time')\n\n        for y in years:\n            y = str(y)\n            da1 = xr.open_dataset('/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/gridsat_WA_hours_' + y + '.nc')\n            print('Doing ' + y)\n            da1['t'] = da1['t'].where(da1['t'] <= -70)\n            da1['t'].values[da1['t'].values <= -70] = 1\n\n            da1 = da1[(da1['time.month'] >= 6) & (da1['time.month'] <= 9)]\n\n            da1 = da1.groupby('time.hour').sum('time')\n\n            # ipdb.set_trace()  # debug leftover (the module imports ipdb, not pdb)\n\n            da = xr.concat([da, da1], 'time')\n            da1.close()\n\n        enc = {'t': {'complevel': 5, 'zlib': True}}\n        da.to_netcdf(msg_folder + fname, encoding=enc)\n\n    else:\n        ds = xr.open_dataset(msg_folder + fname)\n        da = ds['t']\n        da.values[da.values == 0] = np.nan\n        da = da.sel(lat=slice(11, 20))\n        mean = da.mean(dim=['lat', 'lon'])\n\n        mean.plot()\n\n        plt.savefig('/users/global/cornkle/VERA/plots/leeds_june_2017/trend_mcs.png', dpi=300)\n\n\ndef timeline_trend_count():\n    msg_folder = cnst.GRIDSAT\n    fname = 'aggs/gridsat_WA_-70_monthly_count_-40base_1000km2.nc'\n\n    da = xr.open_dataarray(msg_folder + fname)\n    da = da.sel(lat=slice(4.5, 8), lon=slice(-10, 15))\n    #da=da.sel(lat=slice(5,10))\n    #da[da==0]=np.nan\n    mean = da.mean(dim=['lat', 'lon'])\n    #mean = 
mean[(mean['time.month']==8)]\n f= plt.figure(figsize=(10,6))\n for i in range(3,6):\n bla = mean[(mean['time.month'] == i)]\n bla.plot(label=str(i), marker='o')\n plt.title('Average number of pixels <= -70C, 4.5-8N')\n plt.legend()\n #plt.ylim(0,3)\n # plt.ylim(-76,-72)\n\n\ndef timeline_trend_count_SA():\n msg_folder = cnst.GRIDSAT\n fname = 'aggs/gridsat_WA_-65_monthly_count_-40base_1000km2.nc'\n fname2 = 'aggs/gridsat_WA_-40_monthly_count_-40base_1000km2.nc'\n\n\n da = xr.open_dataarray(msg_folder + fname)\n da2 = xr.open_dataarray(msg_folder + fname2)\n #[25,33,-28,-10] , West[15,25,-26,-18]\n da = da.sel(lat=slice(-25,-18), lon=slice(18, 22))# (lat=slice(-28,-10), lon=slice(25, 33))\n da2 = da2.sel(lat=slice(-25,-18), lon=slice(18, 22)) #[25,33,-28,-10]\n #da=da.sel(lat=slice(5,10))\n #da[da==0]=np.nan\n mean = da.mean(dim=['lat', 'lon'])\n mean2 = da2.mean(dim=['lat', 'lon'])\n #mean = mean[(mean['time.month']==8)]\n f= plt.figure(figsize=(10,6))\n for i in [12,1]:\n bla = mean[(mean['time.month'] == i)]\n bla.plot(label=str(i), marker='o')\n plt.title('Average number of pixels <= -70C, SouthA 10-28S, 25-35E')\n f = plt.figure(figsize=(10, 6))\n for i in [12,1]:\n bla2 = mean2[(mean2['time.month'] == i)]\n bla2.plot(label=str(i), marker='o')\n plt.title('Average number of pixels <= -40C, SouthA 10-28S, 25-35E')\n\n\n plt.legend()\n\n\ndef timeline_trend_mean():\n msg_folder = '/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/'\n fname = 'gridsat_WA_-70_monthly.nc'\n\n da = xr.open_dataarray(msg_folder + fname)\n da=da.sel(lat=slice(5,7), lon=slice(-17,20))\n da[da==0]=np.nan\n mean = da.mean(dim=['lat', 'lon'])\n #mean = mean[(mean['time.month']==8)]\n f= plt.figure(figsize=(10,6))\n for i in range(4,6):\n bla = mean[(mean['time.month'] == i)]\n bla.plot(label=str(i), marker='o')\n plt.title('Monthly mean temperature of pixels <= -40C, 11-18N')\n plt.legend()\n plt.ylim(-78,-71)\n\n\ndef trend_map():\n\n def linear_trend(x):\n pf = np.polyfit(np.arange(len(x)), x, 1)\n # we need to return a dataarray or else xarray's groupby won't be happy\n return xr.DataArray(pf[0])\n\n msg_folder = '/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/'\n fpath = '/users/global/cornkle/figs/gap_filling_Tgrad/gridsat/'\n\n fname = 'gridsat_WA_-70_monthly_count.nc'\n\n dam = xr.open_dataarray(msg_folder + fname)\n dam = dam.sel(lon=slice(-18,51), lat=slice(-37, 36))\n lons = dam.lon\n lats = dam.lat\n months = np.arange(1, 13)\n #da[da == 0] = np.nan\n for m in months:\n da = dam[(dam['time.month']==m) & (dam['time.year']>1982)]\n da = da.groupby('time.day').sum(axis=0)\n da = da.groupby('time.year').mean(axis=0) # average daily frequency per year\n # stack lat and lon into a single dimension called allpoints\n stacked = da.stack(allpoints=['lat', 'lon'])\n # apply the function over allpoints to calculate the trend at each point\n\n trend = stacked.groupby('allpoints').apply(linear_trend)\n # unstack back to lat lon coordinates\n trend_unstacked = trend.unstack('allpoints')\n\n trend_unstacked = trend_unstacked * 10.\n da2 = xr.DataArray(trend_unstacked, coords=[lats, lons], dims=['latitude', 'longitude'])\n\n fp = fpath + 'ttrend_' + str(m).zfill(2) + '.png'\n\n up.quick_map_salem(da2, vmin=-0.4, vmax=0.4, cmap='RdBu_r', save=fp)\n\n\n\ndef t_ratio():\n\n msg_folder = cnst.local_data + 'GRIDSAT/MCS18/aggs/'\n fname = 'gridsat_WA_-70_monthly_count_-40base_1000km2.nc'\n da70 = xr.open_dataarray(msg_folder + fname)\n fname = 'gridsat_WA_-50_monthly_count_-40base_1000km2.nc'\n da40 = 
xr.open_dataarray(msg_folder + fname)\n\n da70.values[da70.values == 0] = np.nan\n da40.values[da40.values == 0] = np.nan\n\n #ratio = da70/da40\n\n msg40 = da40[(da40['time.year'] >= 2007) & (da40['time.year'] <= 2017)]\n msg70 = da70[(da70['time.year'] >= 2007) & (da70['time.year'] <= 2017)]\n\n mfg40 = da40[(da40['time.year'] >= 1984) & (da40['time.year'] <= 2000)]\n mfg70 = da70[(da70['time.year'] >= 1984) & (da70['time.year'] <= 2000)]\n\n # f = plt.figure(figsize=(10, 6))\n # ax = f.add_subplot(111)\n\n msg40 = msg40.groupby('time.month').sum(dim='time')#('time.season').sum(dim='time')\n msg70 = msg70.groupby('time.month').sum(dim='time')\n\n mfg40 = mfg40.groupby('time.month').sum(dim='time')\n mfg70 = mfg70.groupby('time.month').sum(dim='time')\n\n msg_ratio = msg70/msg40*100\n mfg_ratio = mfg70 /mfg40*100\n\n # msg_ratio.values[np.isinf(msg_ratio).values] = np.nan\n # mfg_ratio.values[np.isinf(mfg_ratio).values] = np.nan\n\n simple = msg40.plot(x='lon', y='lat', col='month', col_wrap=3, cmap='viridis', transform=ccrs.PlateCarree(),\n subplot_kws={'projection': ccrs.PlateCarree()}, vmin=5, vmax=50)\n\n for ax in simple.axes.flat:\n ax.coastlines()\n ax.gridlines()\n ax.set_extent([-17.5, 55, -35, -5])\n ax.set_aspect('equal', 'box-forced')\n\n\n # simple = msg_ratio.plot(x='lon', y='lat', col='month', col_wrap=3, cmap='viridis', transform=ccrs.PlateCarree(),\n # subplot_kws={'projection': ccrs.PlateCarree()}, vmax=70)\n #\n # for ax in simple.axes.flat:\n # ax.coastlines()\n # ax.gridlines()\n # ax.set_extent([-17.5, 55, -35, -5])\n # ax.set_aspect('equal', 'box-forced')\n # #ax.set_title('MSG')\n #\n # simple = mfg_ratio.plot(x='lon', y='lat', col='month', col_wrap=3, cmap='viridis', transform=ccrs.PlateCarree(),\n # subplot_kws={'projection': ccrs.PlateCarree()}, vmax=70)\n #\n # for ax in simple.axes.flat:\n # ax.coastlines()\n # ax.gridlines()\n # ax.set_extent([-17.5, 55, -35, -5])\n # ax.set_aspect('equal', 'box-forced')\n # #ax.set_title('MFG')\n\n ratio = (msg_ratio-mfg_ratio)\n\n\n simple = ratio.plot(x='lon', y='lat', col='month', col_wrap=3, cmap='RdBu', transform=ccrs.PlateCarree(),\n subplot_kws={'projection': ccrs.PlateCarree()}, levels=[-25,-15,-10, -5, 5, 10,15, 25])\n\n for ax in simple.axes.flat:\n ax.coastlines()\n ax.gridlines()\n ax.set_extent([-17.5, 55, -35, -5])\n ax.set_aspect('equal', 'box-forced')\n\n\n\ndef size_trend():\n\n msg_folder = '/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/yearly_files/'\n data = xr.open_mfdataset(msg_folder + 'gridsat*.nc')\n\n cut = data.sel(lat=slice(10,17), lon=slice(-17,-10))\n cut = cut.isel(time= ((cut['time.year']>1984) & (cut['time.month']==8)))\n cut=cut['t']\n\n dic= {}\n for p in np.arange(1985,2017,1):\n dic[p] = []\n\n def mcs_find(image, thresh=None):\n if not thresh:\n print('Give threshold')\n return\n\n image[image > thresh] = 0\n image[image <= thresh] = 1\n image[np.isnan(image)] = 0\n\n if np.sum(image<10):\n return []\n\n labels, numL = label(image)\n\n ret = []\n\n for l in np.unique(labels):\n if l == 0:\n continue\n\n blob = np.sum(labels == l)\n\n pdb.set_trace()\n\n if np.sum(len(blob[0])) < 100: # at least 1000m2\n continue\n\n ret.append(blob*49)\n\n return ret\n\n for i in np.arange(cut.shape[0]):\n\n ret = mcs_find(cut[i,:,:].values, thresh=-40)\n if ret == []:\n continue\n pdb.set_trace()\n dic[cut['time.year']].append(ret)\n\n pdb.set_trace()\n\n for d in dic:\n d = [item for sublist in d for item in 
sublist]\n\n\n\n\n","sub_path":"CLOVER/old/gridsat_baseplots.py","file_name":"gridsat_baseplots.py","file_ext":"py","file_size_in_byte":14317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"85107098","text":"import math as m\r\nimport numpy as np\r\n\r\n\r\ndef linearinterpolaterange(lx: list, ly: list, x: float):\r\n\r\n    '''Simple linear interpolation between two value lists, clamped at both ends'''\r\n\r\n    if x <= lx[0]:\r\n        return ly[0]\r\n    if x >= lx[-1]:\r\n        return ly[-1]\r\n    for index in range(len(lx) - 1):\r\n        if lx[index] <= x <= lx[index + 1]:\r\n            return ((x - lx[index]) / (lx[index + 1] - lx[index])) * (ly[index + 1] - ly[index]) + ly[index]\r\n\r\n\r\ndef linear_cof(lx: list, ly: list):\r\n    a, b = np.polyfit(np.array(lx), np.array(ly), 1)\r\n    return a, b\r\n\r\n\r\nlTenure = [1, 31, 61, 92]\r\nlMarketData = [1.725, 1.86, 1.9, 1.94]\r\n\r\ntest = linearinterpolaterange(lTenure, lMarketData, 100)\r\n\r\nprint(test)\r\nprint(linear_cof(lTenure, lMarketData))\r\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"598623853","text":"import calendar\nfrom hashlib import md5\nfrom os.path import join\nfrom datetime import timedelta\nfrom operator import attrgetter\nfrom itertools import product, tee, count\nfrom collections import defaultdict\nfrom collections import namedtuple\n\nimport numpy as np\nfrom arrow import Arrow\nfrom dateutil.parser import parse as parse_date\nfrom dateutil.relativedelta import relativedelta\n\nfrom .loader import load_classes\nfrom .renderer import render\n\nDAYS = dict(zip(calendar.day_name, count()))\nBAD_DAYS = {\n    'Thursday',\n    'Tuesday',\n    'Friday'\n}\nTimePeriod = namedtuple('TimePeriod', 'start,end')\nLUNCH = TimePeriod(12, 12 + 2)\n\n\ndef pairs(iterable):\n    first, second = tee(iterable)\n    next(second)\n    return zip(first, second)\n\n\ndef overlaps_on_day(day) -> bool:\n    '''\n    Given an iterable of objects with `start` and `end` attributes,\n    return True if any of them overlap.\n    '''\n    day = sorted(\n        day,\n        key=attrgetter('start')\n    )\n    return any(\n        first.end > second.start\n        for first, second in pairs(day)\n    )\n\n\ndef sort_into_days(classes) -> dict:\n    '''\n    Given an iterable of objects with a `start` attribute, sorts them into the\n    weekday on which they occur\n\n    ::code::\n        {weekday_num: [classes]}\n    '''\n    days = defaultdict(list)\n\n    for class_ in classes:\n        days[class_.start.weekday()].append(class_)\n\n    return dict(days)  # convert defaultdict to normal dict\n\n\ndef overlaps_on_days(days) -> bool:\n    return any(\n        overlaps_on_day(classes)\n        for classes in days.values()\n    )\n\n\ndef classes_on_days(days, on_days):\n    return any(\n        days.get(DAYS[day_name])\n        for day_name in on_days\n    )\n\n\ndef none_on_bad_days(days) -> bool:\n    return not classes_on_days(days, BAD_DAYS)\n\n\ndef average_starting_time(days) -> timedelta:\n    '''\n    Given a dictionary mapping days to lists of objects with a `start` attribute,\n    calculate the average time since the start of that day\n    '''\n    return average_n_time(days, 'start')\n\n\ndef average_ending_time(days) -> timedelta:\n    return average_n_time(days, 'end')\n\n\ndef average_n_time(days, attr) -> timedelta:\n    days = [\n        getattr(sorted(day, key=attrgetter(attr))[0], attr)\n        for day in days.values()\n    ]\n    days = map(Arrow.fromdatetime, days)\n\n    days = [\n        # get the time relative to the start of that day\n        day - day.floor('day')\n        for day in days\n    ]\n    # average start time :)\n    
return sum(days[1:], days[0]) / len(days)\n\n\ndef relative_to_time(rd: relativedelta) -> timedelta:\n 'Converts a relativedelta to a comparable timedelta'\n rd = {\n 'days': rd.days,\n 'hours': rd.hours,\n 'leapdays': rd.leapdays,\n 'microseconds': rd.microseconds,\n 'minutes': rd.minutes,\n 'months': rd.months,\n 'seconds': rd.seconds,\n 'years': rd.years\n }\n rd['months'] += rd['years'] * 12\n rd['days'] += rd['months'] * 30\n rd['hours'] += (rd['days'] + rd['leapdays']) * 24\n rd['minutes'] += rd['hours'] * 60\n rd['seconds'] += rd['minutes'] * 60\n rd['microseconds'] += rd['seconds'] * 1000000\n\n return timedelta(microseconds=rd['microseconds'])\n\n\ndef all_possible_class_combinations(classes):\n possibles = product(*classes)\n possibles = map(sort_into_days, possibles)\n\n # this just removes completely invalid class patterns\n # due to class overlaps\n return filter(lambda days: not overlaps_on_days(days), possibles)\n\n\ndef even_number_of_classes_per_day(days) -> bool:\n '''\n Given a list of lists representing days, calculates the stddev for the\n number of classes per day.\n '''\n classes_per_day = list(map(len, days.values()))\n\n return np.array(classes_per_day).std() < 1.5\n\n\ndef time_period_on_day(timeperiod, day):\n day = parse_date(calendar.day_name[day])\n\n start, end = timeperiod\n start = day.replace(hour=start)\n end = day.replace(hour=end)\n\n return TimePeriod(start, end)\n\n\ndef classes_during_time_period(timeperiod, days):\n return any(\n overlaps_on_day(day + [time_period_on_day(timeperiod, day_num)])\n for day_num, day in days.items()\n )\n\n\ndef classes_during_lunch(days):\n return classes_during_time_period(LUNCH, days)\n\n\ndef main():\n classes = list(load_classes())\n\n possibles = all_possible_class_combinations(classes)\n\n # user specifyable\n possibles = filter(none_on_bad_days, possibles)\n possibles = filter(\n lambda days: (\n average_starting_time(days) >\n relative_to_time(relativedelta(hours=9))\n ),\n possibles\n )\n possibles = filter(\n even_number_of_classes_per_day,\n possibles\n )\n\n for possible in possibles:\n do_render(possible)\n\n\ndef determine_hash(possible):\n # work is required to get these things to be deterministic\n data = sorted(\n (key, sorted(value))\n for key, value in possible.items()\n )\n data = str(data)\n return md5(data.encode()).hexdigest()\n\n\ndef do_render(possible):\n img = render(possible)\n\n filename = '{}.png'.format(determine_hash(possible))\n img.save(join('possibles', filename))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"api/lib/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"62876075","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 4 17:01:10 2018\n\nExercise09\n\n@author: Patricia\n\"\"\"\nimport pandas\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# data loaded into a data frame\ndata=pandas.read_csv(\"incomevSAT.txt\", sep=\"\\t\")\n\nx = data.iloc[:,0] # sets x equal to income column\nyR = data.iloc[:,1] # gets SAT Reading scores\nyM = data.iloc[:,2] # gets SAT Math scores\nyW = data.iloc[:,3] # gets SAT Writing scores\nyC = yR + yM + yW # cumulative SAT\n\nplt.scatter(x, yC)\nplt.title('Family Income vs. 
SAT')\nplt.xlabel('Family Income') # x axis label\nplt.ylabel('SAT score') # y axis label\nz = np.polyfit(x,yC,1) # finds the linear line of best fit\np = np.poly1d(z)\nplt.plot(x,p(x),\"r--\")\nplt.show()\n\n","sub_path":"Ex09pt1.py","file_name":"Ex09pt1.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"343189662","text":"# coding=utf-8\nimport time\nimport imp\nimport os\nimport sys\nimport math\nimport copy\nimport random\nimport shutil\n\nimport numpy as np\n\nimport scipy as sp\nfrom scipy.spatial import ConvexHull\nfrom scipy.ndimage.morphology import binary_fill_holes\nimport scipy.misc\n\nfrom skimage import io, transform\nfrom skimage import measure\n\n\"\"\"\ncmr_ml_utils_geometry.py\n\nThis file contains utility functions for manipulation masks and contours\nMain use case is the object detection and segmentation\n\n\"\"\"\n\n# ------------------------------------------------------------------------------\ndef smooth_contours (contour_x, contour_y, n_components=24, circularise=False, n_pts=2000):\n \"\"\" takes contour_x,contour_y the cartesian coordinates of a contour, \n then procdues a smoothed more circular contour smoothed_contour_x,smoothed_contour_y\"\"\"\n\n try:\n\n if n_components is None:\n n_components=12 # slightly arbitary number, but seems to work well\n \n npts=n_pts+1\n contour_pts = np.transpose(np.stack([contour_x,contour_y]))\n \n if circularise:\n # get the contour points that form a convex hull\n hull = sp.spatial.ConvexHull(contour_pts)\n to_sample = hull.vertices\n else:\n to_sample = range(0,len(contour_x))\n \n #wrap around cirlce\n to_sample = np.hstack([to_sample,to_sample[0]])\n sample_pts = contour_pts[to_sample,:]\n \n # sample each curve at uniform distances according to arc length parameterisation\n dist_between_pts = np.diff(sample_pts,axis=0)\n cumulative_distance = np.sqrt(dist_between_pts[:,0]**2 + dist_between_pts[:,1]**2)\n cumulative_distance = np.insert(cumulative_distance,0,0,axis=0)\n cumulative_distance = np.cumsum(cumulative_distance)\n cumulative_distance = cumulative_distance/cumulative_distance[-1]\n contour_x=np.interp(np.linspace(0,1,npts),cumulative_distance,sample_pts[:,0],period=360)\n contour_y=np.interp(np.linspace(0,1,npts),cumulative_distance,sample_pts[:,1],period=360)\n contour_x = contour_x[:-1]\n contour_y = contour_y[:-1]\n \n # smooth out contour by keeping the lowest nkeep Fourier components\n n = len (contour_x)\n nfilt=n-n_components-1\n f = np.fft.fft(contour_x)\n f[int(n/2+1-nfilt/2):int(n/2+nfilt/2)] = 0.0;\n smoothed_contour_x = np.abs(np.fft.ifft(f))\n f = np.fft.fft(contour_y)\n f[int(n/2+1-nfilt/2):int(n/2+nfilt/2)] = 0.0;\n smoothed_contour_y = np.abs(np.fft.ifft(f))\n\n except Exception as e:\n print(\"Error happened in smooth_contours ...\", file=sys.stderr)\n print(e)\n sys.stderr.flush()\n\n return smoothed_contour_x, smoothed_contour_y\n\n# ------------------------------------------------------------------------------\n\ndef extract_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):\n \"\"\"Extract contours from segmentation mask or probability map\n\n Inputs:\n\n preds : [RO E1], input mask or probablity map\n thres : threshold to extract contours, a 2D marching cube extration is performed\n smoothing : True or False, if true, contours are smoothed\n num_components_smoothing : number of fft components kept after smoothing\n circular : True or False, if 
true, contours are kept to approx. circle\n\n Outputs:\n\n contours : a list of contours, every contour is a nx2 numpy array\n \"\"\"\n\n try:\n contours = measure.find_contours(preds, thres)\n\n C_len = list()\n \n for n, contour in enumerate(contours):\n C_len.append(contours[n].shape[0])\n \n if smoothing:\n s_c = copy.deepcopy(contours)\n for n, contour in enumerate(contours):\n sc_x, sc_y = smooth_contours (contour[:, 0], contour[:, 1], n_components=num_components_smoothing, circularise=circular, n_pts=n_pts)\n\n s_c[n] = np.zeros((sc_x.shape[0], 2))\n s_c[n][:,0] = sc_x\n s_c[n][:,1] = sc_y\n\n contours = copy.deepcopy(s_c)\n\n except Exception as e:\n print(\"Error happened in extract_contours ...\", file=sys.stderr)\n print(e)\n sys.stderr.flush()\n\n return contours, C_len\n\n# ------------------------------------------------------------------------------\ndef extract_endo_epi_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):\n \"\"\"Extract myocardium endo and epi contours from segmentation mask or probability map\n\n Inputs:\n\n preds : [RO E1], input mask or probablity map\n thres : threshold to extract contours, a 2D marching cube extration is performed\n smoothing : True or False, if true, contours are smoothed\n num_components_smoothing : number of fft components kept after smoothing\n circular : True or False, if true, contours are kept to approx. circle\n\n Outputs:\n\n endo : a nx2 numpy array for endo contour\n epi : a nx2 numpy array for epi contour\n \"\"\"\n\n try:\n contours, C_len = extract_contours(preds, thres, smoothing, num_components_smoothing, circular, n_pts)\n\n num_c = len(contours)\n\n endo = None\n epi = None\n\n if num_c == 0:\n return endo, epi\n\n if num_c == 1:\n epi = contours[0]\n return endo, epi\n \n if num_c > 1:\n # find the longest contours as epi and the second longest as endo\n c_len = np.zeros([num_c])\n\n for n, contour in enumerate(contours):\n c_len[n] = C_len[n]\n \n c_ind = np.argsort(c_len)\n\n epi = contours[c_ind[-1]]\n endo = contours[c_ind[-2]]\n\n except Exception as e:\n print(\"Error happened in extract_endo_epi_contours ...\", file=sys.stderr)\n print(e)\n sys.stderr.flush()\n\n return endo, epi\n\n# ------------------------------------------------------------------------------\n\ndef extract_epi_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):\n \"\"\"Extract myocardium epi contours from segmentation mask or probability map\n\n Inputs:\n\n preds : [RO E1], input mask or probablity map\n thres : threshold to extract contours, a 2D marching cube extration is performed\n smoothing : True or False, if true, contours are smoothed\n num_components_smoothing : number of fft components kept after smoothing\n circular : True or False, if true, contours are kept to approx. 
circle\n\n Outputs:\n\n epi : a nx2 numpy array for epi contour\n \"\"\"\n\n try:\n\n contours, C_len = extract_contours(preds, thres, smoothing, num_components_smoothing, circular, n_pts)\n\n print(C_len)\n num_c = len(contours)\n\n epi = None\n\n if num_c == 0:\n return epi\n\n if num_c == 1:\n epi = contours[0]\n return epi\n\n if num_c > 1:\n # find the longest contours as epi\n c_len = np.zeros([num_c])\n\n for n, contour in enumerate(contours):\n c_len[n] = C_len[n]\n\n c_ind = np.argsort(c_len)\n\n print(\"Pick %d with len %d\" % (c_ind[-1], c_len[c_ind[-1]]))\n\n epi = contours[c_ind[-1]]\n\n except Exception as e:\n print(\"Error happened in extract_epi_contours ...\", file=sys.stderr)\n print(e)\n sys.stderr.flush()\n\n return epi\n\n# ------------------------------------------------------------------------------\n\ndef extract_sector_contours(sectors, thres=0.95, smoothing=True, num_components_smoothing=36, circular=False, n_pts=2000):\n \"\"\"Extract contours for every sector\n\n Inputs:\n\n sectors : [RO E1 3], mask for basal/medial/apex\n background is 0 \n basal/medial/apex: sector 1 - value 1, sector 2 - value 2 etc.\n\n thres : threshold to extract contours, a 2D marching cube extration is performed\n smoothing : True or False, if true, contours are smoothed\n num_components_smoothing : number of fft components kept after smoothing\n circular : True or False, if true, contours are kept to approx. circle\n\n Outputs:\n\n sector_C : a nx2x16 numpy array for sector contours\n \"\"\"\n\n try:\n RO, E1, SLC = sectors.shape\n\n max_pts = 0\n\n basal = list()\n for i in np.arange(6):\n mask = np.zeros((RO, E1))\n pts = np.where(np.squeeze(sectors[:,:,0])==i+1)\n mask[pts] = 1\n C = extract_epi_contours(mask, thres=thres, smoothing=True, num_components_smoothing=num_components_smoothing, circular=False, n_pts=n_pts)\n basal.append(C)\n if(C is not None):\n if(C.shape[0]>max_pts):\n max_pts = C.shape[0]\n\n medial = list()\n for i in np.arange(6):\n mask = np.zeros((RO, E1))\n pts = np.where(np.squeeze(sectors[:,:,1])==i+1)\n mask[pts] = 1\n C = extract_epi_contours(mask, thres=thres, smoothing=True, num_components_smoothing=num_components_smoothing, circular=False, n_pts=n_pts)\n medial.append(C)\n if(C is not None):\n if(C.shape[0]>max_pts):\n max_pts = C.shape[0]\n\n apex = list()\n for i in np.arange(4):\n mask = np.zeros((RO, E1))\n pts = np.where(np.squeeze(sectors[:,:,2])==i+1)\n mask[pts] = 1\n C = extract_epi_contours(mask, thres=thres, smoothing=True, num_components_smoothing=num_components_smoothing, circular=False, n_pts=n_pts)\n apex.append(C)\n if(C is not None):\n if(C.shape[0]>max_pts):\n max_pts = C.shape[0]\n\n sector_C = np.zeros((max_pts, 2, 16))-1\n\n for i in np.arange(6):\n C = basal[i]\n if(C is not None):\n num = C.shape[0]\n sector_C[0:num, :, i] = C\n\n for i in np.arange(6):\n C = medial[i]\n if(C is not None):\n num = C.shape[0]\n sector_C[0:num, :, i+6] = C\n\n for i in np.arange(4):\n C = apex[i]\n if(C is not None):\n num = C.shape[0]\n sector_C[0:num, :, i+12] = C\n\n except Exception as e:\n print(\"Error happened in extract_sector_contours ...\", file=sys.stderr)\n print(e)\n sys.stderr.flush()\n\n return sector_C, basal, medial, apex\n\ndef extract_sector_contours_array(sectors, thres=0.95, smoothing=True, num_components_smoothing=36, circular=False, n_pts=2000):\n sectors_C, basal, medial, apex = extract_sector_contours(sectors, thres, smoothing, num_components_smoothing, circular, n_pts)\n return 
sectors_C\n","sub_path":"utils/cmr_ml_utils_geometry.py","file_name":"cmr_ml_utils_geometry.py","file_ext":"py","file_size_in_byte":10872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"306977962","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('us', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='url',\n name='created',\n field=models.DateTimeField(default=datetime.datetime(2014, 12, 26, 14, 7, 0, 34243, tzinfo=utc), verbose_name='Created', auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='url',\n name='modified',\n field=models.DateTimeField(default=datetime.datetime(2014, 12, 26, 14, 7, 15, 615885, tzinfo=utc), verbose_name='Modified', auto_now=True),\n preserve_default=False,\n ),\n ]\n","sub_path":"us/migrations/0002_auto_20141226_1407.py","file_name":"0002_auto_20141226_1407.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"412224785","text":"# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Review RBAC Factory.\"\"\"\n\nfrom ggrc.models import all_models\n\nfrom integration.ggrc.access_control.rbac_factories import universal_factory\nfrom integration.ggrc.models.factories import get_model_factory\nfrom integration.ggrc.review import build_reviewer_acl\n\n\nclass MappedReviewRBACFactory(universal_factory.UniversalRBACFactory):\n \"\"\"MappedReview contains methods to check review related actions\"\"\"\n def __init__(self, user_id, acr, parent=None, role_at_review=False):\n self.parent = None\n self.parent_id = None\n self.parent_name = None\n self.role_at_review = role_at_review\n self.review_id = None\n super(MappedReviewRBACFactory, self).__init__(user_id, acr, parent)\n\n def setup_models(self, parent_name):\n \"\"\"Setup Review, Reviewer\"\"\"\n self.parent = get_model_factory(parent_name)()\n self.parent_id = self.parent.id\n self.parent_name = parent_name\n if self.role_at_review:\n _, review = self.setup_review(self.acr.id, self.user_id)\n self.review_id = review.id\n else:\n acr_id = self.acr.id\n _, review = self.setup_review()\n self.parent = self.parent.__class__.query.get(self.parent_id)\n self.acr = self.acr.__class__.query.get(acr_id)\n self.review_id = review.id\n self.assign_person(self.parent, self.acr, self.user_id)\n\n def setup_review(self, acr_id=None, user_id=None):\n \"\"\"Create new review object\"\"\"\n resp, review = self.objgen.generate_object(\n all_models.Review,\n {\n \"reviewable\": {\n \"type\": self.parent.type,\n \"id\": self.parent_id,\n },\n \"context\": None,\n \"status\": all_models.Review.STATES.UNREVIEWED,\n \"access_control_list\": build_reviewer_acl(acr_id, user_id),\n \"notification_type\": all_models.Review.NotificationTypes.EMAIL_TYPE\n },\n )\n return resp, review\n\n def create_review(self, acr_id=None, user_id=None):\n \"\"\"Create review\"\"\"\n resp, _ = self.setup_review(acr_id, user_id)\n return resp\n\n def read_review(self):\n \"\"\"Read existing Review object.\"\"\"\n res = self.api.get(all_models.Review, self.review_id)\n return res\n\n def update_review(self):\n \"\"\"Update status of existing Review object.\"\"\"\n review = 
all_models.Review.query.get(self.review_id)\n return self.api.put(review, {\"status\": all_models.Review.STATES.REVIEWED})\n\n def delete_review(self):\n \"\"\"Delete Review object.\"\"\"\n review = all_models.Review.query.get(self.review_id)\n return self.api.delete(review)\n","sub_path":"test/integration/ggrc/access_control/rbac_factories/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"538588287","text":"from flask import Flask, request, redirect, render_template, session, flash\nfrom mysqlconnection import MySQLConnector\nimport md5\nimport os, binascii # include this at the top of your file\nimport datetime\nimport re\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\nsalt = binascii.b2a_hex(os.urandom(15))\n\napp = Flask(__name__)\napp.secret_key = 'KeepItSecretKeepItSafe'\nmysql = MySQLConnector(app,'emails')\n\n@app.route('/')\ndef index():\n\n return render_template('index.html')\n\n@app.route('/addemail', methods=['POST'])\ndef create():\n if len(request.form['email']) < 1:\n flash(\"Email cannot be blank!\")\n elif not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid Email Address!\")\n else: \n query = \"INSERT INTO emails (email, created_at, updated_at) VALUES( :email, NOW(), NOW())\"\n data = {\n 'email': request.form[\"email\"],\n }\n mysql.query_db(query, data)\n return redirect('/success')\n \n return redirect('/')\n@app.route('/success')\ndef success():\n emails = mysql.query_db(\"SELECT * FROM emails\")\n \n return render_template('success.html', all_emails=emails)\n\n# @app.route('/friends', methods=['POST'])\n# def create():\n# print request.form['first_name']\n# print request.form['last_name']\n# print request.form['occupation']\n# query = \"INSERT INTO friends (first_name, last_name, occupation, created_at, updated_at) VALUES (:first_name, :last_name, :occupation, NOW(), NOW())\"\n# # We'll then create a dictionary of data from the POST data received.\n# data = {\n# 'first_name': request.form['first_name'],\n# 'last_name': request.form['last_name'],\n# 'occupation': request.form['occupation']\n# }\n# # Run query, with dictionary values injected into the query.\n# mysql.query_db(query, data)\n# return redirect('/')\n\n# @app.route('/friends/')\n# def show(friend_id):\n# # Write query to select specific user by id. 
At every point where\n# # we want to insert data, we write \":\" and variable name.\n# query = \"SELECT * FROM friends WHERE id = :specific_id\"\n# # Then define a dictionary with key that matches :variable_name in query.\n# data = {'specific_id': friend_id}\n# # Run query with inserted data.\n# friends = mysql.query_db(query, data)\n# # Friends should be a list with a single object,\n# # so we pass the value at [0] to our template under alias one_friend.\n# return render_template('index.html', one_friend=friends[0])\n\n\n# @app.route('/update_friend/', methods=['POST'])\n# def update(friend_id):\n# query = \"UPDATE friends SET first_name = :first_name, last_name = :last_name, occupation = :occupation WHERE id = :id\"\n# data = {\n# 'first_name': request.form['first_name'],\n# 'last_name': request.form['last_name'],\n# 'occupation': request.form['occupation'],\n# 'id': friend_id\n# }\n# mysql.query_db(query, data)\n# return redirect('/')\n\n# @app.route('/remove_friend/', methods=['POST'])\n# def delete(friend_id):\n# query = \"DELETE FROM friends WHERE id = :id\"\n# data = {'id': friend_id}\n# mysql.query_db(query, data)\n# return redirect('/')\n\napp.run(debug=True)\n\n\n# username = request.form['username'] #for hashing passwords with salt\n# email = request.form['email']\n# password = request.form['password']\n# salt = binascii.b2a_hex(os.urandom(15))\n# hashed_pw = md5.new(password + salt).hexdigest()\n# insert_query = \"INSERT INTO users (username, email, password, salt, created_at, updated_at)\n# VALUES (:username, :email, :hashed_pw, :salt, NOW(), NOW())\"\n# query_data = { 'username': username, 'email': email, 'hashed_pw': hashed_pw, 'salt': salt}\n# mysql.query_db(insert_query, query_data)\n\n\n# email = request.form['email'] For authenticating passwords hashed with salt\n# password = request.form['password']\n# user_query = \"SELECT * FROM users WHERE users.email = :email LIMIT 1\"\n# query_data = {'email': email}\n# user = mysql.query_db(user_query, query_data)\n# if len(user) != 0:\n# encrypted_password = md5.new(password + user[0]['salt']).hexdigest()\n# if user[0]['password'] == encrypted_password:\n# # this means we have a successful login!\n# else:\n# # invalid password!\n# else:\n# # invalid email!","sub_path":"MySQLFlask/Emails/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"198767933","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef power():\n l = list(simfunc.split())\n n = np.double(l[1])\n for i in range(np.size(x)):\n x[i] = x[i]**n\n\ndef log():\n for i in range(np.size(x)):\n x[i] = np.log(x[i])\n\nx = list(map(np.double, input().split()))# input data for the x axis\nx = np.asarray(x)\ny = list(map(np.double, input().split()))# input data for the y axis\ny = np.asarray(y)\nsimfunc = input()\nn = int(input())\nxlabel = input()\nylabel = input()\nxlabel = 'r\"' + xlabel\nylabel = 'r\"' + ylabel\nxlabel = xlabel + '\"'\nylabel = ylabel + '\"'\nif simfunc == \"log\":\n log()\nelif simfunc != \" \":\n power()\nn, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)\n\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nplt.axis([x.min(), x.max(), y.min(), y.max()])\nplt.grid(True)\nplt.show()","sub_path":"hist.py","file_name":"hist.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"280590720","text":"\"\"\"empty 
message\n\nRevision ID: 9202296f9519\nRevises: dd665747689a\nCreate Date: 2019-10-09 19:34:43.819937\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9202296f9519'\ndown_revision = 'dd665747689a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('alumnos',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.Column('birthdate', sa.DateTime(), nullable=True),\n sa.Column('age', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('apoderados',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.Column('rut', sa.String(length=120), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.Column('telefono', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('apoderados_alumnos',\n sa.Column('apoderado_id', sa.Integer(), nullable=False),\n sa.Column('alumno_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['alumno_id'], ['alumnos.id'], ),\n sa.ForeignKeyConstraint(['apoderado_id'], ['apoderados.id'], ),\n sa.PrimaryKeyConstraint('apoderado_id', 'alumno_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('apoderados_alumnos')\n op.drop_table('apoderados')\n op.drop_table('alumnos')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/9202296f9519_.py","file_name":"9202296f9519_.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"136790270","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport sys\n\nfrom colour.utilities.deprecation import ModuleAPI, build_API_changes\nfrom colour.utilities.documentation import is_documentation_building\n\nfrom .datasets import * # noqa\nfrom . 
import datasets\nfrom .macadam_limits import is_within_macadam_limits\nfrom .mesh import is_within_mesh_volume\nfrom .pointer_gamut import is_within_pointer_gamut\nfrom .spectrum import (generate_pulse_waves, XYZ_outer_surface,\n is_within_visible_spectrum)\nfrom .rgb import (RGB_colourspace_limits, RGB_colourspace_volume_MonteCarlo,\n RGB_colourspace_volume_coverage_MonteCarlo,\n RGB_colourspace_pointer_gamut_coverage_MonteCarlo,\n RGB_colourspace_visible_spectrum_coverage_MonteCarlo)\n\n__all__ = []\n__all__ += datasets.__all__\n__all__ += ['is_within_macadam_limits']\n__all__ += ['is_within_mesh_volume']\n__all__ += ['is_within_pointer_gamut']\n__all__ += [\n 'generate_pulse_waves', 'XYZ_outer_surface', 'is_within_visible_spectrum'\n]\n__all__ += [\n 'RGB_colourspace_limits', 'RGB_colourspace_volume_MonteCarlo',\n 'RGB_colourspace_volume_coverage_MonteCarlo',\n 'RGB_colourspace_pointer_gamut_coverage_MonteCarlo',\n 'RGB_colourspace_visible_spectrum_coverage_MonteCarlo'\n]\n\n\n# ----------------------------------------------------------------------------#\n# --- API Changes and Deprecation Management ---#\n# ----------------------------------------------------------------------------#\nclass volume(ModuleAPI):\n def __getattr__(self, attribute):\n return super(volume, self).__getattr__(attribute)\n\n\n# v0.3.16\nAPI_CHANGES = {\n 'ObjectRenamed': [[\n 'colour.volume.ILLUMINANT_OPTIMAL_COLOUR_STIMULI',\n 'colour.volume.OPTIMAL_COLOUR_STIMULI_ILLUMINANTS',\n ], ]\n}\n\"\"\"\nDefines *colour.volume* sub-package API changes.\n\nAPI_CHANGES : dict\n\"\"\"\n\nif not is_documentation_building():\n sys.modules['colour.volume'] = volume(sys.modules['colour.volume'],\n build_API_changes(API_CHANGES))\n\n del ModuleAPI, is_documentation_building, build_API_changes, sys\n","sub_path":"colour/volume/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"458429077","text":"import FileModule\nimport json\nimport requests\nheaders = {'Content-type': 'application/json'}\n\ncont1 = FileModule.get_account_by_port(5000)\ncont2 = FileModule.get_account_by_port(5005)\n\ndata = {\n 'receiver_public_key': cont2['public_key'],\n 'amount': 30000,\n 'sender_public_key': cont1['public_key'],\n 'sender_private_key': cont1['private_key']\n}\n\n\nrequests.post(cont1['address']+'transactions/new', data=json.dumps(data), headers=headers)\n\n\n","sub_path":"Node-Experiment/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"406214921","text":"import csv\nimport types\nimport urllib\n\nimport io\nfrom flask import Flask, request, jsonify, make_response\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask_cors import CORS\nfrom flask_restful import reqparse\n\ne = create_engine('sqlite:///breweries.db')\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\ndef create_csv(data):\n out = \"\"\n for row in data:\n for item in row:\n out = out + str(item) + \";\"\n out = out + \"\\n\"\n response = make_response(out,200)\n response.headers['Content-Type'] = 'text/csv'\n response.headers['Content-Disposition'] = 'attachment; filename=%s' % \"csv_output\"\n return response\n\ndef create_json(data):\n result = {'data': [dict(zip(tuple (data.keys()) ,i)) for i in data.cursor]}\n return result\n\n\ndef get_all():\n conn = 
e.connect()\n data = conn.execute(\"select distinct id, name, country, website, address1 from breweries order by name\")\n return data\n\ndef get_by_name(name):\n name = urllib.request.unquote(name)\n conn = e.connect()\n query = conn.execute(\"select * from breweries where name='%s'\"%name)\n return query\n\ndef get_by_country(country):\n conn = e.connect()\n country = urllib.request.unquote(country)\n query = conn.execute(\"select name, country, website, address1 from breweries where country='%s'\"%country)\n return query\n\nclass CSVAll(Resource):\n def get(self):\n return create_csv(get_all())\n\nclass CSVByName(Resource):\n def get(self, name):\n return create_csv(get_by_name(name))\n\nclass CSVByCountry(Resource):\n def get(self, country):\n return create_csv(get_by_country(country))\n\nclass AllBreweries(Resource):\n #returns all breweries\n def get(self):\n return create_json(get_all())\n #creates new\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('name')\n parser.add_argument('country')\n parser.add_argument('website')\n parser.add_argument('address1')\n parser.add_argument('secretkey')\n\n args = parser.parse_args()\n\n n,c,w,a = args['name'], args['country'], args['website'], args['address1']\n\n key = args['secretkey']\n if(key != \"admin1\"):\n return {\"auth\" : \"false\"}\n\n insert = \"INSERT INTO breweries (name, country, website, address1) VALUES ('%s','%s','%s','%s'); \" % (n,c,w,a)\n print(insert)\n conn = e.connect()\n conn.execute(insert)\n return {\"auth\" : \"true\"}\n\n\nclass BreweriesByName(Resource):\n def get(self, name):\n return create_json(get_by_name(name))\n def delete(self, name):\n conn = e.connect()\n name = urllib.request.unquote(name)\n query = conn.execute(\"DELETE FROM breweries WHERE name='%s'\"%name)\n\n\nclass BreweriesByCountry(Resource):\n def get(self, country):\n return create_json(get_by_country(country))\n\napi.add_resource(BreweriesByCountry, '/country/')\napi.add_resource(BreweriesByName, '/breweries/')\napi.add_resource(AllBreweries, '/breweries')\n\napi.add_resource(CSVAll, '/csv/')\napi.add_resource(CSVByName, '/csv/name/')\napi.add_resource(CSVByCountry, '/csv/country/')\n\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"398760703","text":"#Kyle Chinn 10/10/2018 I have not given or received any unauthorized assistance on this assignment.\n\n#NOTE: The professor gave me an extension on this homework since I was at a funeral for 5 days over the due date.\n\nimport pandas as pd\nimport subprocess\nimport sys\n\n#The lxml package is needed for pandas to read the files from github\nprint('Checking dependencies...')\nprint('')\nsubprocess.call([sys.executable, '-m', 'pip', 'install', 'lxml'])\nprint('')\n\n#Selection function\ndef selection():\n #Acceptable input list\n input_list = [1, 2, 3]\n while True:\n try:\n #Greet user and ask for selection\n user_num = input('Hello! I print out stem and leaf plots... 
Please select 1, 2, 3: ')\n #If input is not acceptable, reissue prompt\n if int(user_num) not in input_list:\n print('Please input 1, 2, or 3 only')\n #If input is acceptable, call stem() \n elif int(user_num) in input_list:\n stem(user_num)\n #Catch ValueErrors\n except ValueError:\n print('Please input 1, 2, or 3 only')\n \n#Function for printing stem and leaf plot\ndef stem(num):\n #Create empty dictionary\n dict = {}\n #Import file from github given user input\n file = pd.read_html('https://github.com/PixarJunkie/dsc-430-python-programming/blob/master/data/StemAndLeaf%d.txt' % (int(num)))\n #Get file into usable list format\n list_ = list(file[0][1])\n list_ = [list(str(item)) for item in list_]\n\n #Fill the dictionary\n for line in list_:\n #Set key, value from list_\n key = ''.join(line[:len(line) - 1])\n value = line[-1]\n #If key doesn't exist yet, create it, and assign empty list for its values\n if key not in dict:\n dict[key] = []\n #Add values to lists for each key\n dict[key].append(value)\n #Title\n print(\"\"\" Stem and Leaf Plot\"\"\")\n #Print stem and leaf plot\n for k, v in dict.items():\n #Print key: value while joining values and removing ','\n print('{}: {}'.format(k, ''.join(v)))\n #Prompt user for another selection or to quit\n another_selection()\n return None\n\n#Function for checking if player would like to continue\ndef another_selection():\n #Acceptable input list\n yes_no = ['yes', 'no']\n while True:\n #Prompt user for selection\n new_selection = input('Would you like to make another selection?(yes or no)')\n if new_selection.lower() not in yes_no:\n print('Please input yes or no only...')\n #If no, thank user and exit program\n elif new_selection.lower() == 'no':\n print('')\n print('Thanks for checking out the program, bye!')\n print('')\n sys.exit(1)\n #If yes, call selection()\n elif new_selection.lower() == 'yes':\n print('')\n selection()\n return None\n \nselection()","sub_path":"src/python/.ipynb_checkpoints/stem-and-leaf-checkpoint.py","file_name":"stem-and-leaf-checkpoint.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"463084589","text":"import random\nimport re\n\nwith open('quijote.txt', encoding='utf-8') as archivo:\n contenido = archivo.read()\n\ncontenido = contenido.replace('\\n', ' ')\ncontenido = re.sub(' {2,}', ' ', contenido)\ncontenido = re.sub('[^\\w ]', '', contenido)\ncontenido = contenido.lower()\ncontenido = contenido.split(' ')\n\npalabras = {}\n\nfor posicion, palabra in enumerate(contenido[:-1]):\n if palabra not in palabras:\n palabras[palabra] = []\n palabras[palabra].append(contenido[posicion + 1])\n\npalabra_inicial = 'el'\nnumero_palabras = 30\n\nresultado = [palabra_inicial]\n\nfor numero in range(numero_palabras):\n ultima_palabra = resultado[-1]\n lista_palabras = palabras[ultima_palabra]\n if lista_palabras:\n resultado.append(random.choice(lista_palabras))\n\nprint(' '.join(resultado)[:140])\n","sub_path":"clase2/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"259374343","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 4 17:31:10 2018\n\n@author: eikegroen\n\"\"\"\nimport numpy as np \nimport scipy.linalg as sclin\nfrom scipy.interpolate import griddata\nstart = -2\nstop = 2\nM = 2.0\nn = 10\nh = 4.0/n\ninttype = 'linear'\na = 1/(M*h*h)\n
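# three-point finite-difference Hamiltonian: the diagonal carries V(x) + a and the\n# off-diagonals -a/2 = -1/(2*M*h*h), i.e. a discretisation of -(1/2M) d2/dx2 + V(x)\n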
x = np.linspace(start,stop,n+1)\nVx = np.array([-2.0,2.0])\nVy = np.array([0.0,0.0])\ngrid=griddata(Vx,Vy,x,method=inttype)\nfvalue=1\nlvalue=7\nd=grid+a\ne=np.zeros(n)+(-0.5)*a\nev=sclin.eigh_tridiagonal(d,e,select='a')\nprint(ev)\n\n\n\n\n\n \n \n\n \n\n\n\n\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"545893280","text":"from dice import dice_pool, dice_roll, d6_pool, Die\nfrom functools import reduce\nfrom interfaces import Rollable\nimport math\nfrom random import sample\nimport csv\n\n# The damage bonus table can be found on page 33 of the 7th revised edition.\ndef damage_bonus_table(strength, size):\n key = strength + size\n damage_bonus = 0\n damage_bonus_string = None\n build = 0\n if key >= 165:\n number_d6 = math.ceil(((key-204)/80)+1)\n damage_bonus_string = f\"+{number_d6}d6\"\n damage_bonus = dice_roll(d6_pool(number_d6))\n build = number_d6 + 1\n elif key >= 125:\n damage_bonus_string = f\"+1d4\"\n damage_bonus = dice_roll(dice_pool(1,4))\n build = 1\n elif key >= 85:\n pass\n else:\n damage_bonus = -1 if key >= 65 else -2\n damage_bonus_string = str(damage_bonus)\n build = damage_bonus\n return {'damage-bonus': damage_bonus, 'build': build, 'dice': damage_bonus_string}\n# * being able to read in from a skill csv could be extended to read from a characteristics + skill csv\nclass Skill(Rollable):\n def __init__(self, name, score, check=False, rules=None, important=False):\n super().__init__(name, score)\n self.rules = rules\n self.important = important\n self.check = check\n\n def skill_improvement(self, check=True, roll=None):\n if roll is None:\n roll = Die(100).result\n if check == True:\n if roll > self.score:\n self.score += Die(6).result\n\n def __str__(self):\n box = \"\\u25a1\"\n checked = \"\\u2611\"\n msg = f\"{box if self.check is False else checked} {self.name.title()}: {self.score}\"\n return msg\n\n def roll(self):\n result = Die(100).result\n outcome = True if (result <= self.score) else False\n if outcome is True:\n self.check = True\n return {\"outcome\": outcome, \"result\": result}\n\n @classmethod\n def from_tuple(cls, name_score_tuple):\n skill = super().from_tuple(name_score_tuple)\n return skill\n\nclass Characteristic(Rollable):\n def __init__(self, name, short_name=None, score=0, rules=None):\n super().__init__(name, score)\n if short_name is None:\n short_name = name[0:3]\n self.short_name = short_name.upper()\n\n def roll(self):\n result = Die(100).result\n outcome = True if (result <= self.score) else False\n if outcome is True:\n self.check = True\n return {\"outcome\": outcome, \"result\": result}\n \n def __str__(self):\n msg = f\"{self.short_name}: {self.score:>2}\"\n return msg\n def __repr__(self):\n msg = f\"{self.short_name}: {self.score:>2}\"\n return msg\nclass Investigator(object):\n \n @staticmethod\n def read_skill_dict(filename=\"skills.csv\"):\n skill_dict = {}\n with open(filename,newline='\\n') as csvfile:\n skill_reader = csv.reader(csvfile,delimiter=',',quotechar='|')\n for row in skill_reader:\n name = row[0]\n skill = Skill(name,int(row[1]),rules=row[2])\n skill_dict.update({name : skill})\n return skill_dict\n\n @staticmethod\n def calculate_movement_rate(strength, dexterity, size):\n if strength > size and dexterity > size:\n rate = 9\n elif strength >= size or dexterity >= size:\n rate = 8\n else:\n rate = 7\n 
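# movement rate per CoC 7e: 9 when both STR and DEX exceed SIZ, 7 when both fall below it, 8 otherwise\n 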
return Characteristic(\"Movement Rate\", \"MOV\", rate)\n \n\n def stat_dictionary(self):\n return {\n \"STR\": self.strength,\n \"CON\": self.constitution,\n \"POW\": self.power,\n \"DEX\": self.dexterity,\n \"APP\": self.appearance,\n \"INT\": self.intelligence,\n \"SIZ\": self.size,\n \"EDU\": self.education,\n }\n\n def raise_skill(self, skill_name, points):\n self.skill_dict[skill_name].add_points(points)\n\n def get_points(self, skill_name):\n ret = self.skill_dict[skill_name].points()\n return ret\n\n def apply_age_penalty(\n self, \n strength=None, \n constitution=None, \n size=None, \n dexterity=None, \n appearance=None, \n intelligence=None, \n power=None, \n education=None\n ):\n if strength == None:\n strength = self.strength\n if constitution == None:\n constitution = self.constitution\n if size == None:\n size = self.size\n if dexterity == None:\n dexterity = self.dexterity\n if appearance == None:\n appearance = self.appearance\n if intelligence == None:\n intelligence = self.intelligence\n if power == None:\n power = self.power\n if education == None:\n education = self.education\n if self.age >= 80:\n for _ in range(4):\n education.skill_improvement(check=True)\n self.strength = strength\n self.constitution = constitution\n self.dexterity = dexterity\n self.appearance.score -= 25\n self.movement_rate.score -= 5\n elif self.age >= 70:\n for _ in range(4):\n education.skill_improvement(check=True)\n self.strength = strength\n self.constitution = constitution\n self.dexterity = dexterity\n self.appearance.score -= 20\n self.movement_rate.score -= 4\n elif self.age >= 60:\n for _ in range(4):\n education.skill_improvement(check=True)\n self.strength = strength\n self.constitution = constitution\n self.dexterity = dexterity\n self.appearance.score -= 15\n self.movement_rate.score -= 3\n elif self.age >= 50:\n for _ in range(3):\n education.skill_improvement(check=True)\n self.strength = strength\n self.constitution = constitution\n self.dexterity = dexterity\n self.appearance.score -= 10\n self.movement_rate.score -= 2\n elif self.age >= 40:\n for _ in range(2):\n education.skill_improvement(check=True)\n self.strength = strength\n self.constitution = constitution\n self.dexterity = dexterity\n self.appearance.score -= 5\n self.movement_rate.score -= 1\n elif self.age >= 20:\n education.skill_improvement(check=True)\n elif self.age >= 15:\n self.size = size\n self.strength = strength\n self.education.score -= 5\n luck_roll = [d6_pool(3),d6_pool(3)]\n self.luck = max(dice_roll(x) for x in luck_roll) * 5\n self.damage_bonus_table = damage_bonus_table(strength, size)\n\n\n\n # TODO #8: make a new constructor for Skills and Characteristics to pull from a tuple like (Skill_name, score).\n # XXX: Characteristic could derive the short_name from the first 3 letters of the name\n # XXX: that will make it easier to implement rule packs and the csvs\n\n @classmethod\n def generate_random(cls, name,skill_dict):\n # RULES: STR, CON, POW, DEX, APP are all 3d6\n # RULES: INT and SIZ are 2d6 + 6\n # RULES: EDU is 2d6+6\n # RULES: under 7th edition rules I multiply them by 5\n # RULES: under the laundry files age is 17 + 2d6, but we are hard coding 7e for now\n\n age = sample(range(15,90),1) \n character = cls(\n name=name,\n strength=Characteristic(\"Strength\", \"STR\", dice_roll(d6_pool(3)) * 5),\n constitution=Characteristic(\"Constitution\", \"CON\", dice_roll(d6_pool(3)) * 5),\n size=Characteristic(\"Size\", \"SIZ\", dice_roll(d6_pool(2),6) * 5),\n dexterity=Characteristic(\"Dexterity\", 
\"DEX\", dice_roll(d6_pool(3)) * 5),\n appearance=Characteristic(\"Appearance\", \"APP\", dice_roll(d6_pool(3)) * 5),\n intelligence=Characteristic(\"Intelligence\", \"INT\", dice_roll(d6_pool(2),6) * 5),\n power=Characteristic(\"Power\", \"POW\", dice_roll(d6_pool(3)) * 5),\n education=Characteristic(\"Education\", \"EDU\", dice_roll(d6_pool(2),6) * 5),\n luck=Characteristic(\"Luck\", \"LUK\", dice_roll(d6_pool(3)) * 5),\n age=age,\n skill_dict=skill_dict,\n )\n # character.apply_age_penalty(age)\n\n\n return character\n\n\n def __init__(\n self,\n name,\n strength: Characteristic,\n constitution: Characteristic,\n power: Characteristic,\n dexterity: Characteristic,\n appearance: Characteristic,\n intelligence: Characteristic,\n size: Characteristic,\n education: Characteristic,\n luck: Characteristic,\n age: int,\n skill_dict,\n ):\n self.name = name\n # STR, CON, POW, DEX, APP are all 3d6\n self.strength = strength\n self.constitution = constitution\n self.power = power\n self.dexterity = dexterity\n self.appearance = appearance\n self.luck = luck\n\n # INT and SIZ are 2d6 + 6\n self.intelligence = intelligence\n self.size = size\n\n # EDU is 3d6+3\n self.education = education\n # Age is 17+2d6, but has a minimum of EDU + 5\n self.age = age\n self.apply_age_penalty(self.age)\n\n # Derived stats\n self.damage_bonus_table = damage_bonus_table(self.strength, self.size)\n self.max_hp = Characteristic(\"Hit Points\", \"HP\", score=math.floor((self.constitution + self.size)/10))\n self.current_hp = self.max_hp\n self.wound_threshold = math.ceil(self.max_hp.score / 2)\n self.movement_rate = Investigator.calculate_movement_rate(strength=self.strength,dexterity=self.dexterity,size=self.size)\n\n # Skills\n self.skill_dict = skill_dict\n\n def damage_roll(self, weapon_damage):\n damage_bonus = damage_bonus_table(self.strength, self.size)\n return weapon_damage + damage_bonus['damage-bonus']\n\n def __repr__(self):\n msg = (\n f\" Name: {self.name}\\n Age: {self.age:>2}\\n\"\n f\" {self.strength} | {self.intelligence}\\n\"\n f\" {self.constitution} | {self.power}\\n\"\n f\" {self.dexterity} | {self.appearance}\\n\"\n f\" {self.size} | {self.education}\\n\"\n f\" {self.current_hp}/{self.max_hp.score}\\n\"\n f\" {self.movement_rate}\"\n )\n skill_dict = self.get_skill_list(important=True)\n for key in skill_dict:\n msg += \"\\n\" + str(skill_dict[key])\n return msg\n\n def get_skill_list(self, important=False):\n if important is False:\n return [skill for skill in self.skill_dict]\n elif important is True:\n return [skill_dict[key] for key in self.skill_dict.keys() if skill_dict[key].important is True]\n\n def skill_roll(self, skill_name):\n skill = self.skill_dict[skill_name]\n return skill.roll()\n\n\nif __name__ == \"__main__\":\n print(\"Welcome to the Call of Cthulhu character generator.\")\n name = input(\"Please provide a name for your character: \")\n skill_dict = Investigator.read_skill_dict()\n # Now we will start creating a character.\n stat_block = {\n \"STR\" : Characteristic(\"Strength\", \"STR\", dice_roll(d6_pool(3)) * 5),\n \"CON\" : Characteristic(\"Constitution\", \"CON\", dice_roll(d6_pool(3)) * 5),\n \"SIZ\" : Characteristic(\"Size\", \"SIZ\", dice_roll(d6_pool(2),6) * 5),\n \"DEX\" : Characteristic(\"Dexterity\", \"DEX\", dice_roll(d6_pool(3)) * 5),\n \"APP\" : Characteristic(\"Appearance\", \"APP\", dice_roll(d6_pool(3)) * 5),\n \"INT\" : Characteristic(\"Intelligence\", \"INT\", dice_roll(d6_pool(2),6) * 5),\n \"POW\" : Characteristic(\"Power\", \"POW\", dice_roll(d6_pool(3)) 
* 5),\n \"EDU\" : Characteristic(\"Education\", \"EDU\", dice_roll(d6_pool(2),6) * 5),\n \"LUK\" : Characteristic(\"Luck\", \"LUK\", dice_roll(d6_pool(3)) * 5)\n }\n print(\"I have generated your stats in the background for now.\")\n age = int(input(\"Please enter an age for your investigator: \"))\n print(\"Thank you. This will have an effect on your investigator later on.\")\n leonard = Investigator(\n name=name, \n strength=stat_block['STR'], \n constitution=stat_block['CON'], \n size=stat_block['SIZ'],\n dexterity=stat_block['DEX'],\n appearance=stat_block['APP'],\n intelligence=stat_block['INT'],\n power=stat_block['POW'],\n education=stat_block['EDU'],\n luck=stat_block['LUK'],\n age=age,\n skill_dict=skill_dict\n )\n required_penalty = 0\n if age >= 80:\n required_penalty = 80\n elif age >= 70:\n required_penalty = 40\n elif age >= 60:\n required_penalty = 20\n elif age >= 50:\n required_penalty = 10\n elif age >= 40:\n required_penalty = 5 \n elif age >= 15:\n required_penalty = 0\n remaining_penalty = required_penalty\n while (remaining_penalty > 0):\n print(f\"You currently have to apply {remaining_penalty} out of {required_penalty} points.\")\n if age >= 20:\n stat_apply_to = input(\"Please choose between STR, CON, and DEX: \")\n else:\n stat_apply_to = input(\"Please choose between STR and SIZ: \").upper()\n penalty_to_apply = int(input(f\"Please input an amount no greater than {remaining_penalty}: \"))\n if penalty_to_apply >= remaining_penalty:\n penalty_to_apply = remaining_penalty\n if penalty_to_apply >= stat_block[stat_apply_to].score:\n penalty_to_apply = stat_block[stat_apply_to].score\n remaining_penalty -= penalty_to_apply\n stat_block[stat_apply_to].score -= penalty_to_apply\n leonard.apply_age_penalty(\n strength=stat_block['STR'], \n constitution=stat_block['CON'], \n size=stat_block['SIZ'], \n dexterity=stat_block['DEX'], \n appearance=stat_block['APP'], \n intelligence=stat_block['INT'], \n power=stat_block['POW'], \n education=stat_block['EDU']\n )\n print(leonard)\n occupation_name = input(\"Please provide a title for your occupation: \")\n # TODO #11 Find a way to import occupations\n # TODO #10 Find a way to generate credit scores\n print(f\"Now it is time to determine {name}'s occupation.\")\n credit_score = int(input(\"Please enter a credit score for your character: \"))\n occupation_skills = {}\n for _ in range(8):\n skill_name = input(\"Please provide a name for your skill: \")\n skill_base = input(\"Please input a base value for the skill: \")\n skill = Skill(skill_name, skill_base,important=True)\n occupation_skills.update({skill_name: skill})\n \n \n # TODO #5 implement Decide skills & allocate points step\n # update step for the skills we've been selecting\n leonard.skill_dict.update(occupation_skills)\n # TODO #4 implement Create a backstory step\n # TODO #3 implement Equip investigator step\n print(leonard)","sub_path":"coc.py","file_name":"coc.py","file_ext":"py","file_size_in_byte":15109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"587214400","text":"'''\r\nCreate a letter sequence graph from \"wordlist.txt\", store dict in \"LetterGraph.py\"\r\n\r\nFor each word in the text file \"wordlist.txt\", for each possible substring in the\r\nword, store all possible immediately chronological substrings in a list, and\r\nstore all prefix substrings as keys in a dict, with the suffix substring lists\r\nas the item.\r\ne.g. 
If the word is \"Dream\", the output is\r\n{\r\n \"d\":[\"r\",\"re\",\"rea\",\"ream\"],\r\n \"r\":[\"e\",\"ea\",\"eam\"],\r\n \"dr\":[\"e\",\"ea\",\"eam\"],\r\n \"e\":[\"a\",\"am\"],\r\n \"re\":[\"a\",\"am\"],\r\n \"dre\":[\"a\",\"am\"],\r\n \"a\":[\"m\"],\r\n \"ea\":[\"m\"],\r\n \"rea\":[\"m\"],\r\n \"drea\":[\"m\"]\r\n}\r\n\r\nusage:\r\n python LGGen.py [] []\r\n\r\nmaxPrefixSize: How many characters long the keys (in the dict stored in\r\n LetterGraph.py) can be. Defaults to math.inf\r\nmaxSuffixSize: How many characters an element in the item (in the dict stored in\r\n LetterGraph.py) can be. Defaults to math.inf\r\n\r\nNote:\r\n The larger you make maxPrefixSize and maxSuffixSize, the bigger the file\r\n size and the memory needed, but the more readable the words you make. The\r\n smaller these values, the smaller the file size and memory requirements, but\r\n fabricated words are less readable (Many consonants in a row).\r\n\r\n The default wordlist.txt file provided is the file 20k.txt from the Github\r\n repo\r\n https://github.com/first20hours/google-10000-english\r\n'''\r\n\r\nif __name__==\"__main__\":\r\n from string import ascii_lowercase as alc\r\n import sys\r\n from math import inf\r\n\r\n PREFIXSIZE = int(sys.argv[1]) if len(sys.argv)>=2 else inf\r\n SUFFIXSIZE = int(sys.argv[2]) if len(sys.argv)>=3 else inf\r\n\r\n print(PREFIXSIZE, SUFFIXSIZE)\r\n\r\n data = {}\r\n\r\n for i in alc:\r\n data[i] = set()\r\n\r\n with open(\"wordlist.txt\",'r') as FILEIN:\r\n for line in FILEIN.readlines():\r\n for i in range(len(line[:-2])):\r\n for j in range(min(PREFIXSIZE, i+1)):\r\n data[line[i-j:i+1].lower()] = data.get(line[i-j:i+1].lower(), set())\r\n for k in range(i+1,min(i+SUFFIXSIZE+1, len(line[:-1]))):\r\n data[line[i-j:i+1].lower()].add(line[i+1:k+1].lower())\r\n\r\n for i in data.keys():\r\n data[i] = list(data[i])\r\n data[i].sort()\r\n\r\n with open(\"LetterGraph.py\",'w') as FILEOUT:\r\n FILEOUT.flush()\r\n FILEOUT.write(\"data = {\\n\")\r\n for key in data.keys():\r\n FILEOUT.write(\"\\t\")\r\n FILEOUT.write(\"'\" + str(key) + \"'\")\r\n FILEOUT.write(\": \")\r\n FILEOUT.write(str(data[key]))\r\n FILEOUT.write(\",\\n\")\r\n FILEOUT.write(\"}\")\r\n","sub_path":"LGGen.py","file_name":"LGGen.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"278466974","text":"import tkinter as tk\nfrom datetime import datetime\nfrom glob import glob\nimport json\n\nWAGE = 14\n\nclass Application(tk.Frame):\n def __init__(self,master=None):\n super().__init__(master)\n self.master = master\n\n self.timing = False\n self.overtime = False\n self.start_time = datetime.now()\n\n self.time_elapsed_var = tk.DoubleVar()\n self.time_elapsed_hours = tk.DoubleVar()\n self.money_earned = tk.DoubleVar()\n\n self.todo_var = tk.StringVar()\n\n self.projects = glob('*.dat')\n\n self.project = tk.StringVar()\n self.project.trace('w', self.change_project)\n self.project.set(self.projects[0])\n\n self.create_layout()\n\n self.repeat()\n\n def save(self):\n with open(self.project.get(), mode='w') as file:\n file.write(json.dumps(self.data))\n\n def load(self):\n with open(self.project.get(),mode='r') as file:\n self.data = json.loads(file.read())\n print(\"Loaded:\",self.data)\n\n def change_project(self, *args):\n self.stop_timer()\n self.time_elapsed = 0.0\n try:\n self.load()\n self.populate_todo()\n if self.data['times']:\n for time in self.data['times']:\n self.time_elapsed += float(time)\n except 
FileNotFoundError:\n open(self.project.get(), mode='x')\n self.data = {'times':[],'todo':[]}\n self.time_elapsed_var.set(round(self.time_elapsed,2))\n self.time_elapsed_hours.set(round(self.time_elapsed/60/60,2))\n self.money_earned.set(round(WAGE*(self.time_elapsed/60/60),2))\n\n def start_timer(self):\n self.overtime = False\n x = not self.timing\n if x:\n self.start_time = datetime.now()\n self.timing = True\n return x\n\n def start_overtimer(self):\n if self.start_timer():\n self.overtime = True\n return True\n else:\n return False\n\n def stop_timer(self, cancel=False):\n if self.timing:\n self.timing = False\n if not cancel:\n dif = (datetime.now() - self.start_time)\n if self.overtime:\n dif *= 1.1\n self.time_elapsed += dif.total_seconds()\n self.data['times'].append(dif.total_seconds())\n self.save()\n self.update(True)\n\n def cancel_timer(self):\n self.stop_timer(cancel=True)\n\n def reset_timer(self):\n if self.timing:\n self.stop_timer()\n self.timing = False\n self.data['times'].append(-self.time_elapsed)\n self.save()\n self.time_elapsed = 0.0\n self.start_time = datetime.now()\n self.update(True)\n\n def repeat(self):\n self.update()\n self.after(10, self.repeat)\n\n def update(self, override=False):\n if self.timing or override:\n mul = 1\n if self.overtime: mul = 1.1;\n dif = ((self.start_time if override else datetime.now()) - self.start_time)\n seconds = self.time_elapsed + dif.total_seconds() * (1.1 if self.overtime else 1)\n hours = seconds / 60 / 60\n self.time_elapsed_var.set(round(seconds, 2))\n self.time_elapsed_hours.set(round(hours, 2))\n self.money_earned.set(round(WAGE * hours, 2))\n\n def add_todo(self):\n self.data['todo'].append(self.todo_var.get())\n self.populate_todo()\n self.save()\n\n def populate_todo(self):\n if hasattr(self, 'todo_listbox'):\n self.todo_listbox.delete(0, tk.END)\n for i in self.data['todo']:\n self.todo_listbox.insert(tk.END, i)\n\n def delete_todo(self,event):\n w = event.widget\n if len(w.curselection())>0:\n index = int(w.curselection()[0])\n value = w.get(index)\n self.data['todo'].remove(value)\n self.todo_listbox.delete(index,index)\n self.save()\n # print('You selected item {0};{1}'.format(index, value))\n\n def create_layout(self):\n # Top Level\n self.project_listbox = tk.OptionMenu(self,self.project,*self.projects)\n self.project_listbox.grid(row=0,column=0,sticky=tk.N+tk.E+tk.S+tk.W)\n\n self.todo_frame = tk.Frame(master=self)\n self.todo_frame.grid(row=0,column=1,rowspan=2)\n self.todo_frame.rowconfigure(0,weight=1,uniform='group2')\n self.todo_frame.columnconfigure(1, weight=1, uniform='group3')\n\n self.timer_frame = tk.Frame(master=self)\n self.timer_frame.grid(row=1,column=0)#,columnspan=2)\n self.timer_frame.configure(background='#FFFFFF')\n\n # Inside _TODO Frame\n self.todo_inputframe = tk.Frame(master=self.todo_frame)\n self.todo_inputframe.grid(row=0,column=0)\n\n self.todo_entry = tk.Entry(master=self.todo_inputframe,textvariable=self.todo_var)\n self.todo_entry.grid(row=0,column=0)\n self.todo_button = tk.Button(master=self.todo_inputframe,text='+',command=self.add_todo)\n self.todo_button.grid(row=0,column=1)\n\n self.todo_scrollframe = tk.Frame(master=self.todo_frame)\n self.todo_scrollframe.grid(row=1,column=0)\n\n self.todo_listbox = tk.Listbox(master=self.todo_scrollframe)\n self.todo_listbox.pack(fill=\"both\", side='left')\n\n self.todo_scrollbar = tk.Scrollbar(master=self.todo_scrollframe,orient='vertical',command=self.todo_listbox.yview)\n self.todo_scrollbar.pack(side='right',fill='y')\n 
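# two-way binding: the scrollbar drives the listbox via yview and the listbox reports back through yscrollcommand\n 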
self.todo_scrollbar.config(command=self.todo_listbox.yview)\n self.todo_listbox.config(yscrollcommand=self.todo_scrollbar.set)\n self.todo_listbox.bind('<>', self.delete_todo)\n\n self.populate_todo()\n\n # Inside Timer Frame\n self.display_frame = tk.Frame(master=self.timer_frame)\n self.display_frame.grid(row=0,column=0)\n self.display_frame.configure(background='#FFFFFF')\n self.display_frame.grid_columnconfigure(0, weight=1, uniform=\"group1\")\n self.display_frame.grid_columnconfigure(1, weight=1, uniform=\"group1\")\n\n self.button_frame = tk.Frame(master=self.timer_frame)\n self.button_frame.grid(row=1,column=0)\n self.button_frame.configure(background='#FFFFFF')\n\n # Inside Timer Frame -> Display Frame\n self.seconds_label = tk.Label(master=self.display_frame,text='Seconds:',anchor='e')\n self.seconds_label.grid(row=0,column=0,sticky=tk.N+tk.E+tk.S+tk.W)\n self.seconds_label.configure(background='#FFFFFF')\n self.time_elapsed_label_s = tk.Label(master=self.display_frame, textvariable=self.time_elapsed_var,anchor='w')\n self.time_elapsed_label_s.grid(row=0,column=1,sticky=tk.N+tk.E+tk.S+tk.W)\n self.time_elapsed_label_s.configure(background='#FFFFFF')\n\n self.hours_label = tk.Label(master=self.display_frame,text='Hours:',anchor='e')\n self.hours_label.grid(row=1,column=0,sticky=tk.N+tk.E+tk.S+tk.W)\n self.hours_label.configure(background='#FFFFFF')\n self.time_elapsed_label_h = tk.Label(master=self.display_frame, textvariable=self.time_elapsed_hours,anchor='w')\n self.time_elapsed_label_h.grid(row=1,column=1,sticky=tk.N+tk.E+tk.S+tk.W)\n self.time_elapsed_label_h.configure(background='#FFFFFF')\n\n self.pounds_label = tk.Label(master=self.display_frame,text='£',anchor='e')\n self.pounds_label.grid(row=2,column=0,sticky=tk.N+tk.E+tk.S+tk.W)\n self.pounds_label.configure(background='#FFFFFF')\n self.money_earned_label = tk.Label(master=self.display_frame, textvariable=self.money_earned,anchor='w')\n self.money_earned_label.grid(row=2,column=1,sticky=tk.N+tk.E+tk.S+tk.W)\n self.money_earned_label.configure(background='#FFFFFF')\n\n # Inside Timer Frame -> Button Frame\n self.start = tk.Button(master=self.button_frame, cnf={})\n self.start[\"text\"] = \"Start timing\";\n self.start[\"command\"] = self.start_timer;\n self.start.grid(row=0,column=0,sticky=tk.N+tk.E+tk.S+tk.W)\n self.start.configure(background='#00DD00',foreground='#FFFFFF')\n\n self.stop = tk.Button(master=self.button_frame, cnf={})\n self.stop[\"text\"] = \"Stop timing\";\n self.stop[\"command\"] = self.stop_timer;\n self.stop.grid(row=0,column=1,sticky=tk.N+tk.E+tk.S+tk.W)\n self.stop.configure(background='#DDAA00',foreground='#FFFFFF')\n\n self.over = tk.Button(master=self.button_frame, cnf={})\n self.over[\"text\"] = \"Overtimer\";\n self.over[\"command\"] = self.start_overtimer;\n self.over.grid(row=1,column=0,sticky=tk.N+tk.E+tk.S+tk.W)\n self.over.configure(background='#DD00EE',foreground='#FFFFFF')\n\n self.cancel = tk.Button(master=self.button_frame, cnf={})\n self.cancel[\"text\"] = \"Negate current sprint\";\n self.cancel[\"command\"] = self.cancel_timer;\n self.cancel.grid(row=1,column=1,sticky=tk.N+tk.E+tk.S+tk.W)\n self.cancel.configure(background='#DD0000',foreground='#FFFFFF')\n\n self.reset = tk.Button(master=self.button_frame, cnf={})\n self.reset[\"text\"] = \"Negate accumulated time\";\n self.reset[\"command\"] = self.reset_timer;\n self.reset.grid(row=2,column=0,columnspan=2,sticky=tk.N+tk.E+tk.S+tk.W)\n self.reset.configure(background='#0000DD',foreground='#FFFFFF')\n\n # self.refresh = 
tk.Button(master=self.timer_frame, cnf={})\n # self.refresh[\"text\"] = \"Refresh UI\";\n # self.refresh[\"command\"] = self.refresh_label();\n # self.refresh.pack(side=\"bottom\")\n\n # def refresh_label(self):\n # dif = (datetime.now() - self.start_time)\n # self.time_elapsed_label_s[\"text\"] = self.time_elapsed + dif.total_seconds()\n # self.time_elapsed_label_h[\"text\"] = (self.time_elapsed + dif.total_seconds())/60\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = Application(master=root)\n app.pack()\n app.mainloop(n=0)\n","sub_path":"timer2.py","file_name":"timer2.py","file_ext":"py","file_size_in_byte":9991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"290710152","text":"#!/usr/bin/env python\n\n# finds an approximation of a root using steffensen's Method\n# default iterations is 50\n\nimport sys\n\nif len(sys.argv) != 3 :\n print(\"steffensen [function] [seed]\")\n exit()\n\n# function:string, seed:number\ndef steffensenMethod(function, seed, precision, iterations) :\n\n # error check\n if precision == 0 :\n exit()\n \n # setup\n i = iterations\n h = precision\n x = seed\n f = lambda x : eval(function)\n # the derivative is approximated with a forward difference of step h = precision\n g = lambda x : (f(x+h)-f(x))/h\n\n while (i > 0) :\n x -= f(x)/float(g(x))\n i -= 1\n return x\n \nfunction = sys.argv[1]\nseed = eval(sys.argv[2])\n\n# solution close to\nprint(steffensenMethod(function, seed, 0.1, 50))\n","sub_path":"old-code/steffensen_method/SteffensenMethod.py","file_name":"SteffensenMethod.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"597558898","text":"try:\n alphabet = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюя\"\n message = str(input(\"Enter the word to encrypt: \"))\n result = \"\"\n key = str(input(\"Enter the key: \"))\n\n key *= (len(message) // len(key)) + 1\n\n for i, j in enumerate(message):\n tmp = alphabet.index(j) + alphabet.index(key[i])\n\n result += alphabet[tmp % len(alphabet)]\n\n print(\"Encrypted word:\", result)\n\nexcept ValueError:\n print(\"You entered invalid data\")\n","sub_path":"vigenere_cipher.py","file_name":"vigenere_cipher.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"483583872","text":"import os\nimport pygame\nimport time\nimport signal\nimport sys\nfrom pyscope import pyscope\n\n# Constants\nFPS = 30 # Frames per second\nRESOLUTION = [540, 960]\n\n# Common colors\nWHITE = 255,255,255\nGREEN = 0,255,0\nBLACK = 0,0,0\nBLUE = 0,0,255\nRED = 255,0,0\n\n# Main\ndef main():\n global FPSCLOCK, mainloop\n mainloop = True\n\n # Setup display\n screen = pygame.display.set_mode(RESOLUTION)\n pygame.init()\n scope = pyscope()\n FPSCLOCK = pygame.time.Clock()\n pygame.mouse.set_visible(False)\n\n # Create background\n rect_game_mode = pygame.draw.rect(screen, WHITE, (20, 20, 60, 60), 0)\n \n # Main game loop\n while mainloop:\n drawScreen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n mainloop = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q or event.key == pygame.K_ESCAPE:\n mainloop = False\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n \n pygame.quit()\n\ndef drawScreen():\n pass\n \n# Call 
main\nmain()\n","sub_path":"pytest/graphics_test_04.py","file_name":"graphics_test_04.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"528337856","text":"# BOUNDED VS CIRCULAR QUEUE DEQUEUE TIME COMPLEXITY ANALYSIS\n# RUDRANSH KUMAR\n# 2018/02/27\n# This program measures the time complexity of the dequeue methods of\n# a bounded queue versus a circular queue given the same items.\n# Queues must dequeue 15000 items, over 1000 trials\n\nfrom bqueue import BQueue\nfrom cqueue import CQueue\nfrom time import time\n\n# USER-DEFINED FUNCTION\n\ndef main():\n\n # runs main procedure as defined above\n # input: None\n # output: prints times to console\n\n boundedTotalTime = 0\n circularTotalTime = 0\n numOfTrials = len(range(1,1001))\n for test in range(1,1001):\n # define list of objects to be enqueued\n testObjects = range(1,15001)\n capacity = len(testObjects)\n bounded = BQueue(capacity)\n circular = CQueue(capacity)\n # enqueue each item in test list to both queues\n for testObject in testObjects:\n bounded.enqueue(testObject)\n circular.enqueue(testObject)\n # start bounded timing only for dequeue\n boundedStart = time()\n for i in range(capacity):\n bounded.dequeue()\n # end bounded timing right after dequeue is finished for all items\n boundedEnd = time()\n # start circular timing only for dequeue\n circularStart = time()\n for j in range(capacity):\n circular.dequeue()\n # end circular timing right after dequeue is finished for all items\n circularEnd = time()\n # calculate time intervals\n boundedTotalTime += (boundedEnd - boundedStart)\n circularTotalTime += (circularEnd - circularStart)\n boundedReport = \\\n ('Over {} trials, the bounded queue took a total time of {} s' + \\\n ' to dequeue {} items, which averages to {} s/trial')\\\n .format(numOfTrials,boundedTotalTime,len(testObjects),\n boundedTotalTime/numOfTrials)\n circularReport = \\\n ('Over {} trials, the circular queue took a total time of {} s to ' + \\\n 'dequeue {} items, which averages to {} s/trial')\\\n .format(numOfTrials,circularTotalTime,len(testObjects),\n circularTotalTime/numOfTrials)\n print(boundedReport)\n print(circularReport)\n\n# RUN PROGRAM\nmain()\n","sub_path":"lab_exercises/lab_6.1/bqueue_vs_cqueue.py","file_name":"bqueue_vs_cqueue.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"77459924","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n # 유저 로그인/로그아웃/회원가입\n path('user_editor/', views.user_edit ,name=\"user_editor\"),\n path('user_login/', views.user_login ,name=\"user_login\"),\n path('user_logout/', views.user_logout, name=\"user_logout\"),\n path('user_signup/', views.user_signup, name=\"user_signup\"),\n path('portfolio_detail/', views.portfolio_detail.as_view(), name=\"portfolio_detail\"), # 유저 포트폴리오 개인 페이지 이동\n path('portfolio/', views.portfolio_list.as_view(),name=\"portfolio_list\"), # 유저 포트폴리오 리스트\n path('portfolio_search/', views.portfolio_searching,name=\"search_list\"), # 유저 포트폴리오 리스트\n]","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"113769111","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nfrom fastai.vision import *\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport pandas as pd\nimport os\nimport sys\n\nfrom fastai.callbacks import CSVLogger\n\n# suppress anoying and irrelevant warning, see https://forums.fast.ai/t/warnings-when-trying-to-make-an-imagedatabunch/56323/9\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.nn.functional\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--size', dest='size', help='scale images to size', default=256, type=int)\nparser.add_argument('--bs', dest='bs', help='batch size', default=32, type=int)\nparser.add_argument('--cuda_device', dest='cuda_device', help='cuda device index', default=0, type=int)\nparser.add_argument('--confidence', dest='confidence', help='confidence cutoff in percent', default=10, type=int)\nparser.add_argument('--model', dest='model', help='model, one of resnet34, resnet50, vgg16', default='resnet34', type=str)\nparser.add_argument('--tfms', dest='tfms', help='transformations, one of the presets no, normal, extreme', default='normal', type=str)\nparser.add_argument('--loss', dest='loss', help='loss function, one of the presets ce, focal, softdice', default='ce', type=str)\nargs = parser.parse_args()\n\nour_models = {\"resnet34\": models.resnet34, \"resnet50\": models.resnet50, \"vgg16\": models.vgg16_bn}\nour_tfms = {\n \"no\": None,\n \"normal\": get_transforms(do_flip=False,max_rotate=20,max_lighting=.4,max_zoom=1.2),\n \"extreme\": get_transforms(do_flip=True,max_rotate=90,max_lighting=.4,max_zoom=1.2)\n}\n\nif args.loss not in [\"ce\", \"focal\", \"softdice\"]:\n sys.exit(\"Unknown loss function\") \nsize = args.size\nbs = args.bs\ncuda_device = args.cuda_device\nconfidence_cutoff = args.confidence/100\nmodel = our_models[args.model]\ntfms = our_tfms[args.tfms]\nname = \"noPretrain_{}_{}percent_size{}_{}Tfms_{}Loss\".format(args.model,args.confidence,size,args.tfms,args.loss)\n\ntorch.cuda.set_device(cuda_device)\n\nos.mkdir(name)\n\nget_y_fn = lambda x: str(x).replace(\"images\", \"masks_2class\")\n\nimgList = pd.read_csv(\"nifti/image_list_filtered_score.tsv\", sep=\"\\t\")\nfilteredList = imgList[imgList.score<=confidence_cutoff]\n\nsrc = (SegmentationItemList.from_df(filteredList,path=\"nifti\",cols=\"file\")\n .split_from_df(col='is_val')\n .label_from_func(get_y_fn, classes=np.array([\"background\",\"left_ventricle\",\"myocardium\"])))\n\ndata = (src.transform(tfms,size=size,padding_mode=\"zeros\",resize_method=ResizeMethod.PAD,tfm_y=True)\n .databunch(bs=bs)\n .normalize(imagenet_stats))\n\ndef acc_seg(input, target):\n target = target.squeeze(1)\n return 
(input.argmax(dim=1)==target).float().mean()\n\ndef multi_dice(input:Tensor, targs:Tensor, class_id=0, inverse=False)->Rank0Tensor:\n n = targs.shape[0]\n input = input.argmax(dim=1).view(n,-1)\n # replace all with class_id with 1 all else with 0 to have binary case\n output = (input == class_id).float()\n # same for targs\n targs = (targs.view(n,-1) == class_id).float()\n if inverse:\n output = 1 - output\n targs = 1 - targs\n intersect = (output * targs).sum(dim=1).float()\n union = (output+targs).sum(dim=1).float()\n res = 2. * intersect / union\n res[torch.isnan(res)] = 1\n return res.mean()\n\ndice0inv = partial(multi_dice, class_id=0, inverse=True)\ndice1 = partial(multi_dice, class_id=1)\ndice2 = partial(multi_dice, class_id=2)\ndice0inv.__name__ = 'diceComb'\ndice1.__name__ = 'diceLV'\ndice2.__name__ = 'diceMY'\n\nclass SoftDiceLoss(nn.Module):\n ''' \n WARNING: this implementation does not work in our case, assumes one hot and channel last - need to restructure or re-write\n Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.\n Assumes the `channels_last` format.\n # Arguments\n targets: b x X x Y( x Z...) x c One hot encoding of ground truth\n inputs: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) \n epsilon: Used for numerical stability to avoid divide by zero errors\n # References\n https://www.jeremyjordan.me/semantic-segmentation/ (https://gist.github.com/jeremyjordan/9ea3032a32909f71dd2ab35fe3bacc08#file-soft_dice_loss-py)\n V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation \n https://arxiv.org/abs/1606.04797\n More details on Dice loss formulation \n https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)\n Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022\n '''\n def __init__(self, epsilon=1e-8):\n super().__init__()\n self.epsilon = epsilon\n\n def forward(self, inputs, targets):\n # skip the batch and class axis for calculating Dice score\n print(inputs.shape)\n print(inputs)\n print(targets.shape)\n print(targets)\n axes = tuple(range(1, len(inputs.shape)-1)) \n numerator = 2. 
* np.sum(inputs * targets, axes)\n denominator = np.sum(np.square(inputs) + np.square(targets), axes)\n return 1 - np.mean(numerator / (denominator + self.epsilon)) # average over classes and batch\n\n# adjusted from https://forums.fast.ai/t/loss-function-of-unet-learner-flattenedloss-of-crossentropyloss/51605\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=2., reduction='mean'):\n super().__init__()\n self.gamma = gamma\n self.reduction = reduction\n\n def forward(self, inputs, targets):\n CE_loss = CrossEntropyFlat(axis=1,reduction='none')(inputs, targets)\n pt = torch.exp(-CE_loss)\n F_loss = ((1 - pt)**self.gamma) * CE_loss\n if self.reduction == 'sum':\n return F_loss.sum()\n elif self.reduction == 'mean':\n return F_loss.mean()\n\nlearn = unet_learner(data, model, pretrained=False, metrics=[acc_seg,dice0inv,dice1,dice2], callback_fns=[partial(CSVLogger, append=True, filename=\"train_log\")], path=name)\n\nif args.loss == \"focal\":\n learn.loss_func = FocalLoss()\nif args.loss == \"softdice\":\n learn.loss_func = SoftDiceLoss()\n\nlearn.unfreeze()\n\nlr_find(learn)\nfig = learn.recorder.plot(return_fig=True)\nfig.savefig(name+\"/lrfind_unfreeze.png\")\n\nlr=1e-5\nlearn.fit_one_cycle(5, lr)\nlearn.save(name+'-unfreeze-5')\nfig = learn.recorder.plot_losses(return_fig=True)\nfig.savefig(name+\"/unfreeze-5.png\")\n\nlearn.fit_one_cycle(10, lr)\nlearn.save(name+'-unfreeze-15')\nfig = learn.recorder.plot_losses(return_fig=True)\nfig.savefig(name+\"/unfreeze-15.png\")\n\nlearn.fit_one_cycle(15, lr)\nlearn.save(name+'-unfreeze-30')\nfig = learn.recorder.plot_losses(return_fig=True)\nfig.savefig(name+\"/unfreeze-30.png\")\n\nlearn.export('model.pkl')\n\n# Make Predictions\n\nfullImgList = pd.read_csv(\"nifti/image_list.tsv\", sep=\"\\t\", header=None, names=[\"pid\",\"file\"])\n\npixelTable = pd.DataFrame({'file': [], 'lv_pixels': [], 'my_pixels': []})\nfor i in tqdm(range(int(fullImgList.shape[0]/10000)+1)):\n imgInBatch = fullImgList[(10000*i):(10000*(i+1))]\n trainedModel = load_learner(name, 'model.pkl')\n trainedModel.data.add_test(SegmentationItemList.from_df(imgInBatch,path=\"nifti\",cols=\"file\"), tfm_y=False)\n predictions,_=trainedModel.get_preds(DatasetType.Test)\n predictions = predictions.argmax(dim=1)\n lv_pixels = (predictions==1).sum(dim=(1,2))\n my_pixels = (predictions==2).sum(dim=(1,2))\n pixelTable = pd.concat([pixelTable, pd.DataFrame({'file': trainedModel.data.test_ds.items, 'lv_pixels': lv_pixels, 'my_pixels': my_pixels})])\n\npixelTable.to_csv(name+\"/predictions.tsv\",sep=\"\\t\",index=False)\n\n","sub_path":"code/kaggle/train_fastai_segmentation_noPretrain.py","file_name":"train_fastai_segmentation_noPretrain.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"197254391","text":"import sys\nfrom PyQt5.QtWidgets import *\n\napp = QApplication(sys.argv)\nwin = QWidget()\nwin.resize(1200,800)\nwin.move(0,0)\nwin.show()\nwin.setWindowTitle('this is title')\n\nsys.exit(app.exec_())","sub_path":"lesson1.py","file_name":"lesson1.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"521227948","text":"import csv\nfrom meritsapi.models import *\nfrom datetime import datetime\n\ndef getPathOfTodaysHoldingStatement(timenow):\n holding_statement = HoldingStatementsLog.objects.get(uploaded_at=timenow)\n return holding_statement.file\n\ndef importHoldingStatements(pathToFile):\n 
with open(pathToFile, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='|')\n for row in spamreader:\n holding = Holding()\n i=0\n uid = 0\n for column in row:\n if(i==0):\n user = Users.objects.get(client_id=column)\n holding.appuser_id = user.id\n if(i==1):\n holding.symbol = column\n if(i==2):\n holding.type = column\n if(i==3):\n holding.quantity = column\n i+=1\n if(i>3):\n break\n holding.save()\n\n\"\"\"\n Algorithm To Import data from CSV into MySQL Database\n 1. Upload File\n 2. onClick('Save')\n read csv to spamreader\n for row in spamreader:\n holding = new Holding()\n i=0\n uid = 0\n for column in row:\n if(i==0):\n user = Users.objects.get(client_id=column)\n holding.appuser_id = user.id\n if(i==1):\n holding.symbol = column\n if(i==2):\n holding.type = column\n if(i==3):\n holding.quantity = column\n i+=1\n if(i>3):\n break\n holding.save() \n \n\"\"\"\n\n \n ","sub_path":"meritsapi/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"303804167","text":"import numpy as np\nimport scipy.stats\nimport tqdm\n\nfrom scipy.special import gammaln, digamma, betaln\n\n\ndef calculate_pprob_from_evidences(pd1, pd2, priors=None):\n if priors is None:\n # p(m|d) = p(d | m) * p(m) / (sum_ p(d|m_i)p(m))))\n # because the prior is uniform we just return the normalized evidence:\n return pd1 / (pd1 + pd2)\n else:\n # p(m|d) = p(d | m) * p(m) / (sum_ p(d|m_i)p(m))))\n return pd1 * priors[0] / (pd1 * priors[0] + pd2 * priors[1])\n\n\ndef poisson_evidence(x, k, theta, log=False):\n \"\"\"\n Calculate evidence of a sample x under a Poisson model with Gamma prior.\n\n E(x) = gamma(k + sx) / ( gamma(k) * theta**k ) * (N + 1/theta)**(-k - sx) / prod x_i!\n\n Parameters\n ----------\n x : array-like\n batch of samples, shape (batch_size, )\n k : float\n shape parameter of the gamma prior\n theta: float\n scale parameter of the gamma prior\n log: bool\n if True, the log evidence is returned\n\n Returns\n -------\n log_evidence: float\n the log evidence (if log=True) or the evidence of the data\n \"\"\"\n sx = np.sum(x)\n n_batch = x.size\n\n log_evidence = gammaln(k + sx) - (gammaln(k) + k * np.log(theta)) - (k + sx) * np.log(n_batch + theta ** -1) \\\n - np.sum(gammaln(np.array(x) + 1))\n\n return log_evidence if log else np.exp(log_evidence)\n\n\ndef calculate_nb_evidence(x, k_k, theta_k, k_theta, theta_theta, log=False):\n # set up a grid of values around the priors\n # take grid over the whole range of the priors\n\n k_start = scipy.stats.gamma.ppf(1e-8, a=k_k)\n k_end = scipy.stats.gamma.ppf(1 - 1e-8, a=k_k)\n\n theta_start = scipy.stats.gamma.ppf(1e-8, a=k_theta)\n theta_end = scipy.stats.gamma.ppf(1 - 1e-8, a=k_theta)\n\n # set priors\n prior_k = scipy.stats.gamma(a=k_k, scale=theta_k)\n prior_theta = scipy.stats.gamma(a=k_theta, scale=theta_theta)\n\n (evidence, err) = scipy.integrate.dblquad(func=nb_evidence_integrant_direct,\n a=theta_start / (1 + theta_start),\n b=theta_end / (1 + theta_end),\n gfun=lambda x: k_start, hfun=lambda x: k_end,\n args=[x, prior_k, prior_theta])\n\n return np.log(evidence) if log else evidence\n\ndef nbinom_pmf(k, r, p):\n \"\"\"\n Calculate pmf values according to Wikipedia definition of the negative binomial distribution:\n p(X=x | r, p = (x + r - 1)choose(x) p^x (1 - p)^r\n \"\"\"\n\n return scipy.special.binom(k + r - 1, k) * np.power(p, k) * np.power(1-p, r)\n\n\ndef nb_evidence_integrant_direct(r, p, x, prior_k, 
prior_theta):\n \"\"\"\n Negative Binomial marginal likelihood integrant: NB likelihood times prior pds values for given set of prior params\n \"\"\"\n # set prior params for direct NB given params for indirect Poisson-Gamma mixture (Gamma priors on k and theta)\n\n # get pdf values\n pr = prior_k.pdf(r)\n # do change of variables or not?\n pp = np.power(1 - p, -2) * prior_theta.pdf(p / (1 - p))\n\n value = np.log(nbinom_pmf(x, r, p)).sum() + np.log(pr) + np.log(pp)\n\n return np.exp(value)\n\n\ndef calculate_pprob_from_evidences(pd1, pd2, priors=None):\n if priors is None:\n # p(m|d) = p(d | m) * p(m) / (sum_ p(d|m_i)p(m))))\n # because the prior is uniform we just return the normalized evidence:\n return pd1 / (pd1 + pd2)\n else:\n # p(m|d) = p(d | m) * p(m) / (sum_ p(d|m_i)p(m))))\n return pd1 * priors[0] / (pd1 * priors[0] + pd2 * priors[1])\n\n\ndef poisson_evidence(x, k, theta, log=False):\n \"\"\"\n Calculate evidence of a sample x under a Poisson model with Gamma prior.\n\n E(x) = gamma(k + sx) / ( gamma(k) * theta**k ) * (N + 1/theta)**(-k - sx) / prod x_i!\n\n Parameters\n ----------\n x : array-like\n batch of samples, shape (batch_size, )\n k : float\n shape parameter of the gamma prior\n theta: float\n scale parameter of the gamma prior\n log: bool\n if True, the log evidence is returned\n\n Returns\n -------\n log_evidence: float\n the log evidence (if log=True) or the evidence of the data\n \"\"\"\n sx = np.sum(x)\n n_batch = x.size\n\n log_evidence = gammaln(k + sx) - (gammaln(k) + k * np.log(theta)) - (k + sx) * np.log(n_batch + theta ** -1) \\\n - np.sum(gammaln(np.array(x) + 1))\n\n return log_evidence if log else np.exp(log_evidence)\n\n\ndef poisson_sum_evidence(x, k, theta, log=True):\n \"\"\"\n Calculate the evidence of the summary statistics of a sample under a Poisson model with Gamma prior.\n Given a batch of samples calculate the evidence (marginal likelihood) of the sufficient statistics\n (sum over the sample). 
Note the difference ot the poisson_evidence() method that calculates the evidence of the\n whole data sample.\n\n E(sx) = gamma(k + sx) / ( gamma(k) * (N*theta)**k ) * (1 + 1/(N*theta))**(-k - sx) / (sum x_i)!\n\n Parameters\n ----------\n x : array-like\n batch of samples, shape (batch_size, )\n k : float\n shape parameter of the gamma prior\n theta: float\n scale parameter of the gamma prior\n log: bool\n if True, the log evidence is returned\n\n Returns\n -------\n log_evidence: float\n the log evidence (if log=True) or the evidence of the data\n \"\"\"\n\n n_batch = x.size\n sx = np.sum(x)\n\n result = -k * np.log(theta * n_batch) - gammaln(k) - gammaln(sx + 1) + gammaln(k + sx) - \\\n (k + sx) * np.log(1 + (theta * n_batch)**-1)\n\n return result if log else np.exp(result)\n\n\ndef nbinom_evidence(x, r, a, b, log=False):\n \"\"\"\n Calculate the evidence of a sample x under a negative binomial model with fixed r and beta prior on the success\n probability p.\n\n E(x) = \\prod gamma(x_i +r) / ( gamma(x_i + 1) * gamma(r)**N ) * B(a + sx, b + Nr) / B(a, b)\n\n Parameters\n ----------\n x : array-like\n batch of samples, shape (batch_size, )\n r : int\n number of successes of the nbinom process\n a: float\n shape parameter alpha of the beta prior\n b: float\n shape parameter beta of the beta prior\n log: bool\n if True, the log evidence is returned\n\n Returns\n -------\n log_evidence: float\n the log evidence (if log=True) or the evidence of the data\n \"\"\"\n b_batch = x.size\n sx = np.sum(x)\n\n fac = np.sum(gammaln(np.array(x) + r) - (gammaln(np.array(x) + 1) + gammaln([r])))\n log_evidence = fac + betaln(a + sx, b + b_batch * r) - betaln(a, b)\n\n return log_evidence if log else np.exp(log_evidence)\n\n\ndef nbinom_evidence_scipy(x, r, a, b, log=False):\n b_batch = x.size\n sx = np.sum(x)\n\n fac = np.sum(gammaln(np.array(x) + r) - (gammaln(np.array(x) + 1) + gammaln([r])))\n log_evidence = fac + betaln(a + b_batch * r, b + sx) - betaln(a, b)\n\n return log_evidence if log else np.exp(log_evidence)\n\n\ndef nbinom_sum_evidence(x, r, a, b, log=True):\n\n \"\"\"\n Calculate the evidence of a the sufficient statistics sx of a sample x under a negative binomial model with fixed\n r and beta prior on the success probability p.\n\n E(sx) = binom(sx + Nr - 1, sx) * B(a + sx, b + Nr) / B(a, b)\n\n Parameters\n ----------\n x : array-like\n batch of samples, shape (batch_size, )\n r : int\n number of successes of the nbinom process\n a: float\n shape parameter alpha of the beta prior\n b: float\n shape parameter beta of the beta prior\n log: bool\n if True, the log evidence is returned\n\n Returns\n -------\n log_evidence: float\n the log evidence (if log=True) or the evidence of the data\n \"\"\"\n\n N = x.size\n sx = np.sum(x)\n bc = scipy.special.binom(sx + N * r - 1, sx)\n\n log_evidence = np.log(bc) + betaln(a + sx, b + N * r) - betaln(a, b)\n\n return log_evidence if log else np.exp(log_evidence)\n\n\ndef calculate_gamma_dkl(k1, theta1, k2, theta2):\n return (k1 - k2) * digamma(k1) - gammaln(k1) + gammaln(k2) + \\\n k2 * (np.log(theta2) - np.log(theta1)) + k1 * (theta1 - theta2) / theta2\n\n\ndef calculate_dkl_1D_scipy(p_pdf_array, q_pdf_array):\n \"\"\"\n Calculate DKL from array of pdf values.\n\n The arrays should cover as much of the range as possible.\n :param p_pdf_array:\n :param q_pdf_array:\n :return:\n \"\"\"\n return scipy.stats.entropy(pk=p_pdf_array, qk=q_pdf_array)\n\n\ndef calculate_dkl_monte_carlo(x, p_pdf, q_pdf):\n \"\"\"\n Estimate the DKL between 1D RV p and q.\n\n 
:param x: samples from p\n :param p_pdf: pdf function for p\n :param q_pdf: pdf function for q\n :return: estimate of dkl, standard error\n \"\"\"\n\n # eval those under p and q\n pp = p_pdf(x)\n pq = q_pdf(x)\n\n # estimate expectation of log\n log = np.log(pp) - np.log(pq)\n dkl = log.mean()\n # estimate the standard error\n stderr = log.std(ddof=1) / np.sqrt(x.shape[0])\n\n return dkl, stderr\n\n\ndef calculate_dkl(p, q):\n \"\"\"\n Calculate dkl between p and q.\n :param p: scipy stats object with .pdf() and .ppf() methods\n :param q: delfi.distribution object with .eval() method\n :return: dkl(p, q)\n \"\"\"\n # parameter range\n p_start = p.ppf(1e-9)\n p_end = p.ppf(1 - 1e-9)\n\n # integral function\n def integrant(x):\n log_pp = p.logpdf(x)\n log_pq = q.eval(np.reshape(x, (1, -1)), log=True)\n return np.exp(log_pp) * (log_pp - log_pq)\n\n (dkl, err) = scipy.integrate.quad(integrant, a=p_start, b=p_end)\n return dkl\n\n\ndef calculate_credible_intervals_success(theta, ppf_fun, intervals, args=None):\n \"\"\"\n Calculate credible intervals given a true parameter value and a percent point function of a distribution\n :param theta: true parameter\n :param ppf_fun: percent point function (inverse CDF)\n :param intervals: array-like, credible intervals to be calculated\n :param args: arguments to the ppf function\n :return: a binary vector, same length as intervals, indicating whether the true parameter lies in that interval\n \"\"\"\n tails = (1 - intervals) / 2\n\n # get the boundaries of the credible intervals\n lows, highs = ppf_fun(tails, *args), ppf_fun(1 - tails, *args)\n success = np.ones_like(intervals) * np.logical_and(lows <= theta, theta <= highs)\n\n return success\n\n\ndef check_credible_regions(theta_o, cdf_fun, credible_regions):\n\n q = cdf_fun(theta_o)\n\n if q > 0.5:\n # the mass in the CR is 1 - how much mass is above times 2\n cr_mass = 1 - 2 * (1 - q)\n else:\n # or 1 - how much mass is below, times 2\n cr_mass = 1 - 2 * q\n counts = np.ones_like(credible_regions) * (credible_regions > cr_mass)\n return counts\n\n\ndef calculate_ppf_from_samples(qs, samples):\n \"\"\"\n Given quantiles and samples, calculate values corresponding to the quantiles by approximating the\n MoG inverse CDF from samples.\n :param qs: quantiles, array-like\n :param samples: number of samples used to for sampling\n :return: corresponding values, array-like\n \"\"\"\n\n qs = np.atleast_1d(qs)\n values = np.zeros_like(qs)\n\n # use bins from min to max\n bins = np.linspace(samples.min(), samples.max(), 1000)\n # asign samples to bins\n bin_idx = np.digitize(samples, bins)\n # count samples per bin --> histogram\n n = np.bincount(bin_idx.squeeze())\n # take the normalized cum sum as the cdf\n cdf = np.cumsum(n) / np.sum(n)\n\n # for every quantile, get the corresponding value on the cdf\n for i, qi in enumerate(qs):\n quantile_idx = np.where(cdf >= qi)[0][0]\n values[i] = bins[quantile_idx]\n\n return values\n\n\ndef inverse_transform_sampling_1d(array, pdf_array, n_samples):\n \"\"\"\n Generate samples from an arbitrary 1D distribution given an array of pdf values. Using inverse transform sampling.\n\n Calculates CDF by summing up values in the PDF. 
Assumes values in array and PDF are spaced uniformly.\n\n :param array: array of RV values covering a representative range\n :param pdf_array: the corresponding PDF values of the values in 'array'.\n :param n_samples: number of samples to generate\n :return: array-like, array of pseudo-randomly generated sampled.\n \"\"\"\n uniform_samples = scipy.stats.uniform.rvs(size=n_samples)\n samples = np.zeros(n_samples)\n # calculate the cdf by taking the cumsum and normaliying by dt\n cdf = np.cumsum(pdf_array) * (array[1] - array[0])\n\n for i, s in enumerate(uniform_samples):\n # find idx in cmf\n idx = np.where(cdf >= s)[0][0]\n # add the corresponding value\n samples[i] = array[idx]\n\n return samples\n\n\ndef inverse_transform_sampling_2d(x1, x2, joint_pdf, n_samples):\n \"\"\"\n Generate samples from an arbitrary 2D distribution f(x, y) given a matrix of joint density values.\n\n Using 2D inverse transform sampling: Calculate the marginal p(x1) and the condition p(x2 | x1). Generate\n pseudo random samples from the x1 marginal. Then generate pseudo-random samples from the conditional, each sample\n conditioned on a x1 sample of the previos step.\n :param x1: values of RV x1\n :param x2: values of RV x2\n :param joint_pdf: 2D array of PDF values corresponding to the bins defined in x1 and x2\n :param n_samples: number of samples to draw\n :return: np array with samples (n_samples, 2)\n \"\"\"\n\n # calculate marginal of x1 by integrating over x2\n x1_pdf = np.trapz(joint_pdf, x=x2, axis=1)\n\n # sample from marginal\n samples_x1 = inverse_transform_sampling_1d(x1, x1_pdf, n_samples)\n\n # calculate the conditional of x2 given x1 using Bayes rule\n # this gives a matrix of pdf, one for each values of x1 that we condition on.\n x2_pdf = np.zeros_like(joint_pdf)\n x2_cdf = np.zeros_like(joint_pdf)\n # condition on every x1\n for i in range(x1.size):\n # conditioned on this x1, apply Bayes\n px1 = x1_pdf[i] if x1_pdf[i] > 0. else 1e-12\n x2_pdf[i, ] = joint_pdf[i, :] / px1\n # get the corresponding cdf by cumsum and normalization\n x2_cdf[i, ] = np.cumsum(x2_pdf[i,])\n x2_cdf[i, ] /= np.max(x2_cdf[i,])\n assert np.isclose(x2_cdf[i, 0], 0, atol=1e-5), 'cdf should go from 0 to 1, {}'.format(x2_cdf[i, 0])\n assert np.isclose(x2_cdf[i, -1], 1, atol=1e-5), 'cdf should go from 0 to 1, {}'.format(x2_cdf[i, 0])\n\n # sample new uniform numbers\n uniform_samples = scipy.stats.uniform.rvs(size=n_samples)\n\n samples_x2 = []\n for uni_sample, x1_sample in zip(uniform_samples, samples_x1):\n # get the index of the x1 sample for conditioning\n idx_x1 = np.where(x1 >= x1_sample)[0][0]\n # find idx in conditional cmf\n idx_u = np.where(x2_cdf[idx_x1,] >= uni_sample)[0][0]\n\n # add the corresponding value\n samples_x2.append(x2[idx_u])\n\n return np.vstack((samples_x1, np.array(samples_x2))).T\n\n\nclass NBExactPosterior:\n \"\"\"\n Class for the exact NB posterior. Defined by observed data and priors on k and theta, the shape and scale of the\n Gamma distribution in the Poisson-Gamma mixture.\n\n Has methods to calculate the exact posterior in terms of a joint pdf matrix using numerical integration.\n And methods to evaluate and to generate samples under this pdf.\n\n Once the posterior is calculated and samples are generated, it has properties mean and std to be compared to the\n predicted posterior.\n \"\"\"\n\n def __init__(self, x, prior_k, prior_theta):\n \"\"\"\n Instantiate the posterior with data and priors. 
the actual posterior has to be calculate using\n calculate_exact_posterior()\n :param x: observed data, array of counts\n :param prior_k: scipy.stats.gamma object\n :param prior_theta: scipy.stats.gamma object\n \"\"\"\n\n # set flags\n self.samples_generated = False # whether mean and std are defined\n self.calculated = False # whether exact solution has been calculated\n\n self.xo = x\n self.prior_k = prior_k\n self.prior_th = prior_theta\n\n # prelocate\n self.evidence = None\n self.joint_pdf = None\n self.joint_cdf = None\n self.ks = None\n self.thetas = None\n\n self.samples = []\n\n def calculat_exact_posterior(self, theta_o, n_samples=200, prec=1e-6, verbose=True):\n \"\"\"\n Calculate the exact posterior.\n :param theta_o: the true parameter theta\n :param n_samples: the number of entries per dimension on the joint_pdf grid\n :param prec: precision for the range of prior values\n :return: No return\n \"\"\"\n\n # if not calculated\n if not self.calculated:\n self.calculated = True\n # set up a grid. take into account the true theta value to cover the region around it in the posterior\n # get the quantiles of the true theto under the prior\n k_pos = self.prior_k.cdf(theta_o[0])\n th_pos = self.prior_th.cdf(theta_o[1])\n\n # set the tail around it,\n tail = 0.8\n # choose ranges such that there are enough left and right of the true theta, use prec for bounds\n self.ks = np.linspace(self.prior_k.ppf(np.max((prec, k_pos - tail))),\n self.prior_k.ppf(np.min((1 - prec, k_pos + tail))), n_samples)\n self.thetas = np.linspace(self.prior_th.ppf(np.max((prec, th_pos - tail))),\n self.prior_th.ppf(np.min((1 - prec, th_pos + tail))), n_samples)\n\n joint_pdf = np.zeros((self.ks.size, self.thetas.size))\n\n # calculate likelihodd times prior for every grid value\n with tqdm.tqdm(total=self.ks.size * self.thetas.size, desc='calculating posterior',\n disable=not verbose) as pbar:\n\n for i, k in enumerate(self.ks):\n for j, th in enumerate(self.thetas):\n r = k\n p = th / (1 + th)\n joint_pdf[i, j] = nb_evidence_integrant_direct(r, p, self.xo, self.prior_k, self.prior_th)\n pbar.update()\n\n # calculate the evidence as the integral over the grid of likelihood * prior values\n self.evidence = np.trapz(np.trapz(joint_pdf, x=self.thetas, axis=1), x=self.ks, axis=0)\n self.joint_pdf = joint_pdf / self.evidence\n\n # calculate cdf\n # Calculate CDF by taking cumsum on each axis\n s1 = np.cumsum(np.cumsum(self.joint_pdf, axis=0), axis=1)\n # approximate cdf by summation and normalization\n self.joint_cdf = s1 / s1.max()\n else:\n print('already done')\n\n def eval(self, x, log=False):\n \"\"\"\n Evaluate the joint pdf for value pairs given in x.\n :param x: np.array, shape (n, 2)\n :return: pdf values, np array, shape (n, )\n \"\"\"\n\n x = np.atleast_1d(x)\n assert self.calculated, 'calculate the joint posterior first using calculate_exaxt_posterior'\n assert x.ndim == 2, 'x should have two dimensions, (n_samples, 2)'\n assert x.shape[1] == 2, 'each datum should have two entries, [k, theta]'\n\n pdf_values = []\n # for each pair of (k, theta)\n for xi in x:\n # look up indices in the ranges\n idx_k = np.where(self.ks >= xi[0])[0][0]\n idx_th = np.where(self.thetas >= xi[1])[0][0]\n\n # take corresponding pdf values from pdf grid\n pdf_values.append(self.joint_pdf[idx_k, idx_th])\n\n return np.log(np.array(pdf_values)) if log else np.array(pdf_values)\n\n # to mimic scipy.stats behavior\n def pdf(self, x):\n \"\"\"\n Evaluate pdf at x\n :param x: samples\n :return: density values\n \"\"\"\n return 
self.eval(x)\n\n def logpdf(self, x):\n \"\"\"\n Evaluate log density at x\n :param x: samples\n :return: log density\n \"\"\"\n return self.eval(x, log=True)\n\n def ppf(self, q):\n \"\"\"\n Percent point function at q, or inverse CDF. Approximated by looking up the index in the cdf table\n that is closest to q.\n :param q: quantile\n :return: corresponding value on the RV range\n \"\"\"\n q = np.atleast_1d(q)\n\n # look up the index of the quantile in the 2D CDF grid\n values = []\n for qi in q:\n # find index in grid for every dimension\n idx1, idx2 = np.where(self.joint_cdf >= qi)\n values.append([self.ks[idx1[0]], self.thetas[idx2[0]]])\n\n return np.array(values)\n\n def cdf(self, x):\n\n x = np.atleast_1d(x)\n qs = []\n\n for xi in x:\n # find idx of x on the cdf grid\n idx_k = np.where(self.ks >= xi[0])[0][0]\n idx_th = np.where(self.thetas >= xi[1])[0][0]\n\n # get value from cdf\n qs.append(self.joint_cdf[idx_k, idx_th])\n\n return np.array(qs)\n\n def gen(self, n_samples):\n \"\"\"\n Generate samples under the joint pdf grid using inverse transform sampling\n :param n_samples:\n :return:\n \"\"\"\n\n assert self.calculated, 'calculate the joint posterior first using calculate_exaxt_posterior'\n self.samples_generated = True\n\n # generate new samples\n samples = inverse_transform_sampling_2d(self.ks, self.thetas, self.joint_pdf, n_samples)\n\n # add to list of all samples\n self.samples += samples.tolist()\n\n return samples\n\n def rvs(self, n_samples):\n return self.gen(n_samples)\n\n @property\n def mean(self):\n if len(self.samples) == 0:\n self.gen(1000)\n return np.mean(self.samples, axis=0).reshape(-1)\n\n @property\n def std(self):\n if len(self.samples) == 0:\n self.gen(1000)\n return np.sqrt(np.diag(np.cov(np.array(self.samples).T))).reshape(-1)\n\n @property\n def cov(self):\n if len(self.samples) == 0:\n self.gen(1000)\n return np.cov(np.array(self.samples).T)\n\n def get_marginals(self):\n\n k_pdf = np.trapz(self.joint_pdf, x=self.thetas, axis=1)\n th_pdf = np.trapz(self.joint_pdf, x=self.ks, axis=0)\n\n return [Distribution(self.ks, k_pdf), Distribution(self.thetas, th_pdf)]\n\n\nclass Distribution:\n \"\"\"\n Class for arbitrary distribution defined in terms of an array of pdf values. Used for representing the marginals\n of the numerically calculated NB posterior.\n \"\"\"\n\n def __init__(self, support_array, pdf_array):\n\n self.support = support_array\n self.pdf_array = pdf_array\n\n self.cdf_array = np.cumsum(self.pdf_array)\n self.cdf_array /= self.cdf_array.max()\n self.samples = []\n\n def eval(self, x, log=False):\n\n pdf_values = []\n # for each sample\n for xi in x:\n # look up index in the supported range\n idx_i = np.where(self.support >= xi)[0][0]\n\n # take corresponding pdf value from pdf\n pdf_values.append(self.pdf_array[idx_i])\n\n return np.log(np.array(pdf_values)) if log else np.array(pdf_values)\n\n def pdf(self, x):\n return self.eval(x)\n\n def logpdf(self, x):\n return self.eval(x, log=True)\n\n def gen(self, n_samples):\n \"\"\"\n Generate samples under the pdf using inverse transform sampling\n :param n_samples:\n :return: array-like, samples\n \"\"\"\n # generate samples\n samples = inverse_transform_sampling_1d(self.support, self.pdf_array, n_samples=n_samples)\n # add to all samples\n self.samples += samples.tolist()\n\n return samples\n\n def ppf(self, qs):\n \"\"\"\n Percent point function at q, or inverse CDF. 
Approximated by looking up the index in the cdf table\n that is closest to q.\n :param q: quantile\n :return: corresponding value on the RV range\n \"\"\"\n q = np.atleast_1d(qs)\n\n # look up the index of the quantile in the 2D CDF grid\n values = []\n for q in qs:\n # find index in grid for every dimension\n idx1 = np.where(self.cdf_array >= q)[0][0]\n values.append(self.support[idx1])\n\n return np.array(values)\n\n def cdf(self, xs):\n \"\"\"\n Evaluate CDF at every x in xs. Approximated by looking up the index in the cdf array.\n :param xs: RV values to evaluate\n :return: quantiles in [0, 1]\n \"\"\"\n # make it an array in case it is a scalar.\n xs = np.atleast_1d(xs)\n\n cdf_values = []\n for xi in xs:\n # look up index in the support array\n idx = np.where(self.support >= xi)[0][0]\n # get the corresponding quantile\n cdf_values.append(self.cdf_array[idx])\n\n return np.array(cdf_values)\n\n def get_credible_interval_counts(self, th, credible_intervals):\n # get the quantile of theta\n\n q = self.cdf(th)\n\n # q mass lies below th, therefore the CI is\n if q > 0.5:\n # for q > .5, 1 - how much mass is above q times 2 (2 tails)\n ci = 1 - 2 * (1 - q)\n else:\n # how much mass is below, times 2 (2 tails)\n ci = 1 - 2 * q\n counts = np.ones_like(credible_intervals) * (credible_intervals >= ci)\n return counts\n\n @property\n def mean(self):\n \"\"\"\n Mean estimated from samples\n :return:\n \"\"\"\n if len(self.samples) == 0:\n self.gen(1000)\n\n return np.mean(self.samples)\n\n @property\n def std(self):\n \"\"\"\n Mean estimated from samples\n :return:\n \"\"\"\n if len(self.samples) == 0:\n self.gen(1000)\n\n return np.std(self.samples)\n\n\nclass JointGammaPrior:\n\n def __init__(self, prior_k, prior_theta):\n\n self.prior_k = prior_k\n self.prior_theta = prior_theta\n\n def gen(self, n_samples):\n\n sk = self.prior_k.rvs(n_samples)\n sth = self.prior_theta.rvs(n_samples)\n\n return np.vstack((sk, sth)).reshape(n_samples, 2)\n\n def pdf(self, samples):\n\n samples = np.atleast_1d(samples)\n assert samples.shape[1] == 2, 'samples should be (n_samples, 2)'\n\n pk = self.prior_k.pdf(samples[:, 0])\n pth = self.prior_theta.pdf(samples[:, 1])\n\n return pk * pth\n\n def rvs(self, n_samples):\n return self.gen(n_samples)\n\n def eval(self, samples):\n return self.pdf(samples)","sub_path":"mcabc/utils/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":26484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"83111541","text":"import sys\nimport struct\nimport random\n\nimport numpy as np\nimport itk\nfrom wslink import register as rpc\nfrom wslink.websocket import LinkProtocol\n\nfrom twisted.internet import reactor\n\ndef print_matrix(itkmat, size=(3, 3)):\n for i in range(size[0]):\n for j in range(size[1]):\n sys.stdout.write('{} '.format(itkmat(i, j)))\n sys.stdout.write('\\n')\n\n# modified from: https://github.com/InsightSoftwareConsortium/itk-jupyter-widgets/blob/master/itkwidgets/trait_types.py#L49\ndef _itk_image_to_type(itkimage):\n component_str = repr(itkimage).split('itkImagePython.')[1].split(';')[0][8:]\n if component_str[:2] == 'UL':\n if os.name == 'nt':\n return 'uint32_t',\n else:\n return 'uint64_t',\n mangle = None\n if component_str[:2] == 'SL':\n if os.name == 'nt':\n return 'int32_t', 1,\n else:\n return 'int64_t', 1,\n if component_str[0] == 'V':\n # Vector\n mangle = component_str[1]\n elif component_str[:2] == 'CF':\n # complex flot\n return 'float', 10\n elif component_str[:2] == 'CD':\n 
# complex float\n        return 'double', 10\n    elif component_str[0] == 'C':\n        # CovariantVector\n        mangle = component_str[1]\n    elif component_str[0] == 'O':\n        # Offset\n        return 'int64_t', 4\n    elif component_str[:2] == 'FA':\n        # FixedArray\n        mangle = component_str[2]\n    elif component_str[:4] == 'RGBA':\n        # RGBA\n        mangle = component_str[4:-1]\n    elif component_str[:3] == 'RGB':\n        # RGB\n        mangle = component_str[3:-1]\n    elif component_str[:4] == 'SSRT':\n        # SymmetricSecondRankTensor\n        mangle = component_str[4:-1]\n    else:\n        mangle = component_str[:-1]\n    _python_to_js = {\n        'SC':'Int8Array',\n        'UC':'Uint8Array',\n        'SS':'Int16Array',\n        'US':'Uint16Array',\n        'SI':'Int32Array',\n        'UI':'Uint32Array',\n        'F':'Float32Array',\n        'D':'Float64Array',\n        'B':'Uint8Array'\n    }\n    return _python_to_js[mangle]\n\ndef itk_to_vtkjs_image(itk_image):\n    dims = list(itk_image.GetLargestPossibleRegion().GetSize())\n    extent = []\n    for v in dims:\n        extent.append(0)\n        extent.append(v - 1)\n\n    values = itk.GetArrayFromImage(itk_image).flatten(order='C').tolist()\n\n    return {\n        'vtkClass': 'vtkImageData',\n        'dataDescription': 8, # StructuredData.XYZ_GRID from vtk.js\n        'spacing': list(itk_image.GetSpacing()),\n        'origin': list(itk_image.GetOrigin()),\n        'direction': list(itk.GetArrayFromVnlMatrix(itk_image.GetDirection().GetVnlMatrix().as_matrix()).flatten()),\n        'extent': extent,\n        'pointData': {\n            'vtkClass': 'vtkDataSetAttributes',\n            'activeScalars': 0,\n            'arrays': [{\n                'data': {\n                    'vtkClass': 'vtkDataArray',\n                    'dataType': _itk_image_to_type(itk_image),\n                    'name': 'Scalars',\n                    'numberOfComponents': itk_image.GetNumberOfComponentsPerPixel(),\n                    'rangeTuple': [0, 255], # TODO range of data\n                    'size': len(values),\n                    'values': values,\n                },\n            }],\n        },\n    }\n\ndef _vtkjs_type_convert(blob, jstype):\n    js_to_py_type = {\n        'Int8Array': {\n            'struct': (1, 'b'),\n            'dtype': 'int8',\n        },\n        'Int16Array': {\n            'struct': (2, 'h'),\n            'dtype': 'int16',\n        },\n        'Int32Array': {\n            'struct': (4, 'i'),\n            'dtype': 'int32',\n        },\n        'Uint8Array': {\n            'struct': (1, 'B'),\n            'dtype': 'uint8',\n        },\n        'Uint16Array': {\n            'struct': (2, 'H'),\n            'dtype': 'uint16',\n        },\n        'Uint32Array': {\n            'struct': (4, 'I'),\n            'dtype': 'uint32',\n        },\n        'Float32Array': {\n            'struct': (4, 'f'),\n            'dtype': 'float32',\n        },\n        'Float64Array': {\n            'struct': (8, 'd'),\n            'dtype': 'float64',\n        },\n    }\n\n    size, fmt = js_to_py_type[jstype]['struct']\n    dtype = np.dtype(js_to_py_type[jstype]['dtype'])\n    # sanity\n    assert len(blob) % size == 0\n    full_fmt = '<{0}{1}'.format(int(len(blob) / size), fmt)\n    return np.array(struct.unpack(full_fmt, blob), dtype=dtype, copy=False)\n\ndef unpack_data_arrays(vtk_obj):\n    if isinstance(vtk_obj, list):\n        for i, v in enumerate(vtk_obj):\n            vtk_obj[i] = unpack_data_arrays(v)\n    elif isinstance(vtk_obj, dict):\n        if 'vtkClass' in vtk_obj and vtk_obj['vtkClass'] == 'vtkDataArray':\n            vtk_obj['values'] = _vtkjs_type_convert(vtk_obj['values'], vtk_obj['dataType'])\n        else:\n            for k in vtk_obj:\n                vtk_obj[k] = unpack_data_arrays(vtk_obj[k])\n    return vtk_obj\n\n# converts vtk.js object to an itk image, if valid\ndef vtkjs_to_itk_image(vtkObject):\n    if vtkObject['vtkClass'] == 'vtkImageData':\n        imgArr = vtkObject['pointData']['arrays'][0]['data']['values']\n        # numpy indexes in ZYX order, where X varies the fastest\n        dims = [\n            vtkObject['extent'][5] - vtkObject['extent'][4] + 1,\n            vtkObject['extent'][3] - vtkObject['extent'][2] + 1,\n            vtkObject['extent'][1] - vtkObject['extent'][0] + 1,\n        ]\n        direction = np.zeros((3,3))\n        # why the direction is a json object instead of an array,
nobody knows...\n # (actually I think it's b/c \"direction\" is stored as a Float32Array in vtkjs)\n for x in range(3):\n for y in range(3):\n direction[x][y] = vtkObject['direction'][str(x*3+y)]\n\n itkImage = itk.GetImageFromArray(np.reshape(imgArr, dims))\n # https://discourse.itk.org/t/set-image-direction-from-numpy-array/844/10\n vnlmat = itk.GetVnlMatrixFromArray(direction)\n itkImage.GetDirection().GetVnlMatrix().copy_in(vnlmat.data_block())\n itkImage.SetOrigin(vtkObject['origin'])\n itkImage.SetSpacing(vtkObject['spacing'])\n return itkImage\n return None\n\ndef make_guid():\n return 'guid:{}'.format(random.random())\n\ndef objdir_wrap(func):\n def handler(self, *args):\n newargs = [None] * len(args)\n for i, arg in enumerate(args):\n if type(arg) == dict and '__objguid__' in arg:\n guid = arg['__objguid__']\n newargs[i] = self.objdir_get(guid)\n else:\n newargs[i] = arg\n return func(self, *newargs)\n return handler\n\nclass ObjectProtocol(LinkProtocol):\n def __init__(self):\n super(ObjectProtocol, self).__init__()\n self._objdir = {}\n self._obj_extradata = {}\n\n def objdir_get(self, guid):\n if guid in self._objdir:\n return self._objdir[guid]\n raise Exception('No object with guid {}'.format(guid))\n\n def objdir_get_extradata(self, obj):\n obj_id = id(obj)\n return self._obj_extradata.get(obj_id, None)\n\n @rpc('objdir_put')\n def objdir_put(self, obj, guid=None):\n if 'vtkClass' in obj:\n obj = unpack_data_arrays(obj)\n if guid is None:\n guid = make_guid()\n self._objdir[guid] = obj\n # obj's lifetime is forever\n self._obj_extradata[id(obj)] = {}\n return guid\n\n# Uses twisted's global reactor to schedule a call\ndef deferResults(func):\n def handler(self, *args):\n result_id = make_guid()\n def finish(*results):\n self.publish('defer.results', {\n '$resultId': result_id,\n # TODO make sure this doesn't break code that\n # runs without deferResults.\n '$results': results[0] if len(results) == 1 else results,\n })\n reactor.callLater(0.1, lambda: finish(func(self, *args)))\n return {\n '$deferredResultId': result_id,\n }\n return handler\n","sub_path":"server/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"371200652","text":"from django.urls import path\n\nimport jobs_portal.jobs.signals\n\nfrom jobs_portal.jobs.views import CreateJob, JobDetails, like_job_post, UpdateJob, DeleteJob, post_comment, \\\n delete_comment, JobApplicationView, JobConnectWithJobPosterView\n\nurlpatterns = [\n path('create-job/', CreateJob.as_view(), name='create job'),\n path('update-job//', UpdateJob.as_view(), name='update job'),\n path('delete-job//delete', DeleteJob.as_view(), name='delete job'),\n path('details-job//', JobDetails.as_view(), name='details job'),\n path('like//', like_job_post, name='like job'),\n path('post-comment//', post_comment, name='post comment'),\n path('delete-comment//', delete_comment, name='delete comment'),\n path('apply//', JobApplicationView.as_view(), name='apply'),\n path('connect//', JobConnectWithJobPosterView.as_view(), name='connect'),\n]\n","sub_path":"jobs_portal/jobs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"93274922","text":"from fabric.api import *\nimport time\n\nenv.hosts = [\n '78.47.166.178'\n]\nenv.user = 'root'\n\n@hosts('127.0.0.1')\ndef rb(ip='127.0.0.1'):\n local('python 
manage.py runserver ' + ip + ':8000 --settings=gaggifs.settings.local')\n\n\ndef rf(ip='127.0.0.1'):\n local('python manage.py runserver ' + ip + ':8000 --settings=gaggifs.settings.frontend')\n\n\ndef rdm():\n with cd('/var/www/gaggifs'):\n with prefix('source ../virtualenvs/gaggifs_env/bin/activate'):\n run('echo \"machine github.com login behconsci password bakachim12\" > ~/.netrc')\n run('git pull origin master')\n run('pip install -r requirements.txt')\n run('python manage.py collectstatic --noinput --settings=gaggifs.settings')\n run('python manage.py migrate --settings=gaggifs.settings')\n run('touch conf/uwgi.ini')\n\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"617984257","text":"def reorder(s, f):\n if len(s) == 0:\n return\n i, j = 0, len(s) - 1\n while i < j:\n while i < j and f(s[i]):\n i += 1\n while i < j and not f(s[j]):\n j -= 1\n\n if i < j:\n s[i], s[j] = s[j], s[i]\n\n\na = [1, 2, 3, 4, 5]\n\n\ndef odd(x):\n if x & 0x1 == 1:\n return True\n return False\n\n\nreorder(a, odd)\nprint(a)\n","sub_path":"Chapter3/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"273654067","text":"'''\n Copyright 2018 Jacob R. Haygood\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n '''\n#Nodes will be the sub units of network\n#Each node has a value and a transfer function\n\nclass Node:\n def __init__(self, value, transfer_eq=\"a\", bias=0, id=-1, layer=-1): \n self.value = value\n self.bias = bias\n self.transfer_eq = transfer_eq #Note: you should use \"a\" to denote the activation and \"b\" to denote the bias\n self.id = id #This id will be used to keep track of the node when it is in the Network datatype.\n self.layer = layer\n self.Flags = ['G', 0, 0] #Note: Flags[0] denotes the type of node: (G: general, I: input, O: output)\n #When the UpdateNode method is invokes, the node's value will be set to the evaluation transfer function with the activation as the input.\n def UpdateNode(self, activation):\n self.value = eval(self.transfer_eq.replace(\"a\", str(activation)).replace(\"b\", str(self.bias))) #Replace a with the activation and b with the bias and evaluate transfer_function\n","sub_path":"Core/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"616519815","text":"def P(n):\n return int(n * (3*n - 1) * 0.5)\n\npentagonal = []\n\ndef quadratic_equation(a, b, c):\n disc = (b ** 2 - 4 * a * c) ** 0.5\n\n x1 = (-b + disc) / (2 * a)\n x2 = (-b - disc) / (2 * a)\n\n if float(x1).is_integer() and x1 > 0:\n return True\n\n elif float(x2).is_integer() and x2 > 0:\n return True\n\n else:\n return False\n\ndef find_sum(num1, num2):\n\n if quadratic_equation(1.5, -0.5, -1 * (num1 + num2)):\n return True\n\ndef 
find_difference():\n\n    i = 1\n\n    while True:\n\n        current_Pn = P(i)\n\n        pentagonal.append(current_Pn)\n\n        for j in range(len(pentagonal)-1, 0, -1):\n\n            difference = current_Pn - pentagonal[j]\n\n            if difference in pentagonal:\n                if find_sum(current_Pn, pentagonal[j]):\n                    print(difference)\n\n        i += 1\n\n\nfind_difference()","sub_path":"44 - Pentagonal Numbers.py","file_name":"44 - Pentagonal Numbers.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"270481805","text":"import json\nimport math\nimport os\nimport sys\n\ndef load_data(filepath):\n    if not os.path.exists(filepath):\n        return None\n    with open(filepath, encoding='windows-1251') as data_file:\n        return json.load(data_file)\n\n\ndef get_biggest_bar(json_data):\n    dict_bar = max(json_data, key=lambda item: item['SeatsCount'])\n    return dict_bar\n\n\ndef get_smallest_bar(json_data):\n    dict_bar = min(json_data, key=lambda item: item['SeatsCount'])\n    return dict_bar\n\n\ndef distance(item, longitude, latitude):\n    return math.sqrt((longitude - float(item['Longitude_WGS84']))**2 + (latitude - float(item['Latitude_WGS84']))**2)\n    \n\ndef get_closest_bar(data, longitude, latitude):\n    return min(data, key=lambda item: distance(item, longitude, latitude))\n\n\ndef ask(question, type_value=str):\n    answer = input(question)\n    if not answer:\n        print('Error: the value cannot be empty')\n        sys.exit(1) \n    try:\n        answer = type_value(answer)\n    except ValueError:\n        print('Error: the value was entered incorrectly.')\n        sys.exit(1)\n    return answer\n\n    \n    \ndef print_bar(json_data):\n    print('Bar name: {}'.format(json_data['Name']))\n    print('Address: {}'.format(json_data['Address']))\n    print('Number of seats: {}'.format(json_data['SeatsCount']))\n    print()\n    \ndef main():\n    print('The Bars program!\\n')\n    \n    filepath = ask(question='Enter the path to the json file: ')\n    json_data = load_data(filepath)\n    \n    if not json_data:\n        print('Error: the path to the json file is incorrect.')\n        sys.exit(1)\n    \n    longitude = ask(question='Enter the longitude from the gps coordinates: ', type_value=float)\n    latitude = ask(question='Enter the latitude from the gps coordinates: ', type_value=float)\n    \n    print()\n    \n    print('The smallest bar:')\n    the_smallest_bar = get_smallest_bar(json_data)\n    print_bar(the_smallest_bar)\n\n    print('The biggest bar:')\n    the_biggest_bar = get_biggest_bar(json_data)\n    print_bar(the_biggest_bar)\n\n    print('The closest bar:')\n    the_closest_bar = get_closest_bar(json_data, longitude, latitude) \n    print_bar(the_closest_bar)\n\nif __name__ == '__main__':\n    main()\n    \n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"244401656","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 9 12:41:02 2018\n\n@author: mtrawick\n\"\"\"\n\n\nimport numpy as np\n\ndef create_inbounds_list(v):\n    #creates a list of NEAR edge vertices, which probably have weird\n    #numbers of neighbors.\n    if v.pmw.periodBound.get():\n        v.tri.inbounds = np.ones(len(v.locations), dtype=bool)\n        return()\n    \n    bvertsidx = v.tri.outer_vertices\n    bverts = v.tri.points[bvertsidx]\n    centerx = (np.max(bverts[:,0]) + np.min(bverts[:,0])) / 2\n    centery = (np.max(bverts[:,1]) + np.min(bverts[:,1])) / 2\n    disp = bverts - np.array([centerx,centery])\n    theta = np.arctan2(disp[:,1],disp[:,0])\n    #sort these.\n    order = np.argsort(theta)\n    bverts = bverts[order]\n    \n    
#There's probably a cuter way to do this:\n    numpoints = len(v.tri.points)\n    #numverts = len(bverts)\n    v.tri.inbounds = np.zeros(numpoints, dtype=bool)\n    threshold = v.median_bondlength * 1.0\n    p1 = bverts\n    p2 = np.roll(bverts, 1, axis = 0)\n    denom = 1/ np.linalg.norm(p2-p1, axis=1)\n    for i in range(0,numpoints):\n        p3 = v.tri.points[i]\n        ds = np.abs(np.cross(p2-p1,p3-p1, axis = 1) * denom)\n        min_d = np.min(ds)\n        v.tri.inbounds[i] = (min_d > threshold)\n\ndef neighbor_inds(p,v):\n    return(np.array(range(v.tri.indptr[p], v.tri.indptr[p+1])))\n\n    \ndef neighbors(p,v):\n    return(v.tri.indices[neighbor_inds(p,v)])\n\n\ndef make_bond_between(p1, p2, v):\n    #print(\"making bond between \",p1,p2)\n    sign_p1 = np.sign(v.tri.unboundness[p1])\n    sign_p2 = np.sign(v.tri.unboundness[p2])\n    v.tri.unboundness[p1] += sign_p2\n    v.tri.unboundness[p2] += sign_p1\n    \n    wp2 = np.where(neighbors(p1,v) == p2)\n    v.tri.is_dislocation[neighbor_inds(p1,v)[wp2]] +=1\n\n    wp1 = np.where(neighbors(p2,v) == p1)\n    v.tri.is_dislocation[neighbor_inds(p2,v)[wp1]] +=1\n\n\ndef break_bond_between(p1, p2, v):\n    v.tri.unboundness[p1] += np.sign(v.tri.cnum[p1] - 6)\n    v.tri.unboundness[p2] += np.sign(v.tri.cnum[p2] - 6)\n\n    wp2 = np.where(neighbors(p1,v) == p2)\n    v.tri.is_dislocation[neighbor_inds(p1,v)[wp2]] -=1\n\n    wp1 = np.where(neighbors(p2,v) == p1)\n    v.tri.is_dislocation[neighbor_inds(p2,v)[wp1]] -=1\n\n\ndef can_retract_from(p1, p2, v, edges_ok):\n    #print(\"Recursion level: \", v.tri.recursion_level)\n    if v.tri.recursion_level > 500:\n        return(False)\n    v.tri.inprocess[p1]=True\n    break_bond_between(p1, p2, v)\n\n    if not v.tri.inbounds[p2]:\n        v.tri.inprocess[p1]=False\n        return(True)\n    \n    v.tri.recursion_level += 1\n    \n    if find_a_mate_nicely(p2, v, edges_ok):\n        v.tri.inprocess[p1]=False\n        v.tri.recursion_level -= 1\n        return(True)\n\n    if find_a_mate_rudely(p2, v, edges_ok):\n        v.tri.inprocess[p1]=False\n        v.tri.recursion_level -= 1\n        return(True)\n    \n    v.tri.recursion_level -= 1\n    v.tri.inprocess[p1]=False\n    make_bond_between(p1, p2, v)\n    return(False)\n    \n\ndef can_butt_in_on(p1, p2, v, edges_ok):\n    if (not v.tri.inbounds[p2]) and (not edges_ok): return(False)\n    if not v.tri.buttInOnAble[p2, int(edges_ok)]: return(False)\n    v.tri.inprocess[p1]=True\n    for i in neighbor_inds(p2,v):\n        p3 = v.tri.indices[i]\n        if v.tri.is_dislocation[i] and (v.tri.inbounds[p3] or edges_ok):\n            if can_retract_from(p2, p3, v, edges_ok):\n                v.tri.inprocess[p1]=False\n                return(True)\n    v.tri.inprocess[p1]=False\n    v.tri.buttInOnAble[p2,int(edges_ok)] = False\n    return(False)\n    #Note: I really wonder about setting p1 in process. Isn't it already in process?\n    #And don't I screw stuff up if I make it NOT inprocess at the end?\n    \n\ndef helpful_to_butt_in_on(p1, p2, v, edges_ok):\n    helpful = ((np.sign(v.tri.cnum[p1] - 6) == -np.sign(v.tri.cnum[p2] - 6))\n                and not v.tri.inprocess[p2]\n                and v.tri.unboundness[p1] != 0) #added to algorithm, 2018, probably redundant.\n    #Also, what if a 4 is already bonded to a 7.
Is it still helpful to butt in on itself?\n #if helpful: print(\"helpful:\", p1, p2)\n return(helpful)\n \n\ndef find_a_mate_rudely(p1, v, edges_ok):\n # print(\"finding mate RUDELY for\", p1)\n for p2 in neighbors(p1,v) :\n if helpful_to_butt_in_on(p1, p2, v, edges_ok):\n if can_butt_in_on(p1, p2, v, edges_ok):\n make_bond_between(p1, p2, v)\n return(True)\n return(False)\n\n\ndef helpful_to_bond_nicely(p1,p2,v,edges_ok):\n helpful = ((np.sign(v.tri.unboundness[p1]) == -np.sign(v.tri.unboundness[p2]))\n and not v.tri.inprocess[p2]\n and (v.tri.inbounds[p2] or edges_ok))\n return(helpful)\n\n\ndef find_a_mate_nicely(p1, v, edges_ok):\n #print(\"finding mate nicely for\",p1)\n for p2 in neighbors(p1,v) :\n if helpful_to_bond_nicely(p1, p2, v, edges_ok):\n make_bond_between(p1, p2, v)\n return(True)\n return(False)\n\n\ndef find_mates_for(p,v):\n if v.tri.inbounds[p]:\n v.tri.inprocess[p] = True\n #I suspect that these inprocess statements are unnecessary in this funciton.\n while v.tri.unboundness[p] != 0:\n if not find_a_mate_nicely(p, v, edges_ok=False): break\n\n while v.tri.unboundness[p] != 0:\n if not find_a_mate_rudely(p, v, edges_ok=False): break\n\n while v.tri.unboundness[p] != 0:\n if not find_a_mate_nicely(p, v, edges_ok=True): break\n\n while v.tri.unboundness[p] != 0:\n if not find_a_mate_rudely(p, v, edges_ok=True): break\n\n v.tri.inprocess[p] = False\n\n\ndef calculate_dislocations(v):\n create_inbounds_list(v)\n v.tri.is_dislocation = np.zeros(len(v.tri.indices), dtype=np.int16)\n #the bond between a 4 and an 8 would be a \"double bond\", with a 2 here.\n v.tri.inprocess=np.full(len(v.locations), False)\n v.tri.buttInOnAble=np.full((len(v.locations), 2), True)\n v.tri.unboundness = v.tri.cnum.copy() - 6\n v.tri.recursion_level = 0\n for i in range(0, len(v.locations)):\n find_mates_for(i, v)\n\n \nif __name__ == '__main__':\n #print(\"This file is not runnable as main. 
Run Pyxtalmain.py instead.\")\n import pyxtal\n pyxtal.vp_start_gui()\n","sub_path":"pyxtal_dislocations.py","file_name":"pyxtal_dislocations.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"271513071","text":"from lib.scrapy_table import Scrapy_Table\n\nurl=\"https://pt.wikipedia.org/wiki/C%C3%A2mara_Municipal_de_S%C3%A3o_Paulo\"\n\nsite_connect = Scrapy_Table(url)\n\ntables = site_connect.get_tables(5)\n\nfor row in tables[1:]:\n \n name = row[0]\n\n #print(name[0:8])\n print(name[:8].replace(' ',''))\n \n\n\n","sub_path":"modulo1/Labs/myCode/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"104699023","text":"# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nimport tarfile\nimport unittest\n\nfrom pants.engine.fs import PathGlobs, Snapshot, create_fs_rules\nfrom pants.engine.isolated_process import ExecuteProcessRequest, ExecuteProcessResult\nfrom pants.engine.rules import RootRule, rule\nfrom pants.engine.selectors import Get, Select\nfrom pants.util.objects import TypeCheckError, datatype\nfrom pants_test.engine.scheduler_test_base import SchedulerTestBase\n\n\nclass Concatted(datatype([('value', str)])): pass\n\n\nclass BinaryLocation(datatype(['bin_path'])):\n\n def __new__(cls, bin_path):\n this_object = super(BinaryLocation, cls).__new__(cls, str(bin_path))\n\n bin_path = this_object.bin_path\n\n if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):\n return this_object\n\n raise TypeCheckError(\n cls.__name__,\n \"path {} does not name an existing executable file.\".format(bin_path))\n\n\nclass ShellCat(datatype([('binary_location', BinaryLocation)])):\n \"\"\"Wrapper class to show an example of using an auxiliary class (which wraps\n an executable) to generate an argv instead of doing it all in\n CatExecutionRequest. 
This can be used to encapsulate operations such as\n sanitizing command-line arguments which are specific to the executable, which\n can reduce boilerplate for generating ExecuteProcessRequest instances if the\n executable is used in different ways across multiple different types of\n process execution requests.\"\"\"\n\n @property\n def bin_path(self):\n return self.binary_location.bin_path\n\n def argv_from_snapshot(self, snapshot):\n cat_file_paths = [f.path for f in snapshot.files]\n\n option_like_files = [p for p in cat_file_paths if p.startswith('-')]\n if option_like_files:\n raise ValueError(\n \"invalid file names: '{}' look like command-line options\"\n .format(option_like_files))\n\n return (self.bin_path,) + tuple(cat_file_paths)\n\n\nclass CatExecutionRequest(datatype([('shell_cat', ShellCat), ('path_globs', PathGlobs)])): pass\n\n\n@rule(Concatted, [Select(CatExecutionRequest)])\ndef cat_files_process_result_concatted(cat_exe_req):\n cat_bin = cat_exe_req.shell_cat\n cat_files_snapshot = yield Get(Snapshot, PathGlobs, cat_exe_req.path_globs)\n process_request = ExecuteProcessRequest.create_from_snapshot(\n argv=cat_bin.argv_from_snapshot(cat_files_snapshot),\n env=tuple(),\n snapshot=cat_files_snapshot,\n )\n cat_process_result = yield Get(ExecuteProcessResult, ExecuteProcessRequest, process_request)\n yield Concatted(str(cat_process_result.stdout))\n\n\ndef create_cat_stdout_rules():\n return [\n cat_files_process_result_concatted,\n RootRule(CatExecutionRequest),\n ]\n\n\nclass JavacVersionExecutionRequest(datatype([('binary_location', BinaryLocation)])):\n\n @property\n def bin_path(self):\n return self.binary_location.bin_path\n\n def gen_argv(self):\n return (self.bin_path, '-version',)\n\n\n@rule(ExecuteProcessRequest, [Select(JavacVersionExecutionRequest)])\ndef process_request_from_javac_version(javac_version_exe_req):\n yield ExecuteProcessRequest.create_with_empty_snapshot(\n argv=javac_version_exe_req.gen_argv(),\n env=tuple())\n\n\nclass JavacVersionOutput(datatype([('value', str)])): pass\n\n\nclass ProcessExecutionFailure(Exception):\n \"\"\"Used to denote that a process exited, but was unsuccessful in some way.\n\n For example, exiting with a non-zero code.\n \"\"\"\n\n MSG_FMT = \"\"\"process '{desc}' failed with code {code}.\nstdout:\n{stdout}\nstderr:\n{stderr}\n\"\"\"\n\n def __init__(self, exit_code, stdout, stderr, process_description):\n # These are intentionally \"public\" members.\n self.exit_code = exit_code\n self.stdout = stdout\n self.stderr = stderr\n\n msg = self.MSG_FMT.format(\n desc=process_description, code=exit_code, stdout=stdout, stderr=stderr)\n\n super(ProcessExecutionFailure, self).__init__(msg)\n\n\n@rule(JavacVersionOutput, [Select(JavacVersionExecutionRequest)])\ndef get_javac_version_output(javac_version_command):\n javac_version_proc_req = yield Get(\n ExecuteProcessRequest, JavacVersionExecutionRequest, javac_version_command)\n javac_version_proc_result = yield Get(\n ExecuteProcessResult, ExecuteProcessRequest, javac_version_proc_req)\n\n exit_code = javac_version_proc_result.exit_code\n if exit_code != 0:\n stdout = javac_version_proc_result.stdout\n stderr = javac_version_proc_result.stderr\n # TODO(cosmicexplorer): We should probably make this automatic for most\n # process invocations (see #5719).\n raise ProcessExecutionFailure(\n exit_code, stdout, stderr, 'obtaining javac version')\n\n yield JavacVersionOutput(str(javac_version_proc_result.stderr))\n\n\nclass JavacSources(datatype([('path_globs', PathGlobs)])):\n 
\"\"\"PathGlobs wrapper for Java source files to show an example of making a\n custom type to wrap generic types such as PathGlobs to add usage context.\n\n See CatExecutionRequest and rules above for an example of using PathGlobs\n which does not introduce this additional layer of indirection.\n \"\"\"\n\n\nclass JavacCompileRequest(datatype([\n ('binary_location', BinaryLocation),\n ('javac_sources', JavacSources),\n])):\n\n @property\n def bin_path(self):\n return self.binary_location.bin_path\n\n def argv_from_source_snapshot(self, snapshot):\n snapshot_file_paths = [f.path for f in snapshot.files]\n\n return (self.bin_path,) + tuple(snapshot_file_paths)\n\n\n# TODO: make this contain the snapshot(s?) of the output files (or contain\n# something that contains it) once we've made it so processes can make snapshots\n# of the files they produce.\nclass JavacCompileResult(object): pass\n\n\n@rule(JavacCompileResult, [Select(JavacCompileRequest)])\ndef javac_compile_process_result(javac_compile_req):\n sources_snapshot = yield Get(Snapshot, PathGlobs, javac_compile_req.javac_sources.path_globs)\n process_request = ExecuteProcessRequest.create_from_snapshot(\n argv=javac_compile_req.argv_from_source_snapshot(sources_snapshot),\n env=tuple(),\n snapshot=sources_snapshot,\n )\n javac_proc_result = yield Get(ExecuteProcessResult, ExecuteProcessRequest, process_request)\n\n exit_code = javac_proc_result.exit_code\n if exit_code != 0:\n stdout = javac_proc_result.stdout\n stderr = javac_proc_result.stderr\n raise ProcessExecutionFailure(\n exit_code, stdout, stderr, 'javac compilation')\n\n yield JavacCompileResult()\n\n\ndef create_javac_compile_rules():\n return [\n javac_compile_process_result,\n RootRule(JavacCompileRequest),\n ]\n\n\nclass ExecuteProcessRequestTest(SchedulerTestBase, unittest.TestCase):\n def _default_args_execute_process_request(self, argv=tuple(), env=tuple()):\n return ExecuteProcessRequest.create_with_empty_snapshot(\n argv=argv,\n env=env,\n )\n\n def test_blows_up_on_invalid_args(self):\n try:\n self._default_args_execute_process_request()\n except ValueError:\n self.assertTrue(False, \"should be able to construct without error\")\n\n with self.assertRaises(ValueError):\n self._default_args_execute_process_request(argv=['1'])\n with self.assertRaises(ValueError):\n self._default_args_execute_process_request(argv=('1',), env=[])\n\n # TODO(cosmicexplorer): we should probably check that the digest info in\n # ExecuteProcessRequest is valid, beyond just checking if it's a string.\n with self.assertRaises(ValueError):\n ExecuteProcessRequest(argv=('1',), env=tuple(), input_files_digest='', digest_length='')\n with self.assertRaises(ValueError):\n ExecuteProcessRequest(argv=('1',), env=tuple(), input_files_digest=3, digest_length=0)\n with self.assertRaises(ValueError):\n ExecuteProcessRequest(argv=('1',), env=tuple(), input_files_digest='', digest_length=-1)\n\n\nclass IsolatedProcessTest(SchedulerTestBase, unittest.TestCase):\n\n def test_integration_concat_with_snapshots_stdout(self):\n scheduler = self.mk_scheduler_in_example_fs(create_cat_stdout_rules())\n\n cat_exe_req = CatExecutionRequest(\n ShellCat(BinaryLocation('/bin/cat')),\n PathGlobs.create('', include=['fs_test/a/b/*']))\n\n self.assertEqual(\n repr(cat_exe_req),\n \"CatExecutionRequest(shell_cat=ShellCat(binary_location=BinaryLocation(bin_path='/bin/cat')), path_globs=PathGlobs(include=(u'fs_test/a/b/*',), exclude=()))\")\n\n results = self.execute(scheduler, Concatted, cat_exe_req)\n self.assertEqual(1, 
len(results))\n concatted = results[0]\n self.assertEqual(Concatted(str('one\\ntwo\\n')), concatted)\n\n def test_javac_version_example(self):\n scheduler = self.mk_scheduler_in_example_fs([\n RootRule(JavacVersionExecutionRequest),\n process_request_from_javac_version,\n get_javac_version_output,\n ])\n\n request = JavacVersionExecutionRequest(BinaryLocation('/usr/bin/javac'))\n\n self.assertEqual(\n repr(request),\n \"JavacVersionExecutionRequest(binary_location=BinaryLocation(bin_path='/usr/bin/javac'))\")\n\n results = self.execute(scheduler, JavacVersionOutput, request)\n self.assertEqual(1, len(results))\n javac_version_output = results[0]\n self.assertIn('javac', javac_version_output.value)\n\n def test_javac_compilation_example_success(self):\n scheduler = self.mk_scheduler_in_example_fs(create_javac_compile_rules())\n\n request = JavacCompileRequest(\n BinaryLocation('/usr/bin/javac'),\n JavacSources(PathGlobs.create('', include=[\n 'scheduler_inputs/src/java/simple/Simple.java',\n ])))\n\n self.assertEqual(\n repr(request),\n \"JavacCompileRequest(binary_location=BinaryLocation(bin_path='/usr/bin/javac'), javac_sources=JavacSources(path_globs=PathGlobs(include=(u'scheduler_inputs/src/java/simple/Simple.java',), exclude=())))\")\n\n results = self.execute(scheduler, JavacCompileResult, request)\n self.assertEqual(1, len(results))\n # TODO: Test that the output snapshot contains Simple.class at the correct\n # path\n\n def test_javac_compilation_example_failure(self):\n scheduler = self.mk_scheduler_in_example_fs(create_javac_compile_rules())\n\n request = JavacCompileRequest(\n BinaryLocation('/usr/bin/javac'),\n JavacSources(PathGlobs.create('', include=[\n 'scheduler_inputs/src/java/simple/Broken.java',\n ])))\n\n self.assertEqual(\n repr(request),\n \"JavacCompileRequest(binary_location=BinaryLocation(bin_path='/usr/bin/javac'), javac_sources=JavacSources(path_globs=PathGlobs(include=(u'scheduler_inputs/src/java/simple/Broken.java',), exclude=())))\")\n\n with self.assertRaises(ProcessExecutionFailure) as cm:\n self.execute_raising_throw(scheduler, JavacCompileResult, request)\n e = cm.exception\n self.assertEqual(1, e.exit_code)\n self.assertIn(\"NOT VALID JAVA\", e.stderr)\n\n def mk_example_fs_tree(self):\n fs_tree = self.mk_fs_tree(os.path.join(os.path.dirname(__file__), 'examples'))\n test_fs = os.path.join(fs_tree.build_root, 'fs_test')\n with tarfile.open(os.path.join(test_fs, 'fs_test.tar')) as tar:\n tar.extractall(test_fs)\n return fs_tree\n\n def mk_scheduler_in_example_fs(self, rules):\n rules = list(rules) + create_fs_rules()\n return self.mk_scheduler(rules=rules, project_tree=self.mk_example_fs_tree())\n","sub_path":"tests/python/pants_test/engine/test_isolated_process.py","file_name":"test_isolated_process.py","file_ext":"py","file_size_in_byte":11369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"143497437","text":"from datetime import date\nfrom uuid import uuid4\nfrom django.db import models\nfrom rest_framework import serializers\n\n\nclass NannyPreviousName(models.Model):\n \"\"\"\n Model for PREVIOUS_NAME table, used to contain previous\n \"\"\"\n\n # Primary key\n previous_name_id = models.UUIDField(primary_key=True, default=uuid4)\n\n # Foreign key for application id\n application_id = models.UUIDField(blank=True)\n\n # Actual name fields\n first_name = models.CharField(max_length=200, blank=True)\n middle_names = models.CharField(max_length=200, blank=True)\n last_name = 
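# --- Added example (hypothetical helper, not part of the application): the
# --- model below stores dates as six nullable integers rather than DateField,
# --- so recombining them needs a validating step along these lines.
import datetime

def to_date(day, month, year):
    # Any missing component means no date was recorded.
    if None in (day, month, year):
        return None
    try:
        return datetime.date(year, month, day)
    except ValueError:
        # The split-integer representation cannot guarantee a valid calendar
        # date (e.g. day=31, month=2), so validation happens on assembly.
        return None

# to_date(5, 11, 2001) -> datetime.date(2001, 11, 5); to_date(31, 2, 2000) -> None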
models.CharField(max_length=200, blank=True)\n\n # Date fields\n start_day = models.IntegerField(blank=True, null=True)\n start_month = models.IntegerField(blank=True, null=True)\n start_year = models.IntegerField(blank=True, null=True)\n end_day = models.IntegerField(blank=True, null=True)\n end_month = models.IntegerField(blank=True, null=True)\n end_year = models.IntegerField(blank=True, null=True)\n\n order = models.IntegerField(blank=True, null=True)\n\n class Meta:\n db_table = 'PREVIOUS_NAME'\n\n @property\n def timelog_fields(self):\n \"\"\"\n Specify which fields to track in this model once application is returned.\n :return: tuple of fields which needs update tracking when application is returned\n \"\"\"\n return (\n 'first_name',\n 'middle_names',\n 'last_name'\n )\n\n\nclass PreviousNameSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = NannyPreviousName\n fields = '__all__'\n\n def get_summary_table(self, index):\n\n name = self.data[\"first_name\"] + \" \" + self.data.get(\"middle_names\") + \" \" + self.data[\"last_name\"]\n start_date = str(self.data[\"start_day\"]) + \"/\" + str(self.data[\"start_month\"]) + \"/\" + str(self.data[\"start_year\"])\n end_date = str(self.data[\"end_day\"]) + \"/\" + str(self.data[\"end_month\"]) + \"/\" + str(self.data[\"end_year\"])\n return [{\n \"name\": \"Previous name\",\n \"value\": name,\n 'pk': self.data['previous_name_id'],\n \"index\": index,\n \"section\": \"applicant_personal_details_section\",\n \"reverse\": \"personal-details:Personal-Details-Previous-Names\",\n \"change_link_description\": \"previous name\"},\n {\"name\": \"Start date\",\n \"value\": start_date,\n 'pk': self.data['previous_name_id'],\n \"index\": index + 1,\n \"section\": \"applicant_personal_details_section\",\n \"reverse\": \"personal-details:Personal-Details-Previous-Names\",\n \"change_link_description\": \"previous name\"},\n {\"name\": \"End date\",\n \"value\": end_date,\n 'pk': self.data['previous_name_id'],\n \"index\": index + 2,\n \"section\": \"applicant_personal_details_section\",\n \"reverse\": \"personal-details:Personal-Details-Previous-Names\",\n \"change_link_description\": \"previous name\"}]\n\n\n","sub_path":"application/models/previous_name.py","file_name":"previous_name.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"301011705","text":"import numpy as np \nfrom PIL import Image, ImageDraw\n\ndef normalize(matrix):\n\tsum = np.sum(matrix)\n\tif sum > 0.:\n\t\treturn matrix / sum\n\telse:\n\t\treturn matrix\t\n\ndef bounded(x):\n\tif (x > 255): x = 255\n\tif (x < 0): x = 0\n\treturn int(x)\n\ndef processPixel(height, width, pix, i, j, kernel):\n\tkernel = normalize(kernel)\n\t# Accumulators for the three colour channels.\n\tr, g, b = 0, 0, 0\n\t#height, width = pix.size[0], pix.size[1]\n\t(kernelHeight, kernelWidth) = kernel.shape\n\t# ky/kx index the kernel so they cannot clobber the channel accumulators;\n\t# // keeps the pixel offsets integral under Python 3.\n\tfor (ky, kx), k in np.ndenumerate(kernel):\n\t\tposY = i + (ky - kernelHeight//2)\n\t\tposX = j + (kx - kernelWidth//2)\n\t\tif not (0 <= posX < width) or not (0 <= posY < height):\n\t\t\tcontinue\n\t\tr += pix[posY, posX][0] * k\n\t\tg += pix[posY, posX][1] * k\n\t\tb += pix[posY, posX][2] * k\n\treturn (bounded(r), bounded(g), bounded(b))\n\ndef process(image, kernel):\n\tdraw = ImageDraw.Draw(image)\n\t# NB: PIL's image.size is (width, height); the two names below are swapped,\n\t# but they are used consistently throughout, so the indexing stays correct.\n\theight = image.size[0]\n\twidth = image.size[1]\n\tpix = image.load()\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tdraw.point((i, j), processPixel(height, width, pix, i, j, 
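# --- Added example (hedged alternative, not the module's API): the per-pixel
# --- Python loop above pays interpreter overhead on every pixel. With NumPy
# --- already imported, the same filtering can be done with one vectorised add
# --- per kernel cell; replicate-padding at the edges is an assumption here.
import numpy as np

def convolve_rgb(img, kernel):
    # img: (H, W, 3) uint8 array; the kernel is normalised as in normalize().
    total = kernel.sum()
    k = kernel / total if total > 0 else kernel
    kh, kw = k.shape
    pad_y, pad_x = kh // 2, kw // 2
    padded = np.pad(img.astype(np.float64),
                    ((pad_y, pad_y), (pad_x, pad_x), (0, 0)), mode='edge')
    out = np.zeros(img.shape, dtype=np.float64)
    # One whole-image multiply-add per kernel cell instead of one per pixel.
    for dy in range(kh):
        for dx in range(kw):
            out += k[dy, dx] * padded[dy:dy + img.shape[0], dx:dx + img.shape[1], :]
    return np.clip(out, 0, 255).astype(np.uint8)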
kernel))\n\n\n","sub_path":"convolve.py","file_name":"convolve.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"18514458","text":"# To change this template, choose Tools | Templates\n# and open the template in the editor.\n\n__author__=\"karlo\"\n__date__ =\"$Jun 5, 2011 4:02:31 PM$\"\n\nfrom gasp import *\ndef scores():\n begin_graphics(800, 600, title=\"Catch\", background=color.YELLOW)\n set_speed(120)\n\n player_score = 0\n comp_score = 0\n\n player = Text(\"Player: %d Points\" % player_score, (10, 570), size=24)\n computer = Text(\"Computer: %d Points\" % comp_score, (640, 570), size=24)\n\n while player_score < 5 and comp_score < 5:\n sleep(1)\n winner = random_between(0, 1)\n if winner:\n player_score += 1\n remove_from_screen(player)\n player = Text(\"Player: %d Points\" % player_score, (10, 570), size=24)\n else:\n comp_score += 1\n remove_from_screen(computer)\n computer = Text(\"Computer: %d Points\" % comp_score, (640, 570), size=24)\n\n if player_score == 5:\n Text(\"Player Wins!\", (340, 290), size=32)\n else:\n Text(\"Computer Wins!\", (340, 290), size=32)\n\n sleep(4)\n\n end_graphics()\n\nif __name__ == \"__main__\":\n scores()\n","sub_path":"scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"146729970","text":"import string\nimport random\n\nclass Encryption():\n\n def __init__(self,seed):\n\n # Sets a random seed and a self.seed attribute\n random.seed(seed)\n\n self.seed = seed\n\n # Creates an empty string attribute to hold the encrypted phrase\n self.encrypted_message = \"\"\n\n # One is the standard alphabet, the other is a shuffled alphabet\n self.true_alphabet = list(string.ascii_lowercase)\n self.random_alphabet = random.sample(self.true_alphabet, len(self.true_alphabet))\n\n def encrypt(self, message):\n \"\"\"\n This method will take in a string message and encrypt it.\n \"\"\"\n\n output = \"\"\n\n # Replaces every letter with a random letter\n for i in range(len(message)):\n output += message[i]\n output += random.sample(self.true_alphabet, 1)[0]\n\n # Reverses the string\n self.encrypted_message = output[::-1]\n\n # Uses a random shuffled alphabet for a caesar cipher\n encrypted_message_two = list(range(len(self.encrypted_message)))\n\n for i,letter in enumerate(self.encrypted_message.lower()):\n\n if letter in self.true_alphabet:\n index = self.true_alphabet.index(letter)\n encrypted_message_two[i] = self.random_alphabet[index]\n # For punctuation and spaces\n else:\n encrypted_message_two[i] = letter\n\n self.encrypted_message = \"\".join(encrypted_message_two)\n return self.encrypted_message\n pass\n\n def decrypt(self,message,seed):\n \"\"\"\n This method takes in a messsage and a seed for the random shuffled alphabet.\n It then returns the decrypted alphabet.\n \"\"\"\n\n random.seed(seed)\n session_random_alphabet = random.sample(self.true_alphabet, len(self.true_alphabet))\n\n decrypted_message = list(range(len(message)))\n\n # Undo randomized cipher\n for i, letter in enumerate(message.lower()):\n\n if letter in self.true_alphabet:\n index = session_random_alphabet.index(letter)\n decrypted_message[i] = self.true_alphabet[index]\n # For punctuation and spaces\n else:\n decrypted_message[i] = letter\n\n decrypted_message = \"\".join(decrypted_message)[::-1][::2]\n return decrypted_message\n\n\nx = Encryption(9)\n\ndef 
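# --- Added example (minimal sketch of the idea behind the class above, with
# --- illustrative names): seeding the PRNG makes random.sample reproducible,
# --- so the same seed regenerates the same shuffled alphabet for decryption.
import random
import string

def make_alphabet(seed):
    random.seed(seed)
    letters = list(string.ascii_lowercase)
    return dict(zip(letters, random.sample(letters, len(letters))))

def substitute(text, mapping):
    # Characters outside the alphabet (spaces, punctuation) pass through.
    return ''.join(mapping.get(ch, ch) for ch in text.lower())

fwd = make_alphabet(9)
rev = {v: k for k, v in fwd.items()}
assert substitute(substitute('hello world', fwd), rev) == 'hello world'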
encryptMsg(msg):\r\n return x.encrypt(msg)\r\n\r\ndef decryptMsg(secret_message):\r\n return x.decrypt(str(secret_message), 9)","sub_path":"venv/encAndDec.py","file_name":"encAndDec.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"390815966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 25 15:09:42 2016\n@author: dorin\n\nSEARCHES FOR THE EDGES OF THE SQUARE FROM THE OUTSIDE TOWARDS THE INSIDE. \nFIRST THE OUTER SQUARE AND THEN THE INNER SQUARE \nproblems may occur if the image is very dark between the edges\n\n\"\"\"\n\nfrom PIL import Image \nimport numpy as np\n#import matplotlib.pyplot as plt\nfrom collections import Counter\n\n\ndef harta(nume): # Open and process the image (black-and-white, threshold)\n img = Image.open(nume)\n img = img.convert('L')\n ar = np.array(img)\n for i in range(len(ar)):\n for j in range(len(ar[0])):\n if ar[i][j] > 50: # THIS IS THE THRESHOLD VALUE\n ar[i][j] = 255\n else:\n ar[i][j] = 0\n return ar\n\nnume_harta = 1\n\nnh = str(nume_harta)+'.jpg'\n\niar = harta(nh)\n\nwidth, height = Image.fromarray(iar).size\nprint('Width = ', width)\nprint('Height = ', height)\n\nstpd = int(height/100*26.8)# the top square starts here\nprint('STPD = ', stpd)\n\ncpv = int(height/100*60)# approximate centre of the square\nprint('CPV = ', cpv)\n\nmv = int(height/100*0.9833 +0.5)\nmo = int(width/100*1.302+0.5)\nprint('MV = ', mv)\nprint('MO = ', mo)\n # SEARCH FOR THE BOTTOM EDGE OF THE SQUARE (LEFT AND RIGHT)\n\n # search for the bottom-left edge\n\npix_list =[] \n\nfor i in range(3,int(width/2)):\n for j in reversed(range(int(height/2), height-3)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n edl=k # row where the outer square starts (bottom left)\nprint('EDL = ', edl)\n\npix_list =[] \n\nfor i in range(3,int(width/2)):\n for j in reversed(range(int(height/2), edl-mv)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n idl=k # row where the inner square starts (bottom left)\nprint('IDL = ', idl)\n\n\n\n # search for the bottom-right edge\n\npix_list =[] \n\nfor i in range(int(width/2), width-3):\n for j in reversed(range(int(height/2), height-3)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n edr=k # row where the outer square starts (bottom right)\nprint('EDR = ', edr)\n\npix_list =[] \n\nfor i in range(int(width/2), width-3):\n for j in reversed(range(int(height/2), edr-mv)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n idr=k # row where the inner square starts (bottom right)\nprint('IDR = ', idr)\n\n\n\n\n\n\n\n # SEARCH FOR THE TOP EDGE OF THE SQUARE (LEFT AND RIGHT)\n\n\n\n # search for the top-left edge\n\npix_list =[] \n\nfor i in range(3,int(width/2)):\n for j in range(stpd, int(height/2)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n eul=k # row where the outer square starts (top left)\nprint('EUL = ', eul)\n\npix_list =[] \n\nfor i in range(3,int(width/2)):\n for j in range(eul+mv, int(height/2)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n iul=k # row where the inner square starts (top left)\nprint('IUL = ', iul)\n\n\n\n # search for the top-right edge\n\npix_list =[] \n\nfor i in range(int(width/2), width-3):\n for j in range(stpd, int(height/2)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n eur=k # row where the outer square starts (top right)\nprint('EUR = ', eur)\n\npix_list =[] \n\nfor i in range(int(width/2), width-3):\n for j in range(eur+mv, int(height/2)):\n if iar[j][i] == 0:\n pix_list.append(j)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n iur=k # row where the inner square starts (top right)\nprint('IUR = ', iur)\n\n\n\n # SEARCH FOR THE LEFT EDGE OF THE SQUARE (TOP AND BOTTOM)\n\n\n\n # search for the left edge, top half\n\npix_list =[] \n\nfor j in range(stpd,cpv):\n for i in range(3, int(width/2)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n elu=k # column where the outer square starts (left, top half)\nprint('ELU = ', elu)\n\npix_list =[] \n\nfor j in range(stpd,cpv):\n for i in range(elu+mo, int(width/2)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n ilu=k # column where the inner square starts (left, top half)\nprint('Ilu = ', ilu)\n\n\n\n # search for the left edge, bottom half\n\npix_list =[] \n\nfor j in range(cpv, height-3):\n for i in range(3, int(width/2)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n eld=k # column where the outer square starts (left, bottom half)\nprint('ELD = ', eld)\n\npix_list =[] \n\nfor j in range(cpv, height-3):\n for i in range(eld+mo, int(width/2)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n ild=k # column where the inner square starts (left, bottom half)\nprint('IlD = ', ild)\n\n\n\n\n\n\n # SEARCH FOR THE RIGHT EDGE OF THE SQUARE (TOP AND BOTTOM)\n\n\n\n # search for the right edge, top half\n\npix_list =[] \n\nfor j in range(stpd,cpv):\n for i in reversed(range(int(width/2), width-3)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n eru=k # column where the outer square starts (right, top half)\nprint('ErU = ', eru)\n\npix_list =[] \n\nfor j in range(stpd,cpv):\n for i in reversed(range(int(width/2), eru-mo)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n iru=k # column where the inner square starts (right, top half)\nprint('Iru = ', iru)\n\n\n\n # search for the right edge, bottom half\n\npix_list =[] \n\nfor j in range(cpv, height-3):\n for i in reversed(range(int(width/2), width-3)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n erd=k # column where the outer square starts (right, bottom half)\nprint('ErD = ', erd)\n\npix_list =[] \n\nfor j in range(cpv, height-3):\n for i in reversed(range(int(width/2), erd-mo)):\n if iar[j][i] == 0:\n pix_list.append(i)\n break\n\npix_dic = (Counter(pix_list))\n\nx = 0\nfor k, v in pix_dic.items():\n if v>x:\n x=v\n ird=k # column where the inner square starts (right, bottom half)\nprint('IrD = ', ird)\n\n\n\n# THESE ARE THE CORNERS!!!!\n\na= iul,ilu # Top-left corner\nb= iur, iru # Top-right corner\nc= idl, ild # Bottom-left corner\nd= idr, ird # Bottom-right corner\n\n\n# Corner test: \nim = Image.open(nh)\nar = np.array(im)\n\nfor i in range(iur-5, iur+5):\n for j in range(iru-5, iru+5):\n ar[i][j] = 0\n\nfor i in range(iul-5, iul+5):\n for j in range(ilu-5, ilu+5):\n ar[i][j] = 0\n\nfor i in range(idl-5, idl+5):\n for j in range(ild-5, ild+5):\n ar[i][j] = 0\n \nfor i in range(idr-5, idr+5):\n for j in range(ird-5, ird+5):\n ar[i][j] = 0\n\n\n\nimg2 = Image.fromarray(ar)\nimg2.save('test_colturi.jpg')\n\n\n\n\n\n\n\n\n\n\n","sub_path":"final_colturi.py","file_name":"final_colturi.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"79178070","text":"import numpy as np\nimport matplotlib.pyplot as pyplot\n\n\nDir = ''\n\nSimStats_GFI = np.loadtxt( 'output_GFI/SimStats_GFI_.csv', delimiter=',')\nSimStats_nonlocal = np.loadtxt( 'output_nonlocal/SimStats_nonLocalPrior_.csv', delimiter=',')\nSimStats_eNet = np.loadtxt( 'output_eNet/SimStats_elasticNet_.csv', delimiter=',')\nSimStats_SSL = np.loadtxt( 'output_SSL_Rockova/SimStats_SSL_Rockova_.csv', delimiter=',')\nSimStats_SCAD = np.loadtxt( 'output_SCAD/SimStats_SCAD_.csv', delimiter=',')\n\nxaxis = np.array([ 100, 200, 300, 400, 500])\n\nylabels = [r'$r(M_{o}|y)$'+' or '+r'$P(M_{o}|y)$', r'$r(M_{o}|y)$'+' or '+r'$P(M_{o}|y)$',\n\t\t 'Proportion of correct model selections', 'Proportion of correct model selections',\n\t \t 'RMSE', 'RMSE']\nfilename = ['postProbs.pdf', 'postProbs_corr.pdf', \n\t\t\t'trueModelProp.pdf', 'trueModelProp_corr.pdf',\n\t\t\t'rmse.pdf', 'rmse_corr.pdf']\n\n#------------------------------------------------------\nfor p in range(6):\n\tpyplot.plot( xaxis, SimStats_GFI[:,p], color='black',linestyle='solid',\n\t linewidth=3,label=r'$\\varepsilon$'+'-admissible subsets')\n\tpyplot.plot( xaxis, SimStats_nonlocal[:,p],color='black',linestyle=(0,(1,1)),linewidth=3,label='nonlocal prior')\n\t\n\tif p > 1:\n\t\tpyplot.plot( xaxis, SimStats_SSL[:,p], color='black', linestyle=(0,(5,1)), linewidth=3, label='SSL')\n\t\tpyplot.plot( xaxis, SimStats_eNet[:,p], color='black', marker='^', markersize='10', label='elastic net')\n\t\tpyplot.plot( xaxis, SimStats_SCAD[:,p], color='black', marker='o', markersize='10', label='SCAD')\n\n\tpyplot.yticks( np.arange(1.1, step=.1) if p < 4 else np.arange(1,1.5, step=.1), fontsize=12)\n\tpyplot.xticks(xaxis, fontsize=12)\n\tpyplot.xlabel('p', fontsize=16)\n\tpyplot.ylabel(ylabels[p], fontsize=16)\n\tpyplot.title( r'$\\rho = 0$' if p==0 else ( r'$\\rho = .25$' if p==1 else '' ), fontsize=20)\n\tif p == 2:\n\t\tpyplot.legend(loc='center left', fontsize=16)\n\tpyplot.savefig(Dir+filename[p])\n\tpyplot.clf()\n#------------------------------------------------------\n\n","sub_path":"WilliamsHannig-Code/SimulationSetup_1/Outfile.py","file_name":"Outfile.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"97195287","text":"from selenium import webdriver\r\n#from urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\n\r\nclass Player():\r\n\r\n def __init__(self):\r\n self.name = \"\"\r\n self.link = \"\"\r\n self.height = \"\"\r\n self.weight = \"\"\r\n self.born = \"\"\r\n\"\"\"\r\n def get_details(self,names , links):\r\n #name = self.name\r\n #link = self.link\r\n full_info = {}\r\n i=0\r\n for 
na in names:\r\n full_info[na] = links[i]\r\n i+=1\r\n return full_info \r\n\"\"\"\r\ndef players_details():\r\n driver = webdriver.PhantomJS(\"E:\\\\Udemy web scraping\\\\phantomjs-2.1.1-windows\\\\bin\\\\phantomjs\")\r\n driver.get(\"http://www.nba.com/players/\")\r\n html_text = driver.page_source\r\n\r\n #html_text = urlopen(\"http://www.nba.com/players/\")\r\n bsObj = BeautifulSoup(html_text , 'lxml')\r\n\r\n \r\n div = bsObj.find(\"div\" , {\"id\":\"player-list\"})\r\n players_list = []\r\n for a in div.find_all(\"a\"):\r\n span = a.find(\"span\" ,class_=\"name-label\")\r\n one_player = Player()\r\n one_player.name = span.text\r\n one_player.link = a['href']\r\n players_list.append(one_player)\r\n driver.quit()\r\n return players_list\r\n\r\n\r\ndef get_details_for_all_players(players_list):\r\n\r\n driver = webdriver.PhantomJS(\"E:\\\\Udemy web scraping\\\\phantomjs-2.1.1-windows\\\\bin\\\\phantomjs\")\r\n for e in players_list[0:2]:\r\n url = e.link\r\n\r\n driver.get(\"http://www.nba.com\"+url)\r\n html_text = driver.page_source\r\n bsObj = BeautifulSoup(html_text , 'lxml')\r\n\r\n height = \"\"\r\n weight = \"\"\r\n h_tag = bsObj.find_all(\"p\" , class_ = \"nba-player-vitals__top-heading\")\r\n #print(h_tag.text)\r\n i = 0\r\n while(i <=1):\r\n for sib in h_tag[i].findNextSiblings():\r\n if(i==0):\r\n height += sib.text\r\n elif(i==1):\r\n weight += sib.text\r\n\r\n i=i+1\r\n \r\n span_tag = bsObj.find(\"span\" , class_=\"nba-player-vitals__bottom-info\")\r\n born = span_tag.text\r\n\r\n\r\n e.height = height\r\n e.weight = weight\r\n e.born = born\r\n\r\n\r\n driver.quit()\r\n return (players_list)\r\n\r\n\r\nget = get_details_for_all_players(players_details())\r\nfor e in get[0:2]:\r\n print(\"\\n\")\r\n print(e.name)\r\n print(e.link)\r\n print(e.height)\r\n print(e.weight)\r\n print(e.born)\r\n print(\"\\n\")\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"Project 3 part 2 get the full details of all the players.py","file_name":"Project 3 part 2 get the full details of all the players.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"194009974","text":"import socket\nimport threading\nimport json\nimport keyboard\n\n\nPORT = 5050\nHOST = '13.212.164.38'\nADDRESS = (HOST,PORT)\nHEADER = 64\nFORMAT = 'utf-8'\n\nclient = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nclient.connect(ADDRESS)\nmy_id = str(input('Enter your ID : '))\nmy_rid = str(input('Enter your partner ID : '))\ncontinue_listen = True\n\ndef send_str(msg,option,rid=False,type='string'):\n global my_id\n global continue_listen\n try:\n data_format = {\n 'sid':my_id,\n 'rid':rid,\n 'option':option,\n 'status':200,\n 'type':type,\n 'data':msg\n }\n data_format = json.dumps(data_format)\n msg_len = len(data_format)\n send_msg_len = f\"{msg_len}{' '*(HEADER - len(str(msg_len)))}\"\n client.send(send_msg_len.encode(FORMAT))\n client.send(data_format.encode(FORMAT))\n if option == 'disconnect':\n continue_listen = False\n except:\n pass\n\n\n\ndef listen():\n global my_id\n global continue_listen\n while continue_listen:\n try:\n reply = client.recv(HEADER).decode(FORMAT)\n if reply:\n try:\n reply_len = int(reply)\n except:\n continue\n mesg = client.recv(reply_len).decode(FORMAT)\n mesg_dict = json.loads(mesg)\n if mesg_dict.get('option') == 'init':\n if my_id !=mesg_dict.get('data'):\n send_str(my_id,'init')\n\n elif mesg_dict.get('option') == 'exist':\n print(\"Your ID Already Exists but we replaced it\")\n\n 
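# --- Added example (standalone sketch, names are assumptions): the client in
# --- this record frames messages with a fixed 64-byte header holding the
# --- payload length as padded ASCII digits. recv_exact guards against short
# --- reads, which the bare recv() calls above assume never happen.
HEADER = 64
FORMAT = 'utf-8'

def send_frame(sock, payload):
    data = payload.encode(FORMAT)
    header = str(len(data)).ljust(HEADER)  # same padding as the f-string above
    sock.sendall(header.encode(FORMAT))
    sock.sendall(data)

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        part = sock.recv(n - len(buf))
        if not part:
            raise ConnectionError('socket closed mid-frame')
        buf += part
    return buf

def recv_frame(sock):
    length = int(recv_exact(sock, HEADER).decode(FORMAT).strip())
    return recv_exact(sock, length).decode(FORMAT)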
else:\n print(f\"{mesg_dict.get('sid')} send '{mesg_dict.get('data')}'\")\n except:\n continue\n\nlisten_thread = threading.Thread(target=listen)\nlisten_thread.start()\nwhile True:\n key_value = keyboard.read_event()\n msg = {\n 'key':key_value.__dict__.get('name'),\n 'event_type':key_value.__dict__.get('event_type')\n }\n if my_rid:\n if msg.get('key')=='a' or msg.get('key')=='s' or msg.get('key')=='d' or msg.get('key')=='w':\n send_str(msg,'send',rid=my_rid,type='key')\n\n\n\n\n\n\n\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"203883081","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nLambda Callback\n^^^^^^^^^^^^^^^\n\nCreate a simple callback on the fly using lambda functions.\n\n\"\"\"\n\nfrom typing import Callable, Optional\n\nfrom pytorch_lightning.callbacks.base import Callback\n\n\nclass LambdaCallback(Callback):\n r\"\"\"\n Create a simple callback on the fly using lambda functions.\n\n Args:\n **kwargs: hooks supported by :class:`~pytorch_lightning.callbacks.base.Callback`\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import LambdaCallback\n >>> trainer = Trainer(callbacks=[LambdaCallback(setup=lambda *args: print('setup'))])\n \"\"\"\n\n def __init__(\n self,\n on_before_accelerator_backend_setup: Optional[Callable] = None,\n setup: Optional[Callable] = None,\n teardown: Optional[Callable] = None,\n on_init_start: Optional[Callable] = None,\n on_init_end: Optional[Callable] = None,\n on_fit_start: Optional[Callable] = None,\n on_fit_end: Optional[Callable] = None,\n on_sanity_check_start: Optional[Callable] = None,\n on_sanity_check_end: Optional[Callable] = None,\n on_train_batch_start: Optional[Callable] = None,\n on_train_batch_end: Optional[Callable] = None,\n on_train_epoch_start: Optional[Callable] = None,\n on_train_epoch_end: Optional[Callable] = None,\n on_validation_epoch_start: Optional[Callable] = None,\n on_validation_epoch_end: Optional[Callable] = None,\n on_test_epoch_start: Optional[Callable] = None,\n on_test_epoch_end: Optional[Callable] = None,\n on_epoch_start: Optional[Callable] = None,\n on_epoch_end: Optional[Callable] = None,\n on_batch_start: Optional[Callable] = None,\n on_validation_batch_start: Optional[Callable] = None,\n on_validation_batch_end: Optional[Callable] = None,\n on_test_batch_start: Optional[Callable] = None,\n on_test_batch_end: Optional[Callable] = None,\n on_batch_end: Optional[Callable] = None,\n on_train_start: Optional[Callable] = None,\n on_train_end: Optional[Callable] = None,\n on_pretrain_routine_start: Optional[Callable] = None,\n on_pretrain_routine_end: Optional[Callable] = None,\n on_validation_start: Optional[Callable] = None,\n on_validation_end: Optional[Callable] = None,\n on_test_start: Optional[Callable] = None,\n on_test_end: Optional[Callable] = None,\n 
on_keyboard_interrupt: Optional[Callable] = None,\n on_save_checkpoint: Optional[Callable] = None,\n on_load_checkpoint: Optional[Callable] = None,\n on_after_backward: Optional[Callable] = None,\n on_before_zero_grad: Optional[Callable] = None,\n ):\n if on_before_accelerator_backend_setup is not None:\n self.on_before_accelerator_backend_setup = on_before_accelerator_backend_setup\n if setup is not None:\n self.setup = setup\n if teardown is not None:\n self.teardown = teardown\n if on_init_start is not None:\n self.on_init_start = on_init_start\n if on_init_end is not None:\n self.on_init_end = on_init_end\n if on_fit_start is not None:\n self.on_fit_start = on_fit_start\n if on_fit_end is not None:\n self.on_fit_end = on_fit_end\n if on_sanity_check_start is not None:\n self.on_sanity_check_start = on_sanity_check_start\n if on_sanity_check_end is not None:\n self.on_sanity_check_end = on_sanity_check_end\n if on_train_batch_start is not None:\n self.on_train_batch_start = on_train_batch_start\n if on_train_batch_end is not None:\n self.on_train_batch_end = on_train_batch_end\n if on_train_epoch_start is not None:\n self.on_train_epoch_start = on_train_epoch_start\n if on_train_epoch_end is not None:\n self.on_train_epoch_end = on_train_epoch_end\n if on_validation_epoch_start is not None:\n self.on_validation_epoch_start = on_validation_epoch_start\n if on_validation_epoch_end is not None:\n self.on_validation_epoch_end = on_validation_epoch_end\n if on_test_epoch_start is not None:\n self.on_test_epoch_start = on_test_epoch_start\n if on_test_epoch_end is not None:\n self.on_test_epoch_end = on_test_epoch_end\n if on_epoch_start is not None:\n self.on_epoch_start = on_epoch_start\n if on_epoch_end is not None:\n self.on_epoch_end = on_epoch_end\n if on_batch_start is not None:\n self.on_batch_start = on_batch_start\n if on_validation_batch_start is not None:\n self.on_validation_batch_start = on_validation_batch_start\n if on_validation_batch_end is not None:\n self.on_validation_batch_end = on_validation_batch_end\n if on_test_batch_start is not None:\n self.on_test_batch_start = on_test_batch_start\n if on_test_batch_end is not None:\n self.on_test_batch_end = on_test_batch_end\n if on_batch_end is not None:\n self.on_batch_end = on_batch_end\n if on_train_start is not None:\n self.on_train_start = on_train_start\n if on_train_end is not None:\n self.on_train_end = on_train_end\n if on_pretrain_routine_start is not None:\n self.on_pretrain_routine_start = on_pretrain_routine_start\n if on_pretrain_routine_end is not None:\n self.on_pretrain_routine_end = on_pretrain_routine_end\n if on_validation_start is not None:\n self.on_validation_start = on_validation_start\n if on_validation_end is not None:\n self.on_validation_end = on_validation_end\n if on_test_start is not None:\n self.on_test_start = on_test_start\n if on_test_end is not None:\n self.on_test_end = on_test_end\n if on_keyboard_interrupt is not None:\n self.on_keyboard_interrupt = on_keyboard_interrupt\n if on_save_checkpoint is not None:\n self.on_save_checkpoint = on_save_checkpoint\n if on_load_checkpoint is not None:\n self.on_load_checkpoint = on_load_checkpoint\n if on_after_backward is not None:\n self.on_after_backward = on_after_backward\n if on_before_zero_grad is not None:\n self.on_before_zero_grad = 
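# --- Added note (design observation with an illustrative sketch): every hook
# --- above follows the same "if <hook> is not None: self.<hook> = <hook>"
# --- shape. A generic constructor can assign the supplied hooks in one loop,
# --- at the cost of the explicit, introspectable signature -- likely why the
# --- library spells each hook out. This class is a sketch, not lightning code.
class LambdaCallbackSketch:
    def __init__(self, **hooks):
        for name, fn in hooks.items():
            if fn is not None:
                # Bind only the hooks the caller supplied, as above; unlike the
                # real class, nothing validates names against the Callback API.
                setattr(self, name, fn)

# LambdaCallbackSketch(setup=lambda *a: print('setup')) mimics the docstring example.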
on_before_zero_grad\n","sub_path":"pytorch_lightning/callbacks/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"31442712","text":"\nfrom __future__ import absolute_import \ntry: \n\t__VICE_SETUP__ \nexcept NameError: \n\t__VICE_SETUP__ = False \n\nif not __VICE_SETUP__: \n\n\t__all__ = [\"test\"] \n\tfrom .....testing import moduletest \n\tfrom ._generic import generic_test \n\tfrom ._no_migration import no_migration_test \n\tfrom ._separation import separation_test \n\n\t@moduletest \n\tdef test(): \n\t\tr\"\"\" \n\t\tvice.core.multizone edge cases module test \n\t\t\"\"\" \n\t\treturn [\"vice.core.multizone edge cases\", \n\t\t\t[ \n\t\t\t\tgeneric_test(run = False), \n\t\t\t\tno_migration_test(run = False), \n\t\t\t\tseparation_test(run = False) \n\t\t\t] \n\t\t] \n\nelse: \n\tpass \n","sub_path":"vice/src/multizone/tests/cases/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"470310341","text":"import json\nfrom DateTime import DateTime\nfrom Acquisition import aq_inner\nfrom five import grok\nfrom plone import api\nfrom plone.directives import form\n\nfrom zope import schema\nfrom zope.schema.vocabulary import getVocabularyRegistry\nfrom zope.component import getMultiAdapter\nfrom zope.component import queryUtility\n\nfrom zope.lifecycleevent import modified\n\nfrom zope.component.hooks import getSite\n\nfrom plone.app.textfield import RichText\nfrom plone.namedfile.interfaces import IImageScaleTraversable\nfrom plone.namedfile.field import NamedBlobImage\nfrom plone.indexer import indexer\nfrom Products.CMFCore.utils import getToolByName\n\nfrom plone.app.contentlisting.interfaces import IContentListing\nfrom plone.registry.interfaces import IRegistry\nfrom plone.uuid.interfaces import IUUID\nfrom plone.app.layout.globals.interfaces import IViewView\nfrom plone.app.layout.viewlets.interfaces import IAboveContent\n\nfrom pressapp.presscontent import MessageFactory as _\n\n\nclass IPressRelease(form.Schema, IImageScaleTraversable):\n \"\"\"\n A press release content type.\n \"\"\"\n kicker = schema.TextLine(\n title=_(u\"Kicker\"),\n description=_(u\"Enter optional kicker / teaser line.\"),\n required=False,\n )\n title = schema.TextLine(\n title=_(u\"Title\"),\n required=True,\n )\n subtitle = schema.TextLine(\n title=_(u\"Subtitle\"),\n description=_(u\"Please enter an optional subtitle here.\"),\n required=False,\n )\n location = schema.TextLine(\n title=_(u\"Location\"),\n description=_(u\"Provide a location for this press release that will \"\n u\"be prepended to the main body text.\"),\n required=True,\n )\n text = RichText(\n title=_(u\"Text\"),\n required=True,\n )\n form.primary('image')\n image = NamedBlobImage(\n title=_(u\"Image Attachment\"),\n description=_(u\"Upload an image for this press release. 
The \"\n u\"image should be already optimized since sending \"\n u\"a large image file via E-mail is not recommended\"),\n required=True,\n )\n imagename = schema.TextLine(\n title=_(u\"Image Title\"),\n required=True,\n )\n caption = schema.TextLine(\n title=_(u\"Image Attachment Caption\"),\n description=_(u\"Enter optional caption describing the image\"),\n required=False,\n )\n description = schema.Text(\n title=_(u\"Summary\"),\n description=_(u\"Optional summary that is useful as a preview text \"\n u\"in email clients that support this feature.\"),\n required=False,\n )\n archive = schema.Bool(\n title=_(u\"Visible in Archive?\"),\n description=_(u\"Mark this press release as visible in the archive.\"),\n required=False,\n default=True,\n )\n snippet = schema.Bool(\n title=_(u\"Visible on main site?\"),\n description=_(u\"Mark this press release as suitable for displaying as \"\n u\"snippet on the main site.\"),\n required=False,\n default=False,\n )\n distributor = schema.List(\n title=_(u\"Selected Dsitributors\"),\n description=_(u\"Select external distributors to filter display in \"\n u\"the press archive listing\"),\n value_type=schema.Choice(\n title=_(u\"Distributor\"),\n vocabulary='pressapp.presscontent.externalDistributors',\n ),\n required=False,\n )\n\n\n@grok.adapter(IPressRelease, name=\"archive\")\n@indexer(IPressRelease)\ndef archiveIndexer(context):\n return context.archive\n\n\n@grok.adapter(IPressRelease, name=\"snippet\")\n@indexer(IPressRelease)\ndef snippetIndexer(context):\n return context.snippet\n\n\n@grok.adapter(IPressRelease, name=\"distributor\")\n@indexer(IPressRelease)\ndef distributorIndexer(context):\n return context.distributor\n\n\nclass View(grok.View):\n grok.context(IPressRelease)\n grok.implements(IViewView)\n grok.require('cmf.ModifyPortalContent')\n grok.name('view')\n\n def update(self):\n self.has_files = len(self.contained_attachments()) > 0\n\n def has_channel_info(self):\n context = aq_inner(self.context)\n channel = getattr(context, 'channel', None)\n if channel:\n return True\n\n def channel_names(self):\n context = aq_inner(self.context)\n names = []\n registry = queryUtility(IRegistry)\n if registry:\n records = registry['pressapp.channelmanagement.channelList']\n channels = getattr(context, 'channel', None)\n for channel in channels:\n info = {}\n info['channel'] = channel\n try:\n channelname = records[channel]\n except KeyError:\n channelname = channel\n info['channelname'] = channelname\n names.append(info)\n return names\n\n def distributors(self):\n context = aq_inner(self.context)\n vr = getVocabularyRegistry()\n dist_vocab = vr.get(context,\n 'pressapp.presscontent.externalDistributors')\n distributor = context.distributor\n data = []\n if distributor:\n for item in distributor:\n info = {}\n term = dist_vocab.getTerm(item)\n info['title'] = term.title\n info['value'] = term.value\n data.append(info)\n return data\n\n def has_recipients_info(self):\n context = aq_inner(self.context)\n recipients = getattr(context, 'recipients', None)\n if recipients:\n return True\n\n def constructPreviewURL(self):\n context = aq_inner(self.context)\n portal_url = api.portal.get().absolute_url()\n uuid = IUUID(context, None)\n url = portal_url + '/@@pressitem-view?uid=' + uuid\n return url\n\n def get_state_info(self, state):\n info = _(u\"draft\")\n if state == 'published':\n info = _(u\"sent\")\n return info\n\n def dispatched_date(self):\n context = aq_inner(self.context)\n date = context.EffectiveDate()\n if not date or date == 'None':\n 
return None\n return DateTime(date)\n\n def user_details(self):\n context = aq_inner(self.context)\n creator = context.Creator()\n user = api.user.get(username=creator)\n fullname = user.getProperty('fullname')\n if fullname:\n return fullname\n else:\n return _(u\"Administrator\")\n\n def contained_attachments(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n items = catalog(portal_type=['pressapp.presscontent.fileattachment',\n 'pressapp.presscontent.imageattachment',\n 'Image'],\n path=dict(query='/'.join(context.getPhysicalPath()),\n depth=1))\n results = IContentListing(items)\n return results\n\n\nclass Preview(grok.View):\n grok.context(IPressRelease)\n grok.require('zope2.View')\n grok.name('pressrelease-preview')\n\n def constructPreviewURL(self):\n context = aq_inner(self.context)\n portal_url = api.portal.get().absolute_url()\n uuid = IUUID(context, None)\n url = portal_url + '/@@pressitem-view?uid=' + uuid\n return url\n\n def get_state_info(self, state):\n info = _(u\"draft\")\n if state == 'published':\n info = _(u\"sent\")\n return info\n\n def user_details(self):\n context = aq_inner(self.context)\n creator = context.Creator()\n user = api.user.get(username=creator)\n fullname = user.getProperty('fullname')\n if fullname:\n return fullname\n else:\n return _(u\"Administrator\")\n\n\nclass AsHtmlView(grok.View):\n grok.context(IPressRelease)\n grok.require('zope2.View')\n grok.name('asHTML')\n\n def additional_data(self):\n context = aq_inner(self.context)\n mtool = getToolByName(context, 'portal_membership')\n member = mtool.getMemberById(context.Creator())\n data = {}\n data['location'] = context.location\n data['img'] = self.getImageTag(context)\n data['date'] = context.Date()\n data['org'] = member.getProperty('organization', '')\n data['link'] = member.getProperty('home_page', '')\n return data\n\n def getImageTag(self, item):\n obj = item\n scales = getMultiAdapter((obj, self.request), name='images')\n scale = scales.scale('image', scale='mini')\n imageTag = None\n if scale is not None:\n imageTag = scale.tag()\n return imageTag\n\n def queryAttachments(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n items = catalog(portal_type=['pressapp.presscontent.fileattachment',\n 'pressapp.presscontent.imageattachment'],\n path=dict(query='/'.join(context.getPhysicalPath()),\n depth=1))\n #results = IContentListing(items)\n return items\n\n\nclass PressReleaseActions(grok.Viewlet):\n grok.name('pressapp.membercontent.PressReleaseActions')\n grok.context(IPressRelease)\n grok.require('zope2.View')\n grok.viewletmanager(IAboveContent)\n\n def update(self):\n context = aq_inner(self.context)\n self.context_url = context.absolute_url()\n self.portal_state = getMultiAdapter((context, self.request),\n name='plone_portal_state')\n self.anonymous = self.portal_state.anonymous()\n\n def homefolder_url(self):\n context = aq_inner(self.context)\n mtool = getToolByName(context, 'portal_membership')\n if not mtool.isAnonymousUser():\n member = mtool.getAuthenticatedMember()\n home_folder = member.getHomeFolder().absolute_url()\n return home_folder\n\n\nclass ArchiveSettings(grok.View):\n grok.context(IPressRelease)\n grok.require('cmf.ModifyPortalContent')\n grok.name('update-archive-settings')\n\n def update(self):\n context = aq_inner(self.context)\n state = self.request.form.get('state', '')\n results = {'results': None,\n 'success': False,\n 'message': ''\n }\n if state:\n if state == 
'true':\n setattr(context, 'archive', True)\n results['success'] = True\n else:\n setattr(context, 'archive', False)\n results['success'] = True\n results['results'] = {\n 'state': 'changed',\n 'transitions': (),\n }\n modified(context)\n context.reindexObject(idxs='modified')\n self.results = results\n\n def render(self):\n results = self.results\n self.request.response.setHeader('Content-Type',\n 'application/json; charset=utf-8')\n return json.dumps(results)\n","sub_path":"src/pressapp.presscontent/pressapp/presscontent/pressrelease.py","file_name":"pressrelease.py","file_ext":"py","file_size_in_byte":11141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"524093592","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom numpy.ma import sort\n\nimport download as dl\nimport model as mod\n\n\ndef plot_scores():\n all_data = pd.DataFrame()\n for ye in range(2020, 2021):\n for le in ['F1']:\n all_data = all_data.append(dl.download_scores(ye, le), ignore_index=True, sort=False)\n\n scores = np.zeros((np.max(all_data.FTAG) + 1, np.max(all_data.FTHG) + 1))\n for i in range(0, np.max(all_data.FTHG)+1):\n for j in range(0, np.max(all_data.FTAG)+1):\n scores[j, i] = len(all_data[(all_data.FTHG == i) & (all_data.FTAG == j)].index)\n fig, ax = plt.subplots()\n im = ax.imshow(scores)\n for i in range(np.max(all_data.FTAG) + 1):\n for j in range(np.max(all_data.FTHG) + 1):\n text = ax.text(j, i, \"{:.2f}\".format(scores[i, j]),\n ha=\"center\", va=\"center\", color=\"w\")\n ax.set_title(\"Log nb scores in French League 1\")\n fig.tight_layout()\n plt.show()\n\n\ndef plot_accuracy():\n acc_traces = mod.get_model()[1]\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.suptitle('Horizontally stacked subplots')\n ax1.plot(acc_traces['nb_epochs'], acc_traces['accuracy_train'])\n ax1.plot(acc_traces['nb_epochs'], acc_traces['accuracy_test'])\n ax2.plot(acc_traces['nb_epochs'], acc_traces['loss_train'])\n ax2.plot(acc_traces['nb_epochs'], acc_traces['loss_test'])\n\n\n#plot_scores()\nplot_accuracy()\n","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"25749424","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type headA, headB: ListNode\n :rtype: ListNode\n \"\"\"\n \n curA, curB = headA, headB\n begin, tailA, tailB = None, None, None\n\n while curA and curB:\n if curA == curB:\n begin = curA\n break\n\n if curA.next:\n curA = curA.next\n elif tailA is None:\n tailA = curA\n curA = headB\n else:\n break\n\n if curB.next:\n curB = curB.next\n elif tailB is None:\n tailB = curB\n curB = headA\n else:\n break\n\n return begin\n\n\none = ListNode(1)\ntwo = ListNode(2)\none.next = two\n\nthree = ListNode(3)\n\ntwo.next = three\n\nfour = ListNode(4)\nfive = ListNode(5)\nfour.next = five\nfive.next = three\n\n# The two chains built above intersect at node 'three'.\nresult = Solution().getIntersectionNode(one, four)\nprint(result.val if result else None)\n","sub_path":"intersection-of-two-linked-list/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} {"seq_id":"570019149","text":"# coding=utf8\n\n__author__ = 'Alexander.Li'\n\nfrom setuptools import setup, find_packages\n\nversion = 
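# --- Added example (hedged alternative to the record above): the classic
# --- O(m+n)-time, O(1)-space intersection trick switches each pointer to the
# --- other list's head once; both then traverse m+n nodes and meet at the
# --- shared node, or at None. Sketch only; assumes the ListNode class above.
def get_intersection_node(head_a, head_b):
    a, b = head_a, head_b
    while a is not b:
        a = a.next if a is not None else head_b
        b = b.next if b is not None else head_a
    return a  # the first shared node, or None when the lists do not intersect

# With the chains built above: get_intersection_node(one, four) is node 'three'.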
'0.7.7'\n\nsetup(name='alva_robot',\n version=version,\n description=\"A robot base of AlvaIM\",\n long_description=\"\"\"\\\nRecieve and process messages and make response for robot of AlvaIM\"\"\",\n classifiers=[],\n keywords='AlvaIM, Robot',\n author='Alexander.Li',\n author_email='superpowerlee@gmail.com',\n url='https://alvaim.com',\n license='GPL',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=True,\n install_requires=[\n # -*- Extra requirements: -*-\n 'pollworker',\n 'secp256k1py',\n 'boto3',\n 'click',\n 'oss2'\n ],\n entry_points={\n 'console_scripts': ['alva-robot=alva_robot.main:main'],\n },\n )\n","sub_path":"pypi_install_script/alva_robot-0.7.7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"514308485","text":"from NetworkxDepGraph import *\r\nfrom SimpleIncludeParser import *\r\n\r\ndef get_includes(file_list, args):\r\n include_parser = IncludeParser(strict=False)\r\n \r\n include_dict = dict()\r\n for file in file_list:\r\n include_dict[os.path.basename(file)] = include_parser.get_includes(file, [], args)\r\n \r\n return include_dict\r\n \r\ndef assemble_graph(include_dict):\r\n g = DepGraph()\r\n \r\n for file in include_dict:\r\n g.add_node(file, include_dict[file])\r\n\r\n g.finalize()\r\n return g\r\n\r\ndef get_parent_dependencies_and_decrement(graph, map_of_dependencies, node):\r\n parents = graph.get_parents(node)\r\n \r\n dependencies = set()\r\n for parent in parents:\r\n assert(parent in map_of_dependencies)\r\n parent_entry = map_of_dependencies[parent]\r\n \r\n assert(parent_entry[2] > 0)\r\n parent_entry[2] -= 1\r\n \r\n dependencies = dependencies.union(parent_entry[0].union(parent_entry[1]))\r\n \r\n if parent_entry[2] == 0:\r\n parent_entry[0] = None\r\n \r\n return [set(parents), dependencies]\r\n \r\ndef get_unnecessary_dependencies(graph):\r\n map_of_dependencies = dict()\r\n topological_ordering = graph.get_topological_ordering()\r\n \r\n for node in topological_ordering:\r\n parents, parent_dependencies = get_parent_dependencies_and_decrement(graph, map_of_dependencies, node)\r\n\r\n # O(len(parents))\r\n current_node_necessary_dependencies = parents - parent_dependencies\r\n current_node_unnecessary_dependencies = parents - current_node_necessary_dependencies\r\n\r\n map_of_dependencies[node] = [parent_dependencies | parents,\r\n current_node_unnecessary_dependencies,\r\n graph.get_num_children(node)]\r\n\r\n # sanity check\r\n for node in map_of_dependencies:\r\n assert(map_of_dependencies[node][2] == 0)\r\n \r\n return map_of_dependencies\r\n\r\ndef get_file_list(filename):\r\n file_list = []\r\n\r\n hfile = open(filename)\r\n for line in hfile:\r\n line = line.strip()\r\n if line != \"\":\r\n file_list.append(line)\r\n \r\n return file_list\r\n \r\nif __name__ == \"__main__\":\r\n file_list = get_file_list(\"file_list.conf\")\r\n\r\n include_dict = get_includes(file_list, [])\r\n g = assemble_graph(include_dict)\r\n \r\n g.plot(\"before.png\")\r\n \r\n unnecessary_dependencies = get_unnecessary_dependencies(g)\r\n for node in unnecessary_dependencies:\r\n deps = unnecessary_dependencies[node][1]\r\n print(node + \" -> \" + str(deps))\r\n \r\n for unnecessary_dep in deps:\r\n g.remove_dependency(node, unnecessary_dep)\r\n\r\n 
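# --- Added note (hedged pointer, not part of prune_dep.py): removing edges
# --- that are implied transitively is the transitive reduction of a DAG, and
# --- recent networkx releases ship it directly -- assuming the include graph
# --- really is acyclic, which get_topological_ordering() above also requires.
import networkx as nx

g = nx.DiGraph([('a.h', 'b.h'), ('b.h', 'c.h'), ('a.h', 'c.h')])
reduced = nx.transitive_reduction(g)  # drops the redundant a.h -> c.h edge
assert sorted(reduced.edges()) == [('a.h', 'b.h'), ('b.h', 'c.h')]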
g.plot(\"after.png\")","sub_path":"prune_dep.py","file_name":"prune_dep.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"124144659","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n#Перестановка элементов списка\n\n\n#создаем список для примера\nstring=input('введите строку\\n')\nsource=list(string)\nprint(source)\n\n#решил сделать через создание нового списка\nindex=0\nresult=[]\nwhile len(result) early_stop:\n if np.unique(nmll_[-early_stop:]).shape[0] == 1:\n break\n return _model, _likel, nmll_[-1]\n X_tr_ = torch.tensor(X_, dtype = torch.float)\n y_tr_ = torch.tensor(y_, dtype = torch.float)\n # initialize likelihood and model\n _likel = gpytorch.likelihoods.GaussianLikelihood()\n _model = _GPR(X_tr_, y_tr_, _likel, kernel, degree, num_dim, random_init)\n return __optimize(_model, _likel, X_tr_, y_tr_, max_training_iter, early_stop)\n\n# Select the best model using multiple initializations\ndef _model_selection(X_, y_, kernel, degree, num_dim, n_random_init, max_training_iter, early_stop):\n # Storage Variables Initialization\n model_ = []\n nmll_ = []\n # No Random Initialization\n _GPR, _likel, nmll = _GPR_fit(X_, y_, kernel, degree, num_dim, max_training_iter, early_stop, random_init = False)\n # Get Results\n model_.append([_GPR, _likel])\n nmll_.append(nmll)\n # Perform multiple Random Initializations\n for i in range(n_random_init):\n _GPR, _likel, nmll = _GPR_fit(X_, y_, kernel, degree, num_dim, max_training_iter, early_stop, random_init = True)\n # Get Results\n model_.append([_GPR, _likel])\n nmll_.append(nmll)\n # Best Results of all different Initialization\n _GPR, _likel = model_[np.argmin(nmll_)]\n nmll = nmll_[np.argmin(nmll_)]\n return _GPR, _likel, nmll\n\n# Calculing prediction for new sample\ndef _GPR_predict(_model, _likel, X_):\n X_ts_ = torch.tensor(X_, dtype = torch.float)\n _model.eval()\n _likel.eval()\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n _f_hat = _likel(_model(X_ts_))\n return _f_hat.mean.numpy(), _f_hat.variance.numpy()\n\n# Gaussian Process Regression Chain\ndef _get_GPR_prediction_chain(data_, kernel, degree):\n N_tr = data_[0][0][0].shape[0]\n N_ts = data_[0][1][0].shape[0]\n N_tasks = len(data_)\n Y_recurrent_tr_ = np.empty((N_tr, 0))\n Y_recurrent_ts_ = np.empty((N_ts, 0))\n Y_ts_hat_ = np.zeros((N_ts, N_tasks))\n S2p_ts_hat_ = np.zeros((N_ts, N_tasks))\n s2n_ts_hat_ = np.zeros((N_tasks))\n nmll_ts_ = np.zeros((N_tasks))\n t_tr = 0.\n t_ts = 0.\n model_ = []\n # Loop over independet outputs\n for i_task in range(N_tasks):\n X_tr_, y_tr_ = data_[i_task][0]\n X_ts_, y_ts_ = data_[i_task][1]\n X_tr_ = np.concatenate((X_tr_, Y_recurrent_tr_), axis = 1)\n X_ts_ = np.concatenate((X_ts_, Y_recurrent_ts_), axis = 1)\n # Add Bias\n X_tr_ = np.concatenate((X_tr_, np.ones((X_tr_.shape[0], 1))), axis = 1)\n X_ts_ = np.concatenate((X_ts_, np.ones((X_ts_.shape[0], 1))), axis = 1)\n # Training Model\n t_init = time.time()\n _GPR, _likel, nmll_ts_[i_task] = _model_selection(X_tr_, y_tr_[:, 0], kernel, degree, num_dim = X_tr_.shape[1],\n n_random_init = 4,\n max_training_iter = 250,\n early_stop = 3)\n t_tr += time.time() - t_init\n # Testing Model\n t_init = time.time()\n Y_ts_hat_[:, i_task], S2p_ts_hat_[:, i_task] = _GPR_predict(_GPR, _likel, X_ts_)\n s2n_ts_hat_[i_task] = _likel.noise\n t_ts += time.time() - t_init\n # Update Covariate for Next Chain prediction\n Y_recurrent_tr_ = np.concatenate((Y_recurrent_tr_, y_tr_), axis = 1)\n 
Y_recurrent_ts_ = np.concatenate((Y_recurrent_ts_, Y_ts_hat_[:, i_task][:, np.newaxis]), axis = 1)\n model_.append([_GPR, _likel])\n return nmll_ts_, Y_ts_hat_, S2p_ts_hat_, s2n_ts_hat_, [t_tr, t_ts], model_\n\n# Implementation of K-fold cross-validation\ndef _KFold_CV(data_, kernel, degree, n_kfolds):\n N_tasks = len(data_)\n E_ = np.zeros((n_kfolds, N_tasks))\n NMLL_ = np.zeros((n_kfolds, N_tasks))\n j = 0\n for idx_val_tr_, idx_val_ts_ in KFold(n_splits = n_kfolds,\n random_state = None,\n shuffle = False).split(data_[0][0]):\n data_val_ = []\n scaler_val_ = []\n for i_task in range(N_tasks):\n # Validation training and testing sets\n X_val_tr_ = data_[i_task][0][idx_val_tr_, :]\n Y_val_tr_ = data_[i_task][1][idx_val_tr_, :]\n X_val_ts_ = data_[i_task][0][idx_val_ts_, :]\n Y_val_ts_ = data_[i_task][1][idx_val_ts_, :]\n # Get Outliers Index\n outliers_idx_val_tr_ = _get_outliers_index(X_val_tr_, n_samples = 2300)\n X_val_tr_ = X_val_tr_[outliers_idx_val_tr_, :]\n Y_val_tr_ = Y_val_tr_[outliers_idx_val_tr_, :]\n # Define Data Standarization\n _scaler_x = StandardScaler().fit(X_val_tr_)\n _scaler_y = StandardScaler().fit(Y_val_tr_)\n # Performe Data Standarization\n X_val_tr_prime_ = _scaler_x.transform(X_val_tr_)\n #Y_val_tr_prime_ = _scaler_y.transform(Y_val_tr_)\n Y_val_tr_prime_ = Y_val_tr_\n X_val_ts_prime_ = _scaler_x.transform(X_val_ts_)\n print(j, i_task, X_val_tr_prime_.shape, Y_val_tr_prime_.shape, X_val_ts_prime_.shape, Y_val_ts_.shape)\n # Save Dataset\n data_val_.append([[X_val_tr_prime_, Y_val_tr_prime_], [X_val_ts_prime_, Y_val_ts_]])\n scaler_val_.append([_scaler_x, _scaler_y])\n #try:\n # Training and Testing GPR\n nmll_ts_, Y_val_ts_hat_prime_ = _get_GPR_prediction_chain(data_val_, kernel, degree)[:2]\n # Undo Normalization of the prediction\n Y_val_ts_hat_ = np.zeros(Y_val_ts_hat_prime_.shape)\n Y_val_ts_ = np.zeros(Y_val_ts_hat_prime_.shape)\n for i_task in range(N_tasks):\n #Y_val_ts_hat_[:, i_task] = scaler_val_[i_task][1].inverse_transform(Y_val_ts_hat_prime_[:, i_task][:, np.newaxis])[:, 0]\n Y_val_ts_hat_[:, i_task] = Y_val_ts_hat_prime_[:, i_task]\n Y_val_ts_[:, i_task] = data_val_[i_task][1][1][:, 0]\n E_[j, :] = mean_absolute_percentage_error(Y_val_ts_, Y_val_ts_hat_)\n NMLL_[j, :] = nmll_ts_\n print(E_[j, :])\n print(NMLL_[j, :])\n #Compute Validation error metrics\n # except:\n # print('ERROR!')\n # print(theta_)\n # e_[j] = 1e10\n j += 1\n return np.mean(E_, axis = 0), np.mean(NMLL_, axis = 0)\n\n# GPR K-Fold Cross-Validation of the model Parameters\ndef _get_GPR_cross_validation(data_, kernel, degree, n_kfolds):\n # Kfold Cross-validation Implementation\n e_, nmll_ = _KFold_CV(data_, kernel, degree, n_kfolds)\n return e_, nmll_\n\n# GPR Model validation without kernels\ndef _meta_GPR_cross_validation(data_, kernel, degree, n_kfolds):\n # RVM Parameters Cross-validation\n e_val_, nmll_val_ = _get_GPR_cross_validation(data_, kernel, degree, n_kfolds)\n return e_val_, nmll_val_\n\n# Model Traning and Testing\ndef _GPR_traing_and_testing(data_, kernel, degree):\n N_tasks = len(data_)\n dataset_ = []\n scaler_ = []\n # Loop over Task doing the stardarization\n for i_task in range(N_tasks):\n # Validation training and testing sets\n X_tr_ = data_[i_task][0][0]\n Y_tr_ = data_[i_task][0][1]\n X_ts_ = data_[i_task][1][0]\n Y_ts_ = data_[i_task][1][1]\n # Define Data Standarization\n _scaler_x = StandardScaler().fit(X_tr_)\n _scaler_y = StandardScaler().fit(Y_tr_)\n # Performe Data Standarization\n X_tr_prime_ = _scaler_x.transform(X_tr_)\n #Y_tr_prime_ = 
_scaler_y.transform(Y_tr_)\n Y_tr_prime_ = Y_tr_\n X_ts_prime_ = _scaler_x.transform(X_ts_)\n print(i_task, X_tr_prime_.shape, Y_tr_prime_.shape, X_ts_prime_.shape, Y_ts_.shape)\n # Save Dataset\n dataset_.append([[X_tr_prime_, Y_tr_prime_], [X_ts_prime_, Y_ts_]])\n scaler_.append([_scaler_x, _scaler_y])\n #try:\n # Training and testing GPR\n nmll_ts_, Y_ts_hat_prime_, S2p_ts_hat_prime_, s2n_ts_hat_prime_, time_, _GPR = _get_GPR_prediction_chain(dataset_, kernel, degree)\n # Undo Normalization of the prediction\n Y_ts_hat_ = np.zeros(Y_ts_hat_prime_.shape)\n Sp_ts_hat_ = np.zeros(S2p_ts_hat_prime_.shape)\n sn_ts_hat_ = np.zeros(s2n_ts_hat_prime_.shape)\n Y_ts_ = np.zeros(Y_ts_hat_prime_.shape)\n # Loop over Task undoing the stardarization\n for i_task in range(N_tasks):\n # Y_ts_hat_[:, i_task] = scaler_[i_task][1].inverse_transform(Y_ts_hat_prime_[:, i_task][:, np.newaxis])[:, 0]\n # Sp_ts_hat_[:, i_task] = np.sqrt(S2p_ts_hat_prime_[:, i_task])*scaler_[i_task][1].scale_\n # sn_ts_hat_[i_task] = np.sqrt(s2n_ts_hat_prime_[i_task])*scaler_[i_task][1].scale_\n Y_ts_hat_[:, i_task] = Y_ts_hat_prime_[:, i_task]\n Sp_ts_hat_[:, i_task] = np.sqrt(S2p_ts_hat_prime_[:, i_task])\n sn_ts_hat_[i_task] = np.sqrt(s2n_ts_hat_prime_[i_task])\n Y_ts_[:, i_task] = dataset_[i_task][1][1][:, 0]\n\n e_ts_ = mean_absolute_percentage_error(Y_ts_, Y_ts_hat_)\n #Compute Validation error metrics\n # except:\n # print('ERROR!')\n # print(theta_)\n # e_[j] = 1e10\n return e_ts_, nmll_ts_, Y_ts_hat_, Sp_ts_hat_, sn_ts_hat_, time_, [_GPR, scaler_]\n\ndef _get_covariates(i_task, i_cov = None, i_sec = None):\n # CSI = 0 // PYRA = 2\n idx_pred = 0\n idx_pred_horizon_ = [0, 1, 2, 3, 4, 5]\n # Dataset Covariantes and Predictors Definition\n if i_task == 'persistence': return [idx_pred, idx_pred_horizon_, [0], [], 0, 0, [], [0, 1, 2, 3, 4, 5], []]\n # CSI = 0 // PYRA = 2\n idx_pred = 0\n idx_pred_horizon_ = [i_task]\n # All\n if (i_task == 0) and (i_sec == 0): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 1) and (i_sec == 0): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 2) and (i_sec == 0): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 3) and (i_sec == 0): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 4) and (i_sec == 0): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 5) and (i_sec == 0): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n # Neibors order 1\n if (i_task == 0) and (i_sec == 1): idx_cov_horizon_ = [0, 1, 2, 3]\n if (i_task == 1) and (i_sec == 1): idx_cov_horizon_ = [0, 1, 2, 3, 4]\n if (i_task == 2) and (i_sec == 1): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 3) and (i_sec == 1): idx_cov_horizon_ = [0, 1, 2, 3, 4, 5]\n if (i_task == 4) and (i_sec == 1): idx_cov_horizon_ = [1, 2, 3, 4, 5]\n if (i_task == 5) and (i_sec == 1): idx_cov_horizon_ = [2, 3, 4, 5]\n # Neibors order 2\n if (i_task == 0) and (i_sec == 2): idx_cov_horizon_ = [0, 1, 2]\n if (i_task == 1) and (i_sec == 2): idx_cov_horizon_ = [0, 1, 2, 3]\n if (i_task == 2) and (i_sec == 2): idx_cov_horizon_ = [1, 2, 3, 4]\n if (i_task == 3) and (i_sec == 2): idx_cov_horizon_ = [1, 2, 3, 4]\n if (i_task == 4) and (i_sec == 2): idx_cov_horizon_ = [2, 3, 4, 5]\n if (i_task == 5) and (i_sec == 2): idx_cov_horizon_ = [3, 4, 5]\n # Neibors order 3\n if (i_task == 0) and (i_sec == 3): idx_cov_horizon_ = [0, 1]\n if (i_task == 1) and (i_sec == 3): idx_cov_horizon_ = [0, 1, 2]\n if (i_task == 2) and (i_sec == 3): idx_cov_horizon_ = [1, 2, 3]\n if (i_task == 3) and (i_sec == 3): idx_cov_horizon_ = [2, 3, 4]\n if (i_task == 4) 
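# --- Added note (design suggestion, sketch only): the conditional ladder in
# --- _get_covariates encodes a (i_task, i_sec) -> horizon-window table. A
# --- dict literal keyed by the pair turns that control flow into data and
# --- fails loudly (KeyError) on an unhandled combination:
_HORIZON_TABLE = {
    # (i_task, i_sec): idx_cov_horizon_
    (0, 0): [0, 1, 2, 3, 4, 5],
    (0, 1): [0, 1, 2, 3],
    (0, 2): [0, 1, 2],
    (0, 3): [0, 1],
    # ... remaining pairs transcribed from the conditionals above ...
}

def lookup_horizons(i_task, i_sec):
    return _HORIZON_TABLE[(i_task, i_sec)]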
and (i_sec == 3): idx_cov_horizon_ = [3, 4, 5]\n if (i_task == 5) and (i_sec == 3): idx_cov_horizon_ = [4, 5]\n # Neibors order 4\n if (i_task == 0) and (i_sec == 4): idx_cov_horizon_ = [0, 1, 2, 3]\n if (i_task == 1) and (i_sec == 4): idx_cov_horizon_ = [0, 1, 2]\n if (i_task == 2) and (i_sec == 4): idx_cov_horizon_ = [1, 2, 3]\n if (i_task == 3) and (i_sec == 4): idx_cov_horizon_ = [2, 3, 4]\n if (i_task == 4) and (i_sec == 4): idx_cov_horizon_ = [2, 3, 4, 5]\n if (i_task == 5) and (i_sec == 4): idx_cov_horizon_ = [4, 5]\n # Cross-validation of AR\n cov_idx_0_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [], 0, 0, [], idx_cov_horizon_, []]\n # Cross-validation of AR + Angles\n cov_idx_1_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 0, [], idx_cov_horizon_, []]\n # Cross-validation of AR + Angles + Raw Temperatures\n cov_idx_2_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 0, [0, 1], idx_cov_horizon_, [0]]\n # Cross-validation of AR + Angles + Processed Temperatures\n cov_idx_3_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 3, 0, [0, 1], idx_cov_horizon_, [0]]\n # Cross-validation of AR + Angles + Processed Heights\n cov_idx_4_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 2, [0, 1], idx_cov_horizon_, [1]]\n # Cross-validation of AR + Angles + Raw Temperatures + Processed Heights\n cov_idx_5_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 2, [0, 1], idx_cov_horizon_, [0, 1]]\n # Cross-validation of AR + Angles + Raw Temperatures + Processed Heights + Magnitude\n cov_idx_6_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 2, [0, 1], idx_cov_horizon_, [0, 1, 2]]\n # Cross-validation of AR + Angles + Raw Temperatures + Processed Heights + Magnitude + Divergence\n cov_idx_7_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 2, [0, 1], idx_cov_horizon_, [0, 1, 2, 4]]\n # Cross-validation of AR + Angles + Raw Temperatures + Processed Heights + Magnitude + Divergence + Vorticity\n cov_idx_8_ = [idx_pred, idx_pred_horizon_, [0, 1, 2, 3, 4, 5], [0, 1], 0, 2, [0, 1], idx_cov_horizon_, [0, 1, 2, 3, 4]]\n # Index of all Covariances\n return [cov_idx_0_, cov_idx_1_, cov_idx_2_, cov_idx_3_, cov_idx_4_, cov_idx_5_, cov_idx_6_, cov_idx_7_, cov_idx_8_][i_cov]\n\n# Training and testing without shuffling the samples\ndef _split_dataset(X_, Y_, Z_, idx_tr_, idx_ts_):\n return X_[idx_tr_, :], Y_[idx_tr_, :], Z_[idx_tr_, :], X_[idx_ts_, :], Y_[idx_ts_, :], Z_[idx_ts_, :]\n\n# Add this day samples to the training dataset\ndef _add_samples_to_training_dataset(idx_tr_, idx_ts_, labels_idx_tr_, labels_idx_ts_, day_idx_ts_):\n # Add index to the training set\n idx_tr_prime_ = np.concatenate((idx_tr_, idx_ts_[day_idx_ts_]), axis = 0)\n labels_idx_tr_prime_ = labels_idx_tr_.copy()\n # Add index to the labels traning set\n for i_label in range(len(labels_idx_tr_)):\n labels_idx_tr_prime_[i_label] = np.concatenate((labels_idx_tr_[i_label],\n labels_idx_ts_[i_label][day_idx_ts_]), axis = 0)\n return idx_tr_prime_, labels_idx_tr_prime_\n\n# Local Outlier Factor Indexes\ndef _get_outliers_index(X_, n_samples, n_neighbors = 3):\n _LOF = LocalOutlierFactor(n_neighbors = n_neighbors)\n _LOF.fit_predict(X_)\n p_val_tr_ = _LOF.negative_outlier_factor_\n idx_ = np.zeros(p_val_tr_.shape[0], dtype = bool)\n idx_[np.argsort(p_val_tr_)[-n_samples:]] = True\n return idx_\n\n# Nodes and jobs information for communication from MPI\ni_ker = int(sys.argv[1])\ni_cov = int(sys.argv[2])\ni_sec = int(sys.argv[3])\ni_nor = 
1\ni_label = int(sys.argv[4])\n# Get Experiment for the i-th Job\nkernel, degree = _get_experiment(i_ker)\nprint(i_ker, i_cov, i_sec, i_nor, kernel, degree)\n# Load Dataset\ndataset_ = pickle.load(open('/users/terren/solar_forecasting/data/dataset_v31-1.pkl','rb'))\n# Index of training samples with no detected clouds\nidx_0_tr_ = pickle.load(open('/users/terren/solar_forecasting/data/clear_sky_index_v31-1.pkl', 'rb'))\n# Load Weather Features\nW_tr_, W_ts_ = pickle.load(open('/users/terren/solar_forecasting/data/weather_features_v31-1.pkl','rb'))\n# Load Training and Testing indexes\nidx_tr_, idx_ts_ = pickle.load(open('/users/terren/solar_forecasting/data/training_testing_index_v31-1.pkl','rb'))\n# Load Persistent Pyranometer and Clear Sky Index\nP_ts_, P_ts_hat_persistence_ = pickle.load(open('/users/terren/solar_forecasting/data/pyra_persistence_v31-1.pkl','rb'))\nC_ts_, C_ts_hat_persistence_ = pickle.load(open('/users/terren/solar_forecasting/data/csi_persistence_v31-1.pkl','rb'))\n# Load Atmospheric condition label indexes\nlabels_idx_tr_, labels_idx_ts_ = pickle.load(open('/users/terren/solar_forecasting/data/labels_index_v31-1.pkl','rb'))\n\n\nN_tasks = 6\ndata_val_ = []\n# Loop over forcasting Horizons\nfor i_task in range(N_tasks):\n # Generate database\n X_, Y_, Z_ = _generate_database(dataset_, cov_idx_ = _get_covariates(i_task, i_cov, i_sec))\n # Traning and Testing Dataset\n X_tr_, Y_tr_, Z_tr_, X_ts_, Y_ts_, Z_ts_ = _split_dataset(X_, Y_, Z_, idx_tr_, idx_ts_)\n # Get Traning Index with only Clear Sky days when label is of a clear sky day\n idx_val_tr_ = labels_idx_tr_[i_label]\n # Get Validation Dataset\n X_tr_ = X_tr_[idx_val_tr_, :]\n Y_tr_ = Y_tr_[idx_val_tr_, :]\n print(i_task, X_tr_.shape, Y_tr_.shape)\n # Save Dataset\n data_val_.append([X_tr_, Y_tr_])\n\n# Cross-Validate Kernel Learning Model\ne_val_machine_, nmll_val_machine_ = _meta_GPR_cross_validation(data_val_, kernel, degree, n_kfolds = 3)\nprint(e_val_machine_)\nprint(nmll_val_machine_)\n\ndata_ = []\n# Loop over forcasting Horizons\nfor i_task in range(N_tasks):\n # Generate database\n X_, Y_, Z_ = _generate_database(dataset_, cov_idx_ = _get_covariates(i_task, i_cov, i_sec))\n # Split in Training and testing Dataset\n X_tr_, Y_tr_, Z_tr_, X_ts_, Y_ts_, Z_ts_ = _split_dataset(X_, Y_, Z_, idx_tr_, idx_ts_)\n # Get Traning Index with only Clear Sky days when label is of a clear sky day\n idx_tr_prime_ = labels_idx_tr_[i_label]\n idx_ts_prime_ = labels_idx_ts_[i_label]\n # Get what is Not Outliers Index\n outliers_idx_tr_prime_ = _get_outliers_index(X_tr_[idx_tr_prime_, :], n_samples = 3500)\n # Select Training and Testing data\n X_tr_ = X_tr_[idx_tr_prime_, :][outliers_idx_tr_prime_, :]\n Y_tr_ = Y_tr_[idx_tr_prime_, :][outliers_idx_tr_prime_, :]\n X_ts_ = X_ts_[idx_ts_prime_, :]\n Y_ts_ = Y_ts_[idx_ts_prime_, :]\n print(X_tr_.shape, Y_tr_.shape, X_ts_.shape, Y_ts_.shape)\n data_.append([[X_tr_, Y_tr_], [X_ts_, Y_ts_]])\n\n# Training and Testing of the Cross-Validate Kernel Learning Model\ne_ts_machine_, nmll_ts_machine_, Y_ts_hat_, Sp_ts_hat_, sn_ts_hat_, time_, _model = _GPR_traing_and_testing(data_, kernel, degree)\nprint(e_ts_machine_)\nprint(nmll_ts_machine_)\n\n# Define directory Roor\nroot = '/users/terren/solar_forecasting'\n# Save Errors\nname = r'{}/logs/kernel_learning/GPRs/CV-RGPR_S0_v31-1_{}.csv'.format(root, i_label)\nwith open(name, 'a', newline = '\\n') as f:\n writer = csv.writer(f)\n writer.writerow([i_ker, i_cov, i_sec, i_nor] + time_ + e_val_machine_.tolist() + e_ts_machine_.tolist())\n# Save 
Errors\nname = r'{}/logs/kernel_learning/GPRs/NMLL-CV-RGPR_S0_v31-1_{}.csv'.format(root, i_label)\nwith open(name, 'a', newline = '\\n') as f:\n writer = csv.writer(f)\n writer.writerow([i_ker, i_cov, i_sec, i_nor] + time_ + nmll_val_machine_.tolist() + nmll_ts_machine_.tolist())\n# Save Results\nname = r'{}/data/kernel_learning/GPRs/CV-RGPR_S0_v31-1_{}{}{}{}-{}.pkl'.format(root, i_ker, i_cov, i_sec, i_nor, i_label)\nwith open(name, 'wb') as handle:\n pickle.dump([Y_ts_hat_, Sp_ts_hat_, sn_ts_hat_], handle, protocol = pickle.HIGHEST_PROTOCOL)\n# Save Models\n# name = r'{}/model/kernel_learning/GPRs/CV-RGPR_v31-1_{}{}{}{}-{}.pkl'.format(root, i_ker, i_cov, i_sec, i_nor, i_label)\n# with open(name, 'wb') as handle:\n# pickle.dump(_model, handle, protocol = pickle.HIGHEST_PROTOCOL)\n","sub_path":"CV-RGPR.py","file_name":"CV-RGPR.py","file_ext":"py","file_size_in_byte":22064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"363703568","text":"#%%\nimport toughio\nimport pyvista\n\n\n# Reload mesh\nmesh = toughio.mesh.read(\"../Preprocessing/mesh.pickle\")\n\n# Import TOUGH results into mesh\nmesh.read_output(\"../OUTPUT_ELEME.csv\")\n\n# Plot\np = pyvista.Plotter(window_size = (1200, 1200), notebook = False)\np.add_mesh(\n mesh.to_pyvista(),\n scalars = \"SAT_G\",\n stitle = \"Gas saturation\",\n cmap = \"coolwarm\",\n n_colors = 20,\n show_edges = False,\n edge_color = (0.5, 0.5, 0.5),\n scalar_bar_args = {\n \"height\": 0.1,\n \"width\": 0.5,\n \"position_x\": 0.75,\n \"position_y\": 0.01,\n \"vertical\": False,\n \"n_labels\": 4,\n \"fmt\": \"%.3f\",\n \"title_font_size\": 20,\n \"font_family\": \"arial\",\n \"shadow\": True,\n },\n)\np.show_grid(\n show_xaxis = True,\n show_yaxis = False,\n show_zaxis = True,\n xlabel = \"Distance (m)\",\n zlabel = \"Elevation (m)\",\n ticks = \"outside\",\n font_family = \"arial\",\n shadow = True,\n)\np.view_xz()\np.show()","sub_path":"samples/eco2n/CR2011/Postprocessing/plot_pyvista.py","file_name":"plot_pyvista.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"429027412","text":"num1 = \"1234567\"\r\nnum2 = \"123456789\"\r\n\r\nnum1 = '0'*(max(len(num2)-len(num1), 0)+1) + num1\r\nnum2 = '0'*(max(len(num1)-len(num2), 0)+1) + num2\r\n\r\nres = []\r\noverflow = 0\r\nfor i, (n1, n2) in enumerate(zip(num1[::-1], num2[::-1])):\r\n print(i, n1, n2)\r\n x = int(n1) + int(n2) + overflow\r\n res.append(x % 10)\r\n overflow = int(x >= 10)\r\n\r\nif res[-1] == 0:\r\n del res[-1]\r\n\r\nprint(''.join([str(i) for i in res[::-1]]))\r\n","sub_path":"415AddStrings.py","file_name":"415AddStrings.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"575718797","text":"import os\nfrom lxml import etree\n\nfrom django.conf import settings\n\nfrom common.retr.farm import Farm\nfrom common.retr.scraper import Scraper, ValidateException\nfrom common.retr.proxypool import Proxypool\nfrom common.utils import parse_number\n\nfrom flipdom.models import Domain\n\n\nclass DomaintoolsParser(Scraper):\n NAME = 'domaintools'\n TIMEOUT = 15\n BASE_URL = 'http://domainreport.domaintools.com/{}'\n DATA = {\n 'dt_screenshots': [' Historical', 0],\n 'dt_whois': [' Historical', 1],\n 'dt_dns': [' Web Hosting', 2],\n 'dt_domains': [' Connected', 3],\n 'dt_pages': [' Pages', 4]\n }\n summary_xpath = etree.XPath(\n '//div[contains(@class, 
\"preview-header\")]//text()')\n no_txts = ['We did not find any results for your lookup',\n 'Domain not found']\n\n def __init__(self):\n self.main_dir = os.path.join(settings.SCRAPER, self.NAME)\n\n etree.set_default_parser(etree.HTMLParser())\n\n proxy_file = os.path.join(settings.DOWNLOADS, 'proxies.lst')\n pp = Proxypool(proxy_file)\n super().__init__(pp, {'User-Agent': settings.USER_AGENT},\n timeout=self.TIMEOUT)\n\n def validate(self, url, r):\n super().validate(url, r)\n\n for t in self.no_txts:\n if t in r.text:\n raise ValidateException('retry', 'noanchor')\n\n if 'www.googletagmanager.com/ns.html?id=GTM-5P2JCN' in r.text:\n pass\n else:\n raise ValidateException('retry', 'noanchor')\n\n def get(self, domains, num_workers=100):\n rd = ({'fullname': _.strip()} for _ in domains)\n\n farm = Farm(num_workers, DomaintoolsParser, rd)\n\n res = []\n for d in farm.run():\n self.info(d)\n try:\n res.append(d)\n except Exception as e:\n self.warn(e)\n\n return res\n\n def do(self, q):\n output_file = os.path.join(self.main_dir, 'output/{}.html')\n fn = output_file.format(q['fullname'])\n\n if not os.access(fn, os.R_OK):\n try:\n url = self.BASE_URL.format(q['fullname'])\n r = self.request('get', url)\n except KeyboardInterrupt:\n return\n with open(fn, 'w') as f:\n f.write(r.text)\n\n yield self.parse(q, fn)\n\n def parse(self, q, fn):\n with open(fn) as f:\n txt = f.read()\n\n for t in self.no_txts:\n if t in txt:\n os.remove(fn)\n yield q\n return\n\n tree = etree.fromstring(txt)\n summary = self.summary_xpath(tree)\n\n if len(summary) < 12:\n print(q['fullname'])\n yield q\n\n try:\n summary[:11] = []\n for key, value in self.DATA.items():\n if summary[value[1]*5 + 1].startswith(value[0]):\n q[key] = parse_number(summary[value[1]*5 + 0])\n else:\n self.error(summary)\n return\n except Exception as e:\n print(e)\n print('Domain not found' not in txt)\n print(q['fullname'])\n # print(txt)\n yield q\n\n def clear(self):\n # files = list(os.walk(self.main_dir))[0][2]\n print('Clearing')\n output_dir = os.path.join(self.main_dir, 'output/')\n files = list(os.walk(output_dir))[0][2]\n c = 0\n l = len(files)\n for fn in files:\n print('%d/%d' % (c, l))\n c += 1\n file_path = os.path.join(output_dir, fn)\n\n data = {}\n fullname = fn[:-5]\n print('Trying %s' % (fullname))\n with open(file_path) as f:\n txt = f.read()\n\n for t in self.no_txts:\n if t in txt:\n os.remove(file_path)\n continue\n\n tree = etree.fromstring(txt)\n summary = self.summary_xpath(tree)\n\n if len(summary) < 12:\n # os.remove(file_path)\n continue\n try:\n summary[:11] = []\n for key, value in self.DATA.items():\n if summary[value[1]*5 + 1].startswith(value[0]):\n data[key] = parse_number(summary[value[1]*5 + 0])\n else:\n self.error(summary)\n continue\n\n except Exception:\n os.remove(file_path)\n continue\n\n print('%s updated' % (fullname))\n print(data)\n os.remove(file_path)\n Domain.objects.filter(fullname=fullname).update(**data)\n","sub_path":"scraper/feature/domaintools.py","file_name":"domaintools.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"249695875","text":"from django.conf.urls import url\r\nfrom django.contrib import admin\r\nfrom . 
import views\r\n\r\nadmin.autodiscover()\r\n\r\n\r\nurlpatterns = [\r\n url(r'^post/(?P[0-9]+)/map/$', views.post_map, name='post_map'),\r\n url(r'^post/(?P[0-9]+)/$', views.post_detail, name='post_detail'),\r\n url(r'^post_user/(?P[0-9]+)/$', views.post_detail_user, name='post_detail_user'),\r\n url(r'^post/new_user/$', views.post_new_user, name='post_new_user'),\r\n url(r'^post/new/$', views.post_new, name='post_new'),\r\n url(r'^post/(?P[0-9]+)/edit/$', views.post_edit, name='post_edit'),\r\n url(r'^post_user/(?P[0-9]+)/edit/$', views.post_edit_user, name='post_edit_user'),\r\n # url(r'^drafts/(\\d+)/$', views.post_draft_list, name='post_draft_list'),\r\n url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),\r\n url(r'^drafts_user/$', views.post_draft_list_user, name='post_draft_list_user'),\r\n url(r'^post/(?P[0-9]+)/publish/$', views.post_publish, name='post_publish'),\r\n url(r'^post_user/(?P[0-9]+)/publish/$', views.post_publish_user, name='post_publish_user'),\r\n url(r'^post/(?P\\d+)/remove/$', views.post_remove, name='post_remove'),\r\n url(r'^post_user/(?P\\d+)/remove/$', views.post_remove_user, name='post_remove_user'),\r\n url(r'^comment/(?P\\d+)/approve/$', views.comment_approve, name='comment_approve'),\r\n url(r'^comment_user/(?P\\d+)/approve/$', views.comment_approve_user, name='comment_approve_user'),\r\n url(r'^comment/(?P\\d+)/remove/$', views.comment_remove, name='comment_remove'),\r\n url(r'^comment_user/(?P\\d+)/remove/$', views.comment_remove_user, name='comment_remove_user'),\r\n # url(r'^comment/(?P\\d+)/edit/$', views.comment_edit, name='comment_edit'),\r\n url(r'^auth/register/$', views.register, name='register'),\r\n url(r'^auth/login/', views.login, name='login'),\r\n url(r'^auth/logout/', views.logout, name='logout'),\r\n url(r'^profile/$', views.user_profile, name='user_profile'),\r\n url(r'^page/(\\d+)/$', views.post_list, name='post_list'),\r\n url(r'^page_user/(\\d+)/$', views.post_list_user, name='post_list_user'),\r\n url(r'^page_user/$', views.post_list_user, name='post_list_user'),\r\n url(r'^', views.post_list, name='post_list'),\r\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"93100837","text":"# Create a toy dataset from labels\ninstances = [Instance({'label': LabelField(str(label))})\n for label in 'abcdefghij']\ndataset = AllennlpDataset(instances)\nvocab = Vocabulary.from_instances(dataset)\ndataset.index_with(vocab)\n\n# Use the default batching mechanism\nprint(\"Default:\")\ndata_loader = DataLoader(dataset, batch_size=3)\nfor batch in data_loader:\n print(batch)\n\n# Use Samplers to customize the sequencing / batching behavior\nsampler = SequentialSampler(data_source=dataset)\nbatch_sampler = BasicBatchSampler(sampler, batch_size=3, drop_last=True)\n\nprint(\"\\nDropping last:\")\ndata_loader = DataLoader(dataset, batch_sampler=batch_sampler)\nfor batch in data_loader:\n print(batch)\n\n# Example: using a RandomSampler instead of a SequentialSampler\nsampler = RandomSampler(data_source=dataset)\nbatch_sampler = BasicBatchSampler(sampler, batch_size=3, drop_last=False)\n\nprint(\"\\nWith RandomSampler:\")\ndata_loader = DataLoader(dataset, batch_sampler=batch_sampler)\nfor batch in data_loader:\n 
print(batch)\n","sub_path":"exercises/part2/reading-data/data_loader_basic.py","file_name":"data_loader_basic.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"111584971","text":"# -*- coding: utf-8 -*-\n\nfrom time import sleep\nimport pymysql.cursors\n\n# Connect to the database\nconnection = pymysql.connect(host='115.159.39.220',\n user='root',\n password='mooc-all',\n db='Moocs',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\ndef Add_user (names):\n\n try:\n with connection.cursor() as cursor:\n Add_sql = \"\"\"INSERT INTO ACMer(name)\n values(\"%s\");\"\"\"\n for item in names :\n cmd = (Add_sql % (item));\n try:\n print(cmd)\n cursor.execute(cmd)\n connection.commit()\n except:\n connection.rollback()\n\n finally:\n connection.close()\n\n\ndef Add_competition(rank):\n try:\n with connection.cursor() as cursor:\n sql = \"\"\"select * from ACMer\"\"\"\n cursor.execute(sql)\n res = cursor.fetchall()\n n_res = {}\n for item in res:\n n_res[item['name']] = item;\n for winner in range(len(rank)):\n winner = len(rank) - 1 - winner;\n n_res[rank[winner]]['times'] = n_res[rank[winner]]['times'] + 1;\n for losser in range(winner + 1, len(rank)):\n if(n_res[rank[winner]]['credits'] < n_res[rank[losser]]['credits'] + 5) :\n n_res[rank[winner]]['credits'] = n_res[rank[losser]]['credits'] + 5\n n_res[rank[winner]]['enemy'] = n_res[rank[losser]]['name']\n\n\n\n update_sql = \"\"\"UPDATE ACMer SET times = %s, credits = %s, enemy = '%s' \n WHERE name = '%s' \"\"\"\n\n for item in rank:\n upup = n_res[item];\n cmd = (update_sql % (upup['times'], upup['credits'], upup['enemy'], upup['name']));\n try:\n print(cmd)\n cursor.execute(cmd)\n connection.commit()\n except:\n connection.rollback()\n\n\n finally:\n connection.close()\n\n\n# Add_user([\"王超毅\", \"\"])\n#\"罗龙君\", \"唐胜洋\", \"顾家祺\"\nAdd_competition([\"唐胜洋\", \"罗龙君\", \"顾家祺\"])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Competition.py","file_name":"Competition.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"269723739","text":"# Based on - https://github.com/adafruit/Adafruit_CircuitPython_INA219/blob/master/adafruit_ina219.py\n\nfrom pyftdi.i2c import I2cController, I2cNackError, I2cPort\ntry:\n import struct\nexcept ImportError:\n import ustruct as struct\n\n\n#pylint: disable=bad-whitespace\n# Internal constants:\n_INA219_DEFAULT_ADDRESS = 0x44\n\n_REG_CONFIG = 0x00\n_REG_SHUNTVOLTAGE = 0x01\n_REG_BUSVOLTAGE = 0x02\n_REG_POWER = 0x03\n_REG_CURRENT = 0x04\n_REG_CALIBRATION = 0x05\n\n_CONFIG_BVOLTAGERANGE_32V = 0x2000\n_CONFIG_SADCRES_12BIT_1S_532US = 0x0018\n_CONFIG_GAIN_8_320MV = 0x1800\n_CONFIG_BADCRES_12BIT = 0x0400\n_CONFIG_MODE_SANDBVOLT_CONTINUOUS = 0x0007\n\n\"\"\"\n=== _to_signed function ===\n\"\"\"\ndef _to_signed(num:int=0)->int:\n if num > 0x7FFF:\n num -= 0x10000\n return num\n\nclass INA219:\n def __init__(self, i2c): \n self.slave=self.__i2c_slave_port(i2c) \n self.i2c_addr =_INA219_DEFAULT_ADDRESS\n # Multiplier in mA used to determine current from raw reading\n self._current_lsb = 0\n # Multiplier in W used to determine power from raw reading\n self._power_lsb = 0\n # Set chip to known config values to start\n self._cal_value = 4096\n\n # call set_calibration_32V_2A\n self.set_calibration_32V_2A()\n\n def __i2c_slave_port(self, i2c:I2cController=None)->I2cPort:\n \"\"\"\n Get slave port \n :args: \n 
i2c:pyftdi.i2c.I2cController - I2C controller object \n :param: \n slave:pyftdi.i2c.I2cPort - \n :return: \n slave port object \n \"\"\"\n try: \n slave = i2c.get_port(_INA219_DEFAULT_ADDRESS)\n except: \n print('Unable to get Port for %s' %_INA219_DEFAULT_ADDRESS)\n exit(1)\n return slave \n\n def _write_register(self, reg:int=0x00, value:int=0):\n \"\"\"\n Write to register\n :args:\n reg:int - register to write to \n value:int - value to write to register\n :param:\n seq:bytearray - bytearry of value \n \"\"\"\n seq = bytearray([(value >> 8) & 0xFF, value & 0xFF])\n self.slave.write_to(reg, seq)\n\n def _read_register(self, reg:int=0x00)->int:\n \"\"\"\n Read from register \n :args: \n reg:int - register to read from \n :param:\n buff : - raw result from read\n :return:\n result from read\n \"\"\"\n buf = self.slave.read_from(reg, 3)\n return (buf[0] << 8) | (buf[1])\n\n def set_calibration_32V_2A(self): \n \"\"\"\n Configures to INA219 to be able to measure up to 32V and 2A of current. \n Counter overflow occurs at 3.2A.\n \"\"\"\n # By default we use a pretty huge range for the input voltage,\n # which probably isn't the most appropriate choice for system\n # that don't use a lot of power. But all of the calculations\n # are shown below if you want to change the settings. You will\n # also need to change any relevant register settings, such as\n # setting the VBUS_MAX to 16V instead of 32V, etc.\n\n # VBUS_MAX = 32V (Assumes 32V, can also be set to 16V)\n # VSHUNT_MAX = 0.32 (Assumes Gain 8, 320mV, can also be 0.16, 0.08, 0.04)\n # RSHUNT = 0.1 (Resistor value in ohms)\n\n # 1. Determine max possible current\n # MaxPossible_I = VSHUNT_MAX / RSHUNT\n # MaxPossible_I = 3.2A\n\n # 2. Determine max expected current\n # MaxExpected_I = 2.0A\n\n # 3. Calculate possible range of LSBs (Min = 15-bit, Max = 12-bit)\n # MinimumLSB = MaxExpected_I/32767\n # MinimumLSB = 0.000061 (61uA per bit)\n # MaximumLSB = MaxExpected_I/4096\n # MaximumLSB = 0,000488 (488uA per bit)\n\n # 4. Choose an LSB between the min and max values\n # (Preferrably a roundish number close to MinLSB)\n # CurrentLSB = 0.0001 (100uA per bit)\n self._current_lsb = .1 # Current LSB = 100uA per bit\n\n # 5. Compute the calibration register\n # Cal = trunc (0.04096 / (Current_LSB * RSHUNT))\n # Cal = 4096 (0x1000)\n self._cal_value = 4096\n\n # 6. Calculate the power LSB\n # PowerLSB = 20 * CurrentLSB\n # PowerLSB = 0.002 (2mW per bit)\n self._power_lsb = .002 # Power LSB = 2mW per bit\n\n # 7. Compute the maximum current and shunt voltage values before overflow\n #\n # Max_Current = Current_LSB * 32767\n # Max_Current = 3.2767A before overflow\n #\n # If Max_Current > Max_Possible_I then\n # Max_Current_Before_Overflow = MaxPossible_I\n # Else\n # Max_Current_Before_Overflow = Max_Current\n # End If\n #\n # Max_ShuntVoltage = Max_Current_Before_Overflow * RSHUNT\n # Max_ShuntVoltage = 0.32V\n #\n # If Max_ShuntVoltage >= VSHUNT_MAX\n # Max_ShuntVoltage_Before_Overflow = VSHUNT_MAX\n # Else\n # Max_ShuntVoltage_Before_Overflow = Max_ShuntVoltage\n # End If\n\n # 8. 
Compute the Maximum Power\n # MaximumPower = Max_Current_Before_Overflow * VBUS_MAX\n # MaximumPower = 3.2 * 32V\n # MaximumPower = 102.4W\n\n # Set Calibration register to 'Cal' calculated above\n self._write_register(_REG_CALIBRATION, self._cal_value)\n\n # Set Config register to take into account the settings above\n config = _CONFIG_BVOLTAGERANGE_32V | \\\n _CONFIG_GAIN_8_320MV | \\\n _CONFIG_BADCRES_12BIT | \\\n _CONFIG_SADCRES_12BIT_1S_532US | \\\n _CONFIG_MODE_SANDBVOLT_CONTINUOUS\n self._write_register(_REG_CONFIG, config)\n\n def shunt_voltage(self): \n \"\"\"\n The shunt voltage (between V+ and V-) in Volts (so +-.327V)\n :param: \n raw_shunt_voltage:int - raw shunt voltage returnd \n shunt_voltage_mv:int - shunt voltage \n :return:\n shunt voltage in least signficant bit is 10uV which is 0.00001 volts\n \"\"\"\n raw_shunt_voltage = self._read_register(_REG_SHUNTVOLTAGE)\n shunt_voltage_mv = _to_signed(raw_shunt_voltage)\n return shunt_voltage_mv * 0.00001\n\n def bus_voltage(self):\n \"\"\"\n The bus voltage (between V- and GND) in Volts\n :param: \n raw_voltage:int - raw bus voltage\n voltage_mv:int - bus voltage\n :return:\n bus voltage signficant bit is 4mV\n \"\"\"\n raw_voltage=self._read_register(_REG_BUSVOLTAGE)\n voltage_mv = _to_signed(raw_voltage) \n return voltage_mv * 0.001\n\n def current_value(self):\n \"\"\"\n current through the shunt resistor in milliamps\n :param: \n raw_current;int raw current\n :return:\n current in milliamps\n \"\"\"\n self._write_register(_REG_CALIBRATION, self._cal_value)\n raw_current=self._read_register(_REG_CURRENT)\n raw_current=_to_signed(raw_current)\n return raw_current * self._current_lsb\n\nif __name__ == '__main__': \n i2c=I2cController()\n i2c.set_retry_count(1)\n i2c.configure('ftdi://ftdi:232h:FT2BZGR5/')\n gd=INA219(i2c)\n print(\"Bus Voltage: {} V\".format(gd.bus_voltage()))\n print(\"Shunt Voltage: {} mV\".format(gd.shunt_voltage() / 1000))\n print(\"Current: {} mA\".format(gd.current_value()))\n","sub_path":"python/foglamp/plugins/south/wind_sensors/ina219.py","file_name":"ina219.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"200962766","text":"import json\nimport random\nfrom sklearn import model_selection as cross_validation, svm, metrics\nimport pandas as pd\n\ninput_raw_embedding_file = 'sanfrancisco/embedding/pca/segment/sanfrancisco_pca4d_and_node2vec_128d_plus_segment.embeddings'\ntag_json_file = 'sanfrancisco/segment/sf_segments_tiger_nametype.json'\nkeyset = ('St',)\nnegtive_fraction = 230\nvalidate_repo = 3\n\npath_array = input_raw_embedding_file.rsplit('.', 1)\nresult_array = path_array[0].split('/', 2)\nlabeled_path = path_array[0] + '_labeled.' 
+ path_array[1]\n\nf_labeled = open(labeled_path, 'w+')\nf_embeddings = open(input_raw_embedding_file, 'r')\nf_nodes_selected = open(tag_json_file, 'r')\n\n\ndef label_embeddings(selected, embeddings, output, keyset, fraction=1, ):\n    data_selected_label = json.loads(selected.readline())\n    positive_count = {k: 0 for k in keyset}\n    negative_count = 0\n    for line in embeddings.readlines():\n        line = line.strip()\n        sid_vector = line.split(' ')\n        sid, node_vec = sid_vector[0], sid_vector[1:]\n        if len(node_vec) < 2:\n            continue\n        type_value = data_selected_label[sid] if sid in data_selected_label else None\n        if type_value is not None and type_value in keyset:\n            output.write(line + ' ' + type_value + '\\n')\n            positive_count[type_value] += 1\n        else:\n            rd = random.randint(0, 999) + 1\n            if rd > fraction:\n                continue\n            output.write(line + ' ' + 'unknown' + '\\n')\n            negative_count += 1\n    print(\"positive count: \", positive_count)\n    print(\"negative count: \", negative_count)\n\n\nlabel_embeddings(f_nodes_selected, f_embeddings, f_labeled, keyset=keyset, fraction=negtive_fraction, )\n\nf_labeled.close()\nf_embeddings.close()\nf_nodes_selected.close()\n\n\n# ======classification=====\nmax_score = 0\nmax_report = None\nfor i in range(validate_repo):\n    data_matrix = pd.read_csv(labeled_path, header=None, sep=' ', index_col=0)\n    # print(tbl.dtypes)\n    rows_size, cols_size = data_matrix.shape\n    label = data_matrix[cols_size]\n    del data_matrix[cols_size]\n\n    # wh = pd.concat(dimensions_64, axis=1)\n\n    data_train, data_test, label_train, label_test = cross_validation.train_test_split(data_matrix, label)\n    clf = svm.LinearSVC(max_iter=10000)\n    clf.fit(data_train, label_train)\n    predict = clf.predict(data_test)\n    ac_score = metrics.accuracy_score(label_test, predict)\n    cl_report = metrics.classification_report(label_test, predict, digits=4)\n    if ac_score > max_score:\n        max_score = ac_score\n        max_report = cl_report\n\nprint(max_score)\nprint(max_report)\n\n","sub_path":"new_classification_tiger.py","file_name":"new_classification_tiger.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"422776707","text":"# 4. 
Найти сумму n элементов следующего ряда чисел: 1, -0.5, 0.25, -0.125,…\r\n# Количество элементов (n) вводится с клавиатуры.\r\n\r\nn = int(input('Введиче длину ряда: '))\r\nsumm = 0\r\n\r\nfor sqrt in range(n):\r\n s = (-0.5)**sqrt\r\n summ += s\r\n\r\nprint(summ)","sub_path":"Lesson_2/les_2_task_4.py","file_name":"les_2_task_4.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"172968552","text":"import random\nfrom pathlib import Path\nimport os\nimport io\nimport pandas as pd\n\nfrom .IDataset import IDataset\n\n\nclass MiniNews (IDataset):\n\n def getDataset(self):\n try:\n path = Path(__file__).parent / \\\n \"../Data/mini-news/dataset.csv\"\n dataset = pd.read_csv(path)\n except:\n dataset = []\n path = Path(__file__).parent / \\\n \"../Data/mini-news/data\"\n for Root, Dirs, Files in os.walk(f\"{path}\"):\n for di in Dirs:\n for root, dirs, files in os.walk(f\"{path}/{di}\"):\n for file in files:\n sub_data = []\n with io.open(f\"{path}/{di}/\"+file, 'r') as f:\n text = f.read()\n sub_data.append(text)\n sub_data.append(di)\n dataset.append(sub_data)\n random.shuffle(dataset)\n for i in range(2000):\n text = dataset[i][0]\n sentences = text.split('\\n')\n b = False\n new = []\n for sen in sentences:\n if sen.startswith('Lines'):\n b = True\n continue\n if b == True:\n new.append(sen)\n text = ' '.join([str(word) for word in new])\n dataset[i][0] = text\n dataset = pd.DataFrame(dataset, columns=['Sentence', 'Category'])\n # dataset.dropna(inplace=True)\n path = Path(__file__).parent / \\\n \"../Data/mini-news/dataset.csv\"\n dataset.to_csv(path, index=False)\n print(\"No csv file was found!, new file was created :)\")\n #dataset = dataset.sample(frac=1).reset_index(drop=True)\n return dataset\n\n def getParameters(self):\n return {\"tweet\": self.tweet, \"stemming\": self.stemming, \"classes_num\": 20}\n\n def __init__(self, tweet=False, stemming=False):\n self.tweet = tweet\n self.stemming = stemming\n\n def __str__(self):\n return 'Mini News'\n\n def getClasses(self):\n return self.getDataset().iloc[:, 1].values\n\n def getFeatures(self):\n return self.getDataset().iloc[:, 0].values\n\n def getPath(self):\n return Path(__file__).parent / \\\n \"../Data/mini-news\"\n\n\n# H = MiniNews(False, True)\n# H.getDataset()\n# H.getDataset()\n","sub_path":"GUI/lib/Library/MiniNews.py","file_name":"MiniNews.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"184699340","text":"# vim: ai ts=4 sts=4 et sw=4\n# encoding=utf-8\nimport re\nimport json\nimport locale\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.core.exceptions import PermissionDenied\nfrom django.forms import ModelForm, ModelChoiceField\nfrom rapidsms_httprouter.models import Message\nfrom rapidsms.contrib.locations.models import Location\nfrom igreport.models import *\nfrom igreport.models import DNDList\nfrom igreport import util, media\nfrom igreport.html.admin import ListStyleAdmin\nfrom igreport.report_admin import ReportAdmin\nfrom igreport.ui.bulksms import *\nfrom igreport.ui.districts import DistrictAdmin\nfrom igreport.unregister import unregister_apps\n\nclass DNDListAdmin(admin.ModelAdmin):\n list_display = ['msisdn', 'notes_', 'date', 'updated_']\n\n def date(self, obj):\n return obj.entry_date.strftime('%d/%m/%Y %H:%M')\n\n date.short_description = 'Entry Date'\n date.admin_order_field = 
'entry_date'\n\n def updated_(self, obj):\n return obj.updated.strftime('%d/%m/%Y %H:%M')\n\n updated_.short_description = 'Updated'\n updated_.admin_order_field = 'updated'\n \n def notes_(self, obj):\n return util.truncate_str(obj.notes, 50)\n \nclass IGReportAdmin(admin.ModelAdmin, ListStyleAdmin):\n\n list_display = ['sender', 'message', 'accused', 'amount_formatted', 'refno', 'report_time', 'options']\n list_filter = ['datetime']\n ordering = ['-datetime']\n date_hierarchy = 'datetime'\n search_fields = ['connection__identity', 'reference_number']\n actions = None\n Media = media.JQueryUIMedia\n change_list_template = 'igreport/change_list.html'\n change_list_results_template = 'igreport/change_list_results.html'\n list_per_page = 50\n\n def __init__(self, *args, **kwargs):\n super(IGReportAdmin, self).__init__(*args, **kwargs)\n self.list_display_links = (None,)\n\n def report_time(self, obj):\n return obj.datetime.strftime('%d/%m/%Y %H:%M')\n\n report_time.short_description = 'Report Date'\n report_time.admin_order_field = 'datetime'\n\n def message(self, obj):\n text = obj.report\n width = ''\n if text and len(text) > 40:\n width = '280px'\n\n style = 'font-size:13px;'\n if width:\n style += 'width:%s;' % width\n if style:\n style = ' style=\"%s\"' % style\n html = '
%s
' % (obj.id, style, text)\n return html\n\n message.short_description = 'Report'\n message.allow_tags = True\n\n def accused(self, obj):\n text = obj.subject\n width = ''\n if text and len(text) > 40:\n width = '200px'\n\n style = 'font-size:13px;'\n if width:\n style += 'width:%s;' % width\n if style:\n style = ' style=\"%s\"' % style\n if not text:\n text = '(none)'\n html = '%s' % (style, text)\n return html\n\n accused.short_description = 'Accused'\n accused.allow_tags=True\n\n def sender(self, obj):\n msisdn = obj.connection.identity\n t = (msisdn, msisdn, msisdn)\n html = '%s' % t\n return html\n \n sender.short_description = 'Sender'\n sender.admin_order_field = 'connection'\n sender.allow_tags = True\n\n def amount_formatted(self, obj):\n if obj.amount:\n amount = int(obj.amount)\n locale.setlocale(locale.LC_ALL, '')\n amount = locale.format(\"%d\", amount, grouping=True)\n currency = ''\n if obj.currency:\n currency = obj.currency.code\n amount = '%s' % amount\n if currency:\n amount = '%s%s' % (currency, amount)\n return amount\n return 'NA'\n \n amount_formatted.short_description = 'Amount'\n amount_formatted.admin_order_field = 'amount'\n amount_formatted.allow_tags=True\n\n def refno(self, obj):\n if not obj.reference_number:\n return '__'\n\n return obj.reference_number\n \n refno.short_description = 'Ref. No'\n refno.admin_order_field = 'reference_number'\n\n def options(self, obj):\n html = ''\n if not obj.synced and not obj.closed:\n link = '  ' % (obj.id, settings.STATIC_URL)\n html += link\n \n link = '
' % (obj.id, settings.STATIC_URL)\n html += link\n \n html = '
%s
' % html\n\n if obj.completed and not obj.synced and not obj.closed:\n d = dict(id=str(obj.id), amount=str(obj.amount) if obj.amount else '', amountff=obj.amount_freeform or '')\n a = json.dumps( d )\n if re.search(\"'\", a):\n a = re.compile(\"'\").sub(\"\", a)\n link = '  ' % (a, settings.STATIC_URL)\n html += link\n \n msisdn = obj.connection.identity\n t = (msisdn, obj.id, msisdn, settings.STATIC_URL)\n html += '' % t\n\n return html\n\n options.short_description = 'Options'\n options.allow_tags = True\n \n def get_row_css(self, obj, index):\n if obj.completed:\n return ' rpt-completed'\n if obj.synced:\n return ' rpt-synced'\n return ''\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def change_view(self, request, object_id, extra_context=None):\n raise PermissionDenied\n \n def changelist_view(self, request, extra_context=None):\n title = 'Reports'\n \n ids = [ '{id:%s,completed:%s,synced:%s,closed:%s}' % \\\n (obj.id, 'true' if obj.completed else 'false', \\\n 'true' if obj.synced else 'false', 'true' if obj.closed else 'false') \\\n for obj in self.queryset(self) ]\n js = '[%s]' % ','.join(ids)\n bottom_js = '\\nvar reports=%s;\\nrptsetc();\\n' % js\n \n #bottom_js=''\n buttons = [ dict(label='Refresh', link=''), dict(label='All Reports', link='?'), dict(label='Completed', link='?completed=1'), dict(label='Synced', link='?synced=1'), dict(label='Closed', link='?closed=1') ]\n context = dict(title=title, include_file='igreport/report.html', bottom_js=bottom_js, buttons=buttons)\n return super(IGReportAdmin, self).changelist_view(request, extra_context=context)\n\nclass MessageLogAdmin(admin.ModelAdmin):\n list_display = ['sender', 'message', 'send_date', 'direction', 'status', 'options']\n search_fields = ('connection__identity', 'text')\n list_filter = ['date', 'direction', 'status']\n actions = None\n date_hierarchy = 'date'\n Media = media.JQueryUIMedia\n change_list_template = 'igreport/change_list.html'\n Media = media.JQueryUIMedia\n \n def __init__(self, *args, **kwargs):\n super(MessageLogAdmin, self).__init__(*args, **kwargs)\n self.list_display_links = (None,)\n \n def sender(self, obj):\n return obj.connection.identity\n \n sender.admin_order_field = 'connection'\n\n def message(self, obj):\n text = obj.text\n if obj.direction == 'I':\n color='#336699'\n else:\n color = '#000'\n width = ''\n if len(text) > 50:\n width = '300px'\n\n style = 'color:%s;font-size:13px;' % color\n if width:\n style += 'width:%s;' % width\n if style:\n style = ' style=\"%s\"' % style\n html = '
%s
' % (obj.id, style, text)\n \n if obj.direction == 'I':\n html += '' % (obj.id, obj.connection.identity)\n return html\n \n message.allow_tags='True'\n\n def send_date(self, obj):\n return obj.date.strftime('%d/%m/%Y %H:%M')\n\n send_date.short_description = 'Send Date'\n send_date.admin_order_field = 'date'\n\n def options(self, obj):\n if obj.direction=='I':\n msisdn = obj.connection.identity\n t = (msisdn, obj.id, msisdn, settings.STATIC_URL)\n links = list()\n links.append( '' % t)\n \n #t = (obj.id, settings.STATIC_URL)\n #links.append( '' % t)\n html = ' '.join(links)\n else:\n html = '[NONE]'\n return html\n\n options.short_description = 'Options'\n options.allow_tags = True\n \n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def changelist_view(self, request, extra_context=None):\n\n buttons = [ {'label': 'Go To Reports', 'link': '../igreport/'}, {'label': 'Refresh', 'link': '?'} ]\n context = dict(title='All Messages', include_file='igreport/report.html', buttons=buttons)\n return super(MessageLogAdmin, self).changelist_view(request, extra_context=context)\n \n def change_view(self, request, object_id, extra_context=None):\n raise PermissionDenied\n\nclass UnprocessedAdmin(admin.ModelAdmin):\n list_display = ['sender', 'message', 'send_date']\n date_hierarchy = 'date'\n search_fields = ('connection__identity', 'text')\n list_filter = ['date']\n actions = None\n change_list_template = 'igreport/change_list.html'\n\n def __init__(self, *args, **kwargs):\n super(UnprocessedAdmin, self).__init__(*args, **kwargs)\n self.list_display_links = (None,)\n \n def sender(self, obj):\n return obj.connection.identity\n \n sender.admin_order_field = 'connection'\n\n def message(self, obj):\n text = obj.text\n \n width = ''\n if len(text) > 50:\n width = '300px'\n \n style = 'font-size:13px;'\n if width:\n style += 'width:%s;' % width\n if style:\n style = ' style=\"%s\"' % style\n html = '
%s
' % (obj.id, style, text)\n return html\n \n message.allow_tags='True'\n\n def send_date(self, obj):\n return obj.date.strftime('%d/%m/%Y %H:%M')\n\n send_date.short_description = 'Send Date'\n send_date.admin_order_field = 'date'\n \n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def changelist_view(self, request, extra_context=None):\n\n buttons = [ {'label': 'Go To Reports', 'link': '../igreport/'}, {'label': 'Refresh', 'link': '?'} ]\n context = {'title': 'Unprocessed Messages', 'buttons': buttons}\n return super(UnprocessedAdmin, self).changelist_view(request, extra_context=context)\n \n def change_view(self, request, object_id, extra_context=None):\n raise PermissionDenied\n \n def queryset(self, request):\n qs = super(UnprocessedAdmin, self).queryset(request)\n return qs.filter(direction='I', application=None)\n\nclass UserProfileForm(ModelForm):\n district = ModelChoiceField(Location.objects.filter(type__name='district').order_by('name'))\n class Meta:\n model = UserProfile\n\nclass UserProfileAdmin(admin.ModelAdmin):\n form = UserProfileForm\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = ['name', 'description']\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['code', 'name']\n\nadmin.site.register(Report, ReportAdmin) \nadmin.site.register(IGReport, IGReportAdmin)\nadmin.site.register(Currency, CurrencyAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(MessageLog, MessageLogAdmin)\nadmin.site.register(Unprocessed, UnprocessedAdmin)\nadmin.site.register(DNDList, DNDListAdmin)\nadmin.site.register(BulkMessage, BulkMessageAdmin)\nadmin.site.register(BulkRecipient, BulkRecipientAdmin)\nadmin.site.register(District, DistrictAdmin)\n\nunregister_apps()\n","sub_path":"igreport_project/igreport_src/igreport/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":12876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"478938904","text":"# https://projecteuler.net/problem=46\nfrom euler import *\nimport time\nT = time.time()\n\n\n# composite sieve, basically a reverse prime sieve\ndef composite_sieve(n):\n sieve = (n+1)*[True]\n limit = math.floor(math.sqrt(n))+1\n\n for i in range(2, limit):\n if sieve[i]:\n for j in range(i*i, n+1, i):\n sieve[j] = False\n\n for k in range(3, n, 2):\n if not sieve[k]:\n yield k\n\n# Goldbach's other conjecture\ndef gboc(n):\n for i in prime_sieve(n):\n solution = math.sqrt((n-i)/2)\n if solution == int(solution):\n return True\n\n return False\n\n\nlimit = 10000\nfor i in composite_sieve(limit+1):\n if not gboc(i):\n print(str(i) + ' cannot be written as the sum of a prime and twice a square')\n break\n\nprint('time elapsed:', time.time() - T)","sub_path":"046.py","file_name":"046.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"419296025","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n==================================================================================================================================================\n 杭州HUB仿真项目\n\n 项目启动日期:2017年7月6日\n 项目启动标识:AIRPORT OF EZHOU'S PROJECT -- HZ\n ===========================================\n 代码创建日期:2017年7月6日\n 代码创建工程师:韩蓝毅\n 代码版本:1.0\n 版本更新日期:2017��7月6日\n 版本更新工程师:韩蓝毅\n\n 代码整体功能描述:终分拣模块,\n 
1、终分拣模拟\n\n\n\n==================================================================================================================================================\n\"\"\"\n\n\nimport simpy\nfrom src.vehicles import Package\nfrom src.utils import PackageRecord\n\n\nclass SecondarySort(object):\n\n def __init__(self,\n env: simpy.Environment(),\n machine_id: tuple,\n pipelines_dict: dict,\n equipment_process_time_dict:dict,\n ):\n\n self.env = env\n self.machine_id = machine_id\n self.pipelines_dict = pipelines_dict\n self.equipment_process_time_dict = equipment_process_time_dict\n self._set_machine()\n\n def _set_machine(self):\n \"\"\"\n \"\"\"\n self.equipment_id = self.machine_id[1] # pipeline id last value, for other machines\n self.process_time = self.equipment_process_time_dict[self.equipment_id]\n self.input_pip_line = self.pipelines_dict[self.machine_id]\n\n def process_package(self, item: Package):\n # package start for process\n item.insert_data(\n PackageRecord(\n equipment_id=self.equipment_id,\n package_id=item.item_id,\n time_stamp=self.env.now,\n action=\"start\", ))\n\n yield self.env.timeout(self.process_time)\n\n # package end for process\n item.insert_data(\n PackageRecord(\n equipment_id=self.equipment_id,\n package_id=item.item_id,\n time_stamp=self.env.now,\n action=\"end\", ))\n\n next_pipeline = item.next_pipeline\n self.pipelines_dict[next_pipeline].put(item)\n\n def run(self):\n while True:\n package = yield self.input_pip_line.get()\n self.env.process(self.process_package(item=package))\n","sub_path":"src/machine/secondary_sort/secondary_sort.py","file_name":"secondary_sort.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"362656132","text":"# -------------------------------\n# Released under MIT License\n# Copyright (c) 2020 TytusDb Team\n\n\nfrom storage.avl import avlMode as avl\nfrom storage.b import BMode as b\nfrom storage.bplus import BPlusMode as bplus\nfrom storage.dict import DictMode as dict\nfrom storage.hash import HashMode as hash\nfrom storage.isam import ISAMMode as isam\nfrom storage.json import jsonMode as json\n\nimport traceback\n\nmodos = {\n \"avl\": avl,\n \"b\": b,\n \"bplus\": bplus,\n \"hash\": hash,\n \"isam\": isam,\n \"dict\": dict,\n \"json\": json\n}\n\n\nclass ForeignKeyStr:\n\n def __init__(self, modo, database, table):\n\n self.modo = modo\n self.database = database\n self.table = table + \"_fk_str\"\n \n self.createTable()\n\n \n def createTable(self):\n \n for modo, func in modos.items():\n\n if self.modo == modo:\n\n var = []\n\n var.append(func.createTable(self.database, self.table, 5))\n var.append(func.alterAddPK(self.database, self.table, [0]))\n\n return var\n \n\n def alterTable(self, table):\n \n for modo, func in modos.items():\n\n if self.modo == modo:\n\n table_old = self.table\n self.table = table + \"_fk_str\"\n return func.alterTable(self.database, table_old, self.table)\n\n\n def dropTable(self):\n\n for modo, func in modos.items():\n\n if self.modo == modo:\n\n return func.dropTable(self.database, self.table)\n\n\n def insert(self, registro):\n \n for modo, func in modos.items():\n\n if self.modo == modo:\n\n return func.insert(self.database, self.table, registro)\n\n\n def delete(self, nombre):\n \n for modo, func in modos.items():\n\n if self.modo == modo:\n\n return func.delete(self.database, self.table, [nombre])\n\n\n def extractTable(self):\n \n for modo, func in modos.items():\n\n if self.modo == modo:\n\n 
return func.extractTable(self.database, self.table)\n\n\n def extractRow(self, nombre):\n \n for modo, func in modos.items():\n\n if self.modo == modo:\n\n return func.extractRow(self.database, self.table, [nombre])\n\n\n def alterTableMode(self, database: str, table: str, mode: str) -> int:\n\n try:\n \n registros = self.extractTable(database, table)\n self.modo = mode\n\n self.dropTable(database, table)\n self.createTable()\n\n for registro in registros:\n self.insert(registro)\n\n return 0\n \n\n except Exception:\n print(\"=\"*30)\n traceback.print_exc()\n return -1\n","sub_path":"storage/fase2/team15/storage/misc/ForeignKeyStr.py","file_name":"ForeignKeyStr.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"80989327","text":"from tessagon.adaptors.list_adaptor import ListAdaptor\n\n\nclass SvgAdaptor(ListAdaptor):\n ADAPTOR_OPTIONS = ['svg_root_tag', 'svg_style',\n 'svg_fill_color', 'svg_fill_colors',\n 'svg_stroke_color', 'svg_stroke_width']\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.svg_root_tag = kwargs.get('svg_root_tag', False)\n # Optional: a string with style information\n self.style = kwargs.get('svg_style')\n self.svg_fill_colors = kwargs.get('svg_fill_colors')\n self.svg_fill_color = kwargs.get('svg_fill_color')\n self.svg_stroke_color = kwargs.get('svg_stroke_color')\n self.svg_stroke_width = kwargs.get('svg_stroke_width')\n\n def get_mesh(self):\n buffer = \"\"\n if self.svg_root_tag:\n if self.svg_root_tag is True:\n buffer += ''\n else:\n buffer += self.svg_root_tag\n buffer += \"\"\n\n style = self.make_style()\n if style:\n buffer += \"\".format(style)\n\n for i in range(len(self.face_list)):\n face = self.face_list[i]\n class_string = \"\"\n if len(self.color_list) > 0:\n color = self.color_list[i]\n class_string = ' class=\"color-{}\"'.format(color)\n verts = [self.vert_list[v] for v in face]\n points_string = \\\n ' '.join([\"{},{}\".format(vert[0],\n vert[1]) for vert in verts])\n buffer += ''.format(points_string,\n class_string)\n buffer += \"\"\n if self.svg_root_tag:\n buffer += \"\"\n return buffer\n\n def make_style(self):\n if self.style:\n return self.style\n\n style = \"\"\n polygon_style = \"\"\n if self.svg_fill_colors:\n for i in range(len(self.svg_fill_colors)):\n style += '.color-{} {{\\n fill:{};\\n}}\\n'.\\\n format(i, self.svg_fill_colors[i])\n if self.svg_stroke_color:\n polygon_style += ' stroke:{};\\n'.format(self.svg_stroke_color)\n if self.svg_stroke_width:\n polygon_style += \" stroke-width:{};\\n\".\\\n format(self.svg_stroke_width)\n if self.svg_fill_color:\n polygon_style += ' fill:{};\\n'.format(self.svg_fill_color)\n\n if len(polygon_style) > 0:\n style += \"polygon {{\\n{}}}\\n\".format(polygon_style)\n\n if len(style) > 0:\n return style\n\n return None\n","sub_path":"tessagon/adaptors/svg_adaptor.py","file_name":"svg_adaptor.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"490749805","text":"import datetime\nfrom flask import url_for\nfrom demo import db\n\n\nclass CreationLog(db.Document):\n instance_id = db.StringField(max_length=255, required=True)\n task_code = db.IntField(required=True)\n #status = db.StringField(max_length=255, required=True)\n status = db.IntField(required=True)\n updated_at = db.DateTimeField(default=datetime.datetime.now, required=True)\n\n def __unicode__(self):\n return 
self.instance_id + ' ' + str(self.updated_at)\n\n    meta = {\n        'allow_inheritance': True,\n        'indexes': ['instance_id', 'task_code'],\n        'ordering': ['updated_at']\n    }\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"163930163","text":"from collections import OrderedDict\nimport knackpy\n\n# import _setpath  # uncomment this for local development\nfrom atd_knack_api._fieldmaps import FIELDMAP\nfrom atd_knack_api import _transforms\nfrom atd_knack_api.secrets import KNACK_CREDENTIALS\n\n\nclass Record(object):\n    \"\"\"\n    Transform a Knack record from a source application to a destination application.\n    \"\"\"\n\n    def __init__(self, app_id_src, app_id_dest, data, record_type=None, callback=False):\n\n        self.app_id_src = app_id_src\n        self.app_id_dest = app_id_dest\n        self.app_name_src = KNACK_CREDENTIALS.get(app_id_src).get(\"name\")\n        self.app_name_dest = KNACK_CREDENTIALS.get(app_id_dest).get(\"name\")\n        self.callback = callback\n        self.data = data\n        self.record_type = record_type\n        self.fieldmap = FIELDMAP.get(record_type)\n\n        if not self.fieldmap:\n            raise Exception(\"Cannot find fieldmap. Unknown record `record_type` provided.\")\n\n        self.direction = self._set_direction()\n        self.fields = self.fieldmap.get(\"fields\")\n        self.knack_cfg = self.fieldmap.get(\"knack_cfg\")\n        self.payload = self._build_payload()\n        self.method = self._set_method()\n\n    def _set_direction(self):\n        \"\"\"\n        Determine the fieldmap \"direction\". This attribute allows the fieldmap to\n        properly filter fields based on the src/dest applications.\n        \"\"\"\n        if self.callback:\n            return f\"callback_{self.app_name_dest}\"\n\n        else:\n            return f\"to_{self.app_name_dest}\"\n\n    def _build_payload(self):\n        \"\"\"\n        Map input data to output fields. Fields not defined in `self.fields` are dropped.\n        \"\"\"\n        payload = {}\n\n        for field in self.fields:\n            if self.direction not in field.get(\"directions\"):\n                # ignore fields that do not support the direction of data flow\n                continue\n\n            src_field_id = field.get(\"apps\").get(self.app_name_src).get(\"id\")\n\n            dest_field_id = field.get(\"apps\").get(self.app_name_dest).get(\"id\")\n\n            transform = field.get(\"apps\").get(self.app_name_dest).get(\"transform\")\n\n            if not src_field_id:\n                \"\"\"\n                Field is not present in src data; use default value.\n                Note that the default value is only applied when the src field\n                is not defined in the field map!\n                \"\"\"\n                val = field.get(\"apps\").get(self.app_name_dest).get(\"default\")\n\n            else:\n                val = self.data.get(src_field_id)\n\n            if transform:\n                func = transform.get(\"name\")\n                config = transform.get(\"config\")\n                val = self._transform(val, func, config)\n\n            payload[dest_field_id] = val\n\n        return payload\n\n    def debug(self):\n        \"\"\"\n        Print a helpful comparison of the input data vs the output payload.\n        \"\"\"\n\n        print(\"\\n========== Record Data ==========\")\n        print(\n            f\"src : {self.app_name_src}\\ndest : {self.app_name_dest}\\ndirection : {self.direction}\"\n        )\n\n        for field in self.fields:\n            d = OrderedDict({})\n\n            if self.direction not in field[\"directions\"]:\n                continue\n\n            src_key = field.get(\"apps\").get(self.app_name_src).get(\"id\")\n            dest_key = field.get(\"apps\").get(self.app_name_dest).get(\"id\")\n\n            print(f\"comment: {field.get('comment')}\")\n            print(f\"src: {self.data.get(src_key)}\")\n            print(f\"dest: {self.payload.get(dest_key)}\")\n            print(\"-------------------------\")\n        print(\"\\n========== End Record Data ==========\")\n\n        return\n\n    def _set_method(self):\n        \"\"\"\n        Determine if the record will be created or updated in the destination app.\n        \"\"\"\n        if self.payload.get(\"id\"):\n            return \"update\"\n        else:\n            return \"create\"\n\n    def _transform(self, val, transform, config):\n        transform_func = getattr(_transforms, transform)\n        if config:\n            \"\"\"\n            Special transforms may have a config and require authentication. 
So we pass the\n config along with the auth if a config is present in the transform definition.\n \"\"\"\n return transform_func(val, config, KNACK_CREDENTIALS[self.app_id_dest])\n else:\n return transform_func(val)\n\n def send(self):\n \"\"\"\n Send the record payload to the dest app.\n \"\"\"\n obj = self.knack_cfg.get(self.app_name_dest).get(\"object\")\n app_id = self.app_id_dest\n api_key = KNACK_CREDENTIALS[app_id][\"api_key\"]\n method = self.method\n\n res = knackpy.record(\n self.payload, obj_key=obj, app_id=app_id, api_key=api_key, method=method\n )\n\n return res\n\n\nif __name__ == \"__main__\":\n import logging\n\n logger = get_logger(\"_models\")\n logger.setLevel(logging.DEBUG)\n","sub_path":"atd_knack_api/_models.py","file_name":"_models.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"443506887","text":"#/bin/env python3\n\nimport os, sys, signal, time\n\nCMD_ENDERS = (\" \", \"\\n\", \"\\t\", \";\", \"&\", \"|\", \">\", \"<\")\nHOMEDIR = os.getenv('HOME')\noutQueue = [] #A queue of stuff to be prepended to prompt\nbgJobs = {} #A dictionary of pid:cmd key-value pairs\nfgJobs = {} #A dictionary of pid:cmd key-value pairs\n\ndef newProcess(cmd, args,\n errorMsg=\"Unknown Command \\\"{0}\\\"\", fileErrorMsg=\"Error Opening \\\"{0}\\\"\",\n redir=None, outdir=None):\n tmp = \" \".join(args).split('|')\n if len(tmp) > 1:\n #Piping\n cmd = tmp\n first = cmd[0].strip().split()\n second = cmd[1].strip().split()\n print(first, second)\n r, w = os.pipe() #Read, write ends of pipe\n firstPID = os.fork()\n if not firstPID:\n #First Child\n os.dup2(w, 1)\n os.close(w)\n try:\n os.execvp(first[0], first)\n except FileNotFoundError: #Incorrect Command catching\n print(fileErrorMsg.format(first))\n sys.exit()\n secondPID = os.fork()\n if not secondPID:\n #Second Child\n os.dup2(r, 0)\n os.close(r)\n try:\n os.execvp(second[0], second)\n except FileNotFoundError: #Incorrect Command catching\n print(fileErrorMsg.format(second))\n sys.exit()\n return {firstPID: first, secondPID: second}\n else:\n #Not Piping\n pid = os.fork()\n if not pid:\n try:\n if redir is not None: #<\n try:\n fin = os.open(redir, os.O_RDONLY)\n except FileNotFoundError:\n print (fileErrorMsg.format(redir))\n sys.exit()\n os.dup2(fin, 0)\n os.close(fin)\n\n if outdir is not None: #>\n try:\n fout = os.open(outdir, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)\n except FileNotFoundError:\n print (fileErrorMsg.format(outdir))\n sys.exit()\n os.dup2(fout, 1)\n # os.write(fout, bytes(\"TEST\", 'UTF-8'))\n os.close(fout)\n\n os.execvp(cmd, args)\n except FileNotFoundError:\n print(errorMsg.format(cmd))\n sys.exit()\n return pid\n\ndef run(str):\n global outQueue, bgJobs, fgJobs\n orig = str\n if \"&\" in str:\n bg = True\n else:\n bg = False\n str = str.replace('&','')\n fin = str.split('<')\n fin = fin[1] if len(fin)>1 else None\n fout = None\n if fin is not None: # < first\n fout = fin.split('>')\n if len(fout) > 1: # < >, so I need to fix fin's file name\n fin = fout[0]\n fout = fout[1]\n else: #Checking > <\n fout = str.split('<')[0].split('>')\n if len(fout) > 1: # > <\n fout = fout[1]\n else: # No >\n fout = None\n else: # Checking >\n fout = str.split('>')\n fout = fout[1] if len(fout)>1 else None\n cmd = str.split('>')[0].split('<')[0].split()\n if fin is not None:\n fin = fin.strip()\n if fout is not None:\n fout = fout.strip()\n #print(\"\\\"{}\\\"\".format(cmd))\n #print(\"\\\"{}\\\"\".format(fin))\n 
#print(\"\\\"{}\\\"\".format(fout))\n pid = newProcess(cmd[0], cmd, redir=fin, outdir=fout)\n if \"|\" in orig:\n fgJobs.update(pid)\n elif bg:\n bgJobs[pid] = orig\n else:\n fgJobs[pid] = orig\n\ndef prompt():\n global outQueue, bgJobs, fgJobs\n while fgJobs:\n time.sleep(1)\n for i in outQueue:\n print(i)\n outQueue = []\n promptLine = os.getcwd().replace(HOMEDIR, '~') + \"$ \"\n sys.stdout.write(promptLine)\n sys.stdout.flush()\n stdin = sys.stdin.readline()\n if len(stdin) == 0 or (len(stdin) >= 4 and stdin[0:4] == \"exit\"):\n print()\n sys.stdout.flush()\n sys.exit()\n elif stdin == \"\\n\":\n print()\n elif len(stdin) >= 3 and stdin[0:2] == \"cd\" and stdin[2] in CMD_ENDERS:\n stdin.replace('~', HOMEDIR)\n stdin = stdin.split()\n if len(stdin)==1: #If nothing after cd\n os.chdir(HOMEDIR)\n else:\n os.chdir(stdin[1])\n elif len(stdin) >= 4 and stdin[0:4] == \"jobs\" and stdin[4] in CMD_ENDERS:\n for pid,cmd in bgJobs:\n print(\"[{}] {} running\".format(pid, cmd))\n else:\n run(stdin.strip())\n\ndef childReaper(signum, frame):\n global outQueue, bgJobs, fgJobs\n print(\"Beginning Reaping\")\n if (len(bgJobs) > 0) or (len(fgJobs) > 0):\n pid, status = os.waitpid(-1, os.WNOHANG)\n print(\"Returned From waitpid \", pid, status)\n if pid:\n if pid in fgJobs:\n if os.WEXITSTATUS(status):\n outQueue.insert(0, \"Command {} errored with exit status {}\".format(fgJobs[pid], os.WEXITSTATUS(status)))\n print(\"Before Deleting fgJobs \", fgJobs)\n del fgJobs[pid]\n print(\"After Deleting fgJobs \", fgJobs)\n elif pid in bgJobs:\n outQueue.append(\"[{}] Command {} finished\".format(pid, bgJobs[pid]))\n print(\"Before Deleting bgJobs \", bgJobs)\n del bgJobs[pid]\n print(\"After Deleting bgJobs \", bgJobs)\n print(\"Exit\")\n\ndef main():\n global outQueue, bgJobs, fgJobs\n signal.signal(signal.SIGCHLD, childReaper)\n while True:\n prompt()\n\nif __name__==\"__main__\":\n main()\n","sub_path":"sh.py","file_name":"sh.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"632496936","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAMR (Abstract Meaning Representation) structure\nFor detailed description of AMR, see http://www.isi.edu/natural-language/amr/a.pdf\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport penman as pp\n\n# change this if needed\nERROR_LOG = sys.stderr\n\n# change this if needed\nDEBUG_LOG = sys.stderr\n\n\nclass AMR(object):\n \"\"\"\n AMR is a rooted, labeled graph to represent semantics.\n This class has the following members:\n nodes: list of node in the graph. Its ith element is the name of the ith node. For example, a node name\n could be \"a1\", \"b\", \"g2\", .etc\n node_values: list of node labels (values) of the graph. Its ith element is the value associated with node i in\n nodes list. In AMR, such value is usually a semantic concept (e.g. \"boy\", \"want-01\")\n root: root node name\n relations: list of edges connecting two nodes in the graph. Each entry is a link between two nodes, i.e. a triple\n . In AMR, such link denotes the relation between two semantic\n concepts. For example, \"arg0\" means that one of the concepts is the 0th argument of the other.\n attributes: list of edges connecting a node to an attribute name and its value. For example, if the polarity of\n some node is negative, there should be an edge connecting this node and \"-\". A triple < attribute name,\n node name, attribute value> is used to represent such attribute. 
It can also be viewed as a relation.\n \"\"\"\n\n def __init__(self, node_list=None, node_value_list=None, relation_list=None, attribute_list=None):\n \"\"\"\n node_list: names of nodes in AMR graph, e.g. \"a11\", \"n\"\n node_value_list: values of nodes in AMR graph, e.g. \"group\" for a node named \"g\"\n relation_list: list of relations between two nodes\n attribute_list: list of attributes (links between one node and one constant value)\n \"\"\"\n # initialize AMR graph nodes using list of nodes name\n # root, by default, is the first in var_list\n\n if node_list is None:\n self.nodes = []\n self.root = None\n else:\n self.nodes = node_list[:]\n if len(node_list) != 0:\n self.root = node_list[0]\n else:\n self.root = None\n if node_value_list is None:\n self.node_values = []\n else:\n self.node_values = node_value_list[:]\n if relation_list is None:\n self.relations = []\n else:\n self.relations = relation_list[:]\n if attribute_list is None:\n self.attributes = []\n else:\n self.attributes = attribute_list[:]\n\n def rename_node(self, prefix):\n \"\"\"\n Rename AMR graph nodes to prefix + node_index to avoid nodes with the same name in two different AMRs.\n \"\"\"\n node_map_dict = {}\n # map each node to its new name (e.g. \"a1\")\n for i in range(0, len(self.nodes)):\n node_map_dict[self.nodes[i]] = prefix + str(i)\n # update node name\n for i, v in enumerate(self.nodes):\n self.nodes[i] = node_map_dict[v]\n # update node name in relations\n for node_relations in self.relations:\n for i, l in enumerate(node_relations):\n node_relations[i][1] = node_map_dict[l[1]]\n\n def get_triples(self):\n \"\"\"\n Get the triples in three lists.\n instance_triple: a triple representing an instance. E.g. instance(w, want-01)\n attribute triple: relation of attributes, e.g. polarity(w, - )\n and relation triple, e.g. arg0 (w, b)\n \"\"\"\n instance_triple = []\n relation_triple = []\n attribute_triple = []\n for i in range(len(self.nodes)):\n instance_triple.append((\"instance\", self.nodes[i], self.node_values[i]))\n # l[0] is relation name\n # l[1] is the other node this node has relation with\n for l in self.relations[i]:\n relation_triple.append((l[0], self.nodes[i], l[1]))\n # l[0] is the attribute name\n # l[1] is the attribute value\n for l in self.attributes[i]:\n attribute_triple.append((l[0], self.nodes[i], l[1]))\n return instance_triple, attribute_triple, relation_triple\n\n def get_triples2(self):\n \"\"\"\n Get the triples in two lists:\n instance_triple: a triple representing an instance. E.g. instance(w, want-01)\n relation_triple: a triple representing all relations. E.g arg0 (w, b) or E.g. polarity(w, - )\n Note that we do not differentiate between attribute triple and relation triple. 
Both are considered as relation\n triples.\n All triples are represented by (triple_type, argument 1 of the triple, argument 2 of the triple)\n \"\"\"\n instance_triple = []\n relation_triple = []\n for i in range(len(self.nodes)):\n # an instance triple is instance(node name, node value).\n # For example, instance(b, boy).\n instance_triple.append((\"instance\", self.nodes[i], self.node_values[i]))\n # l[0] is relation name\n # l[1] is the other node this node has relation with\n for l in self.relations[i]:\n relation_triple.append((l[0], self.nodes[i], l[1]))\n # l[0] is the attribute name\n # l[1] is the attribute value\n for l in self.attributes[i]:\n relation_triple.append((l[0], self.nodes[i], l[1]))\n return instance_triple, relation_triple\n\n def __str__(self):\n \"\"\"\n Generate AMR string for better readability\n \"\"\"\n lines = []\n for i in range(len(self.nodes)):\n lines.append(\"Node \" + str(i) + \" \" + self.nodes[i])\n lines.append(\"Value: \" + self.node_values[i])\n lines.append(\"Relations:\")\n for relation in self.relations[i]:\n lines.append(\"Node \" + relation[1] + \" via \" + relation[0])\n for attribute in self.attributes[i]:\n lines.append(\"Attribute: \" + attribute[0] + \" value \" + attribute[1])\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n def output_amr(self):\n \"\"\"\n Output AMR string\n \"\"\"\n print(self.__str__(), file=DEBUG_LOG)\n\n @staticmethod\n def get_amr_line(input_f):\n \"\"\"\n Read the file containing AMRs. AMRs are separated by a blank line.\n Each call of get_amr_line() returns the next available AMR (in one-line form).\n Note: this function does not verify if the AMR is valid\n \"\"\"\n cur_amr = []\n for line in input_f:\n line = line.strip()\n if line != \"\":\n cur_amr.append(line.strip())\n else:\n break\n return \"\\n\".join(cur_amr)\n\n @staticmethod\n def parse_graph_line(lines):\n g = pp.decode(lines)\n return AMR.from_graph(g)\n\n @staticmethod\n def from_graph(g):\n instances = g.instances()\n node_name_list, node_value_list = zip(*[(instance.source, instance.target) for instance in instances])\n node_name_list = list(node_name_list)\n node_value_list = list(node_value_list)\n positions = {concept: idx for idx, concept in enumerate(node_name_list)}\n relation_list = [[] for _ in node_name_list]\n attribute_list = [[] for _ in node_name_list]\n\n for src, label, tgt in g.edges():\n tgt: str\n if len(tgt) >= 3 and tgt.startswith('\\\"') and tgt.endswith('\\\"'):\n tgt = tgt.strip('\\\"')\n relation_list[positions[src]].append([label[1:], tgt])\n\n for src, label, tgt in g.attributes():\n tgt: str\n if len(tgt) >= 3 and tgt.startswith('\\\"') and tgt.endswith('\\\"'):\n tgt = tgt.strip('\\\"')\n attribute_list[positions[src]].append([label[1:], tgt])\n\n attribute_list[positions[g.top]].append(['TOP', node_value_list[positions[g.top]]])\n\n node_value_list_ = []\n for value in node_value_list:\n if len(value) >= 3 and value.startswith('\\\"') and value.endswith('\\\"'):\n value = value.strip('\\\"')\n node_value_list_.append(value)\n\n result_amr = AMR(node_name_list, node_value_list_, relation_list, attribute_list)\n return result_amr\n\n\n# test AMR parsing\n# run by amr.py [file containing AMR]\n# a unittest can also be used.\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"No file given\", file=ERROR_LOG)\n exit(1)\n amr_count = 1\n for line in open(sys.argv[1]):\n cur_line = line.strip()\n if cur_line == \"\" or cur_line.startswith(\"#\"):\n continue\n print(\"AMR\", 
amr_count, file=DEBUG_LOG)\n        current = AMR.parse_graph_line(cur_line)\n        current.output_amr()\n        amr_count += 1\n","sub_path":"amr_parser/amr.py","file_name":"amr.py","file_ext":"py","file_size_in_byte":9050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"471411758","text":"# python module for one-timepad\r\n#import onetimepad\r\n# python module to create GUI\r\nimport smtplib\r\nfrom tkinter import *\r\nimport sqlite3,sys\r\ndef connection():\r\n try:\r\n conn=sqlite3.connect(\"student.db\")\r\n except:\r\n print(\"cannot connect to the database\")\r\n return conn\r\n\r\n\r\ndef verifier():\r\n a=b=c=d=e=f=0\r\n if not student_name.get():\r\n t1.insert(END,\"<>Student name is required<>\\n\")\r\n a=1\r\n if not roll_no.get():\r\n t1.insert(END,\"<>Roll no is required<>\\n\")\r\n b=1\r\n if not branch.get():\r\n t1.insert(END,\"<>Branch is required<>\\n\")\r\n c=1\r\n if not phone.get():\r\n t1.insert(END,\"<>Phone number is required<>\\n\")\r\n d=1\r\n if not email.get():\r\n t1.insert(END,\"<>email name is required<>\\n\")\r\n e=1\r\n if not address.get():\r\n t1.insert(END,\"<>Address is Required<>\\n\")\r\n f=1\r\n if a==1 or b==1 or c==1 or d==1 or e==1 or f==1:\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\ndef add_student():\r\n ret = verifier()\r\n if ret == 0:\r\n conn = connection()\r\n cur = conn.cursor()\r\n cur.execute(\"CREATE TABLE IF NOT EXISTS STUDENTS(NAME TEXT,ROLL_NO INTEGER,BRANCH TEXT,PHONE_NO INTEGER,FATHER TEXT,ADDRESS TEXT)\")\r\n cur.execute(\"insert into STUDENTS values(?,?,?,?,?,?)\", (\r\n student_name.get(), int(roll_no.get()), branch.get(), int(phone.get()), email.get(), address.get()))\r\n conn.commit()\r\n conn.close()\r\n t1.insert(END, \"Student Registration Successful. Check Your Registered Email ID\\n\")\r\n\r\ndef view_student():\r\n conn=connection()\r\n cur=conn.cursor()\r\n cur.execute(\"select * from STUDENTS\")\r\n data=cur.fetchall()\r\n conn.close()\r\n for i in data:\r\n t1.insert(END,str(i)+\"\\n\")\r\n\r\n\r\ndef delete_student():\r\n ret=verifier()\r\n if ret==0:\r\n conn=connection()\r\n cur=conn.cursor()\r\n cur.execute(\"DELETE FROM STUDENTS WHERE ROLL_NO=?\",(int(roll_no.get()),))\r\n conn.commit()\r\n conn.close()\r\n t1.insert(END,\"Student Profile Successfully Deleted\\n\")\r\n\r\ndef update_student():\r\n ret=verifier()\r\n if ret==0:\r\n conn=connection()\r\n cur=conn.cursor()\r\n cur.execute(\"UPDATE STUDENTS SET NAME=?,ROLL_NO=?,BRANCH=?,PHONE_NO=?,FATHER=?,ADDRESS=? where ROLL_NO=?\",(student_name.get(),int(roll_no.get()),branch.get(),int(phone.get()),email.get(),address.get(),int(roll_no.get())))\r\n conn.commit()\r\n conn.close()\r\n t1.insert(END,\"Student Profile Updated Successfully\\n\")\r\n\r\n\r\ndef clse():\r\n sys.exit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n student_root = Tk()\r\n student_root.geometry(\"1366x800\")\r\n student_root.title(\"Student Management System\")\r\n title = Label(student_root, text=\"Seva Sadan Student Management System\", font=(\"Arial\",30, \"bold\"),bg=\"black\", fg=\"#1aff1a\")\r\n title.pack(side=TOP, fill=X)\r\n\r\n student_name = StringVar()\r\n roll_no = StringVar()\r\n branch = StringVar()\r\n phone = StringVar()\r\n email = StringVar()\r\n address = StringVar()\r\n gmail=email\r\n # -----main1 frame-------------------\r\n\r\n main1_frame = Frame(student_root, bd=4, relief=RIDGE, bg=\"black\")\r\n main1_frame.place(x=35, y=70, width=650, height=300)\r\n\r\n\r\n\r\n main1_title = Label(main1_frame, text=\"Student Panel\", font=(\"Arial\", 20, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\r\n main1_title.grid(row=0, column=1, pady=0, padx=0)\r\n\r\n\r\n label1 = Label(main1_frame, text=\"Student Name. 
: \", font=(\"Arial\", 15, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n label1.grid(row=1, column=0, pady=0, padx=20)\n\n label2 = Label(main1_frame, text=\"Student Enroll. : \", font=(\"Arial\", 15, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n label2.grid(row=2, column=0, pady=0, padx=20)\n\n label3 = Label(main1_frame, text=\"Student Branch : \", font=(\"Arial\", 15, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n label3.grid(row=3, column=0, pady=0, padx=20)\n\n label4 = Label(main1_frame, text=\"Student Phone : \", font=(\"Arial\", 15, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n label4.grid(row=4, column=0, pady=0, padx=20)\n\n label5 = Label(main1_frame, text=\"Student Email : \", font=(\"Arial\", 15, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n label5.grid(row=5, column=0, pady=0, padx=20)\n label6 = Label(main1_frame, text=\"Student Address : \", font=(\"Arial\", 15, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n label6.grid(row=6, column=0, pady=0, padx=20)\n\n e1 = Entry(main1_frame, textvariable=student_name,width=30)\n e1.grid(row=1, column=1, pady=0, padx=10)\n\n e2 = Entry(main1_frame, textvariable=roll_no,width=30)\n e2.grid(row=2, column=1, pady=0, padx=10)\n\n e3 = Entry(main1_frame, textvariable=branch,width=30)\n e3.grid(row=3, column=1, pady=0, padx=10)\n\n e4 = Entry(main1_frame, textvariable=phone,width=30)\n e4.grid(row=4, column=1, pady=0, padx=10)\n\n e5 = Entry(main1_frame, textvariable=email,width=30)\n e5.grid(row=5, column=1, pady=0, padx=10)\n\n e6 = Entry(main1_frame, textvariable=address,width=30)\n e6.grid(row=6, column=1, pady=0, padx=10)\n\n\n\n # -----main2 frame-------------------\n main2_frame = Frame(student_root, bd=4, relief=RIDGE, bg=\"black\")\n main2_frame.place(x=680, y=70, width=650, height=300)\n\n main2_title = Label(main2_frame, text=\"Control Panel\", font=(\"Arial\", 20, \"bold\"), fg=\"#1aff1a\", bg=\"black\")\n main2_title.place(x=200, y=0)\n\n b1 = Button(main1_frame, command=add_student, width=30, text=\"Add Student\", bg=\"black\", fg=\"#1aff1a\",\n font=(\"comicsanses\", 10, \"normal\"))\n b1.place(x=110, y=230)\n\n b2 = Button(main2_frame, text=\"View All Student\", width=30, command=view_student, bg=\"black\", fg=\"#1aff1a\",\n font=(\"comicsanses\", 10, \"normal\"))\n b2.place(x=180, y=50)\n\n\n b4 = Button(main2_frame, text=\"Update Profile\", width=30, command=update_student, bg=\"black\", fg=\"#1aff1a\",\n font=(\"comicsanses\", 10, \"normal\"))\n b4.place(x=180, y=100)\n\n b3 = Button(main2_frame, text=\"Delete Profile\", width=30, command=delete_student, bg=\"black\", fg=\"#1aff1a\",\n font=(\"comicsanses\", 10, \"normal\"))\n b3.place(x=180, y=150)\n\n b5 = Button(main2_frame, text=\"Exit\", width=30, command=clse, bg=\"black\", fg=\"#1aff1a\",\n font=(\"comicsanses\", 10, \"normal\"))\n b5.place(x=180, y=200)\n\n b6=Label(main2_frame,text=\"Developed By:| Ashish Patil | Dipak Mahajan | Ruchee Chouhan |\",bg=\"black\", fg=\"#1aff1a\")\n b6.place(x=100,y=260)\n # -----main3 frame-------------------\n main3_frame = Frame(student_root, bd=4, relief=RIDGE, bg=\"black\")\n main3_frame.place(x=0, y=400, width=1366, height=300)\n\n # -----------Table data frame----------------\n t1 = Text(main3_frame, width=1300, height=250,fg='#1aff1a',bg=\"black\",font=(\"Arial\",10,'bold'))\n t1.grid(row=2, column=1)\n\n # --------data 
search--------------\n\nstudent_root=Tk()\nstudent_root.mainloop()","sub_path":"main_index.py","file_name":"main_index.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"295317151","text":"from django.shortcuts import render, redirect\nfrom .forms import MovieForm\nfrom .models import Movie\n\n\ndef mov(request):\n if request.method == \"POST\":\n form = MovieForm(request.POST)\n if form.is_valid():\n try:\n form.save()\n return redirect('/show')\n except:\n pass\n else:\n form = MovieForm()\n return render(request,'index.html', {'form': form})\n\n\ndef show(request):\n movies = Movie.objects.all()\n return render(request,\"show.html\", {'movies': movies})\n\n\ndef edit(request, id):\n movie = Movie.objects.get(id=id)\n return render(request,'edit.html', {'movie': movie})\n\n\ndef update(request, id):\n movie = Movie.objects.get(id=id)\n form = MovieForm(request.POST, instance = movie)\n if form.is_valid():\n form.save()\n return redirect(\"/show\")\n return render(request, 'edit.html', {'movie': movie})\n\n\ndef destroy(request, id):\n movie = Movie.objects.get(id=id)\n movie.delete()\n return redirect(\"/show\")\n","sub_path":"movie/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"466759403","text":"\n################# PRINT #################\n\ndef printHeader(matrixHeader):\n for k in matrixHeader:\n print(k,matrixHeader[k])\n\ndef printMatrix(matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n print(matrix[i][j],\" \",end='')\n print()\n\n################# PRE-PROCESSING #################\n\ndef removeAccents(text):\n vowelReplacements = [\n ('á','a'),\n ('é','e'),\n ('í','i'),\n ('ó','o'),\n ('ú','u')\n ]\n for a1, a2 in vowelReplacements:\n text = text.replace(a1, a2)\n return text\n\ndef removeSpacesSigns(text):\n punctuationMarks = [',',';',':','¿','?','!','¡','.']\n for item in punctuationMarks:\n text = text.replace(item,'')\n text = text.replace(' ','').replace('\\n','')\n return text\n\ndef preprocess(text):\n text = removeSpacesSigns(text)\n text = removeAccents(text)\n text = text.replace('ñ','n')\n text = text.upper()\n return text\n\n################# HEAD MATRIX #################\n\ndef sortKey (clave):\n clave = ''.join(sorted(clave))\n orderedKey = list(clave)\n return orderedKey\n\ndef generateHeader(key, orderedKey):\n matrixHeader = {}\n for i in range(len(orderedKey)):\n pos = orderedKey.index(key[i])\n matrixHeader[pos + 1] = key[i]\n orderedKey[pos] = \"-\"\n return matrixHeader\n\n################# TRANSPOSITION INTERRUPTED #################\n\ndef getEncryptedText(matrixHeader, matrix):\n result = \"\"\n for col in range(len(matrixHeader)):\n colMatrix = list(matrixHeader).index(col+1)\n for row in matrix:\n result += row[colMatrix]\n result += \" \"\n return result\n\ndef interruptedTransposition(key, text):\n # HEAD\n orderedKey = sortKey(key)\n matrixHeader = generateHeader(key, orderedKey)\n\n # MATRIX\n matrix = []\n flag = False \n\n i = 0 # Preprocessed text index \n numrow = 0 # Variable that stores the row number\n\n while flag == False:\n\n row = [\"\"] * len(matrixHeader) # Row\n f = 0 # Index of the created row\n\n for key in matrixHeader:\n\n # If all the preprocessed text has been read, break the two loops\n if i >= len(text):\n flag = True\n break\n\n # If the row number matches the column key,\n # 
break the loop and go to the next row\n if (numrow + 1) == key:\n row[f] += text[i]\n f += 1\n i += 1\n break\n else:\n row[f] += text[i]\n f += 1\n i += 1\n\n numrow += 1\n matrix.append(row)\n\n # printHeader(matrixHeader)\n # printMatrix(matrix)\n\n # GET ENCRYPTED TEXT\n result = getEncryptedText(matrixHeader, matrix)\n return result\n\n################# MAIN #################\n\nif __name__ == \"__main__\":\n\n # Password and plain text entry\n key = \"convenience\"\n text = \"Here is a secret message enciphered by transposition\"\n\n # Key and plain text preprocessing\n key = preprocess(key)\n text = preprocess(text)\n\n # Text encryption\n textEncryption = interruptedTransposition(key, text)\n\n print(textEncryption)\n\n","sub_path":"practices/interrupted_transposition.py","file_name":"interrupted_transposition.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"571253615","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nSetup for Cartes deployment\n\nFor sdist deployments, do not forget MANIFEST.in file to include data\nfiles.\n\"\"\"\n\nfrom setuptools import setup\n\nVERSION = '0.2.2'\n\n__author__ = '{martin.monperrus,raphael.marvie}@univ-lille1.fr'\n__date__ = 'Thu Jun 21 21:14:51 2012'\n\n\nwith open('README') as file:\n long_description = file.read()\n\nsetup(\n name='CartesInitProg',\n version=VERSION,\n description=\"Package Cartes pour le module d'InitProg.\",\n long_description=long_description,\n author='Martin Monperrus & Raphael Marvie',\n author_email=__author__,\n url='http://www.fil.univ-lille1.fr/',\n packages=['Cartes'],\n include_package_data=True,\n package_data={'Cartes': ['Cartes/images/*.gif']},\n install_requires='pyparsing >= 1.5.6'\n)\n\n\n\n# eof\n","sub_path":"pypi_install_script/CartesInitProg-0.2.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479672167","text":"#Slightly edited from the original idfix script, it now handles errors better; credit goes to GFD\n\nimport sys\nimport struct\nfrom struct import pack\nimport os\nfrom util import *\n\nnud = open(sys.argv[1], \"rb+\")\nmainID = int(sys.argv[2])\nfighterID = int(sys.argv[3])\ntexID = int(sys.argv[4])\noffsetID = int(sys.argv[5])\ntexID\n#setup are arrays\nboneCount = []\nvertexStart_array=[]\nvertexAmount_array=[]\nvertexSize_array=[]\npolyStart_array=[]\npolyAmount_array=[]\npolySize_array=[]\nvertexAddStart_array=[]\nUVSize_array=[]\npolyName_array=[]\nsingleBind_array=[]\ntexturePropertiesL1Start_array=[]\ntexturePropertiesL2Start_array=[]\ntexturePropertiesL3Start_array=[]\ntexturePropertiesL4Start_array=[]\ntextureNuml1_array=[]\ntextureNuml2_array=[]\ntextureNuml3_array=[]\ntextureNuml4_array=[]\n\nnud.seek(0x4)\n\nfileSize = readu32be(nud)\nif(offsetID == 0 or offsetID == 128):\n offsetID = offsetID\nelse:\n offsetID = 0\n print(\"ERROR: offsetID was not set as 0 or 128. 
Setting offsetId to 0\")\n#print(\"File Size is %i bytes\" % (fileSize))\nnud.seek(2,1)\npolysets = readu16be(nud)\nnud.seek(4,1)\npolyClumpStart = (readu32be(nud) + 0x30)\npolyClumpSize = readu32be(nud)\nvertexClumpStart = (polyClumpStart + polyClumpSize)\nvertexClumpSize = readu32be(nud)\nvertexAddStart = (vertexClumpSize + vertexClumpStart)\nvertexAddClumpSize = readu32be(nud)\nnameClumpStart = (vertexAddClumpSize + vertexAddStart)\nnud.seek(0x10,1)\nobjCount = 0\n\nfor z in range(polysets):\n nud.seek(0x20,1)\n print(hex(nud.tell()))\n polynamestart = readu32be(nud)\n indentifiera = readu32be(nud)\n singleBind = readu16be(nud)\n polyamount = readu16be(nud)\n positionb = readu32be(nud)\n objCount = (objCount + polyamount)\n if polynamestart > 99999 or polyamount > 999:\n sys.exit(1)\n for s in range(polyamount):\n polyName_array.append(polynamestart)\n singleBind_array.append(singleBind)\nfor p in range(objCount):\n polyStart = (readu32be(nud) + polyClumpStart)\n vertexStart = (readu32be(nud) + vertexAddStart)\n vertexAddStart = (readu32be(nud) + vertexAddClumpSize)\n vertexAmount = readu16be(nud)\n #print(\"There is %i Vertexs\" % int(vertexAmount))\n vertexSize = readByte(nud)\n uvSize = readByte(nud)\n textureLayer1Properties = readu32be(nud)\n textureLayer2Properties = readu32be(nud)\n textureLayer3Properties = readu32be(nud)\n textureLayer4Properties = readu32be(nud)\n polyAmount = readu16be(nud)\n #print(\"There is %i Faces\" % int(polyAmount))\n polySize = readByte(nud)\n polyFlag = readByte(nud)\n nud.seek(0xC,1)\n vertexStart_array.append(vertexStart)\n polyStart_array.append(polyStart)\n vertexAddStart_array.append(vertexAddStart)\n vertexAmount_array.append(vertexAmount)\n polyAmount_array.append(polyAmount)\n vertexSize_array.append(vertexSize)\n UVSize_array.append(uvSize)\n polySize_array.append(polySize)\n texturePropertiesL1Start_array.append(textureLayer1Properties)\n texturePropertiesL2Start_array.append(textureLayer2Properties)\n texturePropertiesL3Start_array.append(textureLayer3Properties)\n texturePropertiesL4Start_array.append(textureLayer4Properties)\n\nfor z in range(objCount):\n nud.seek(texturePropertiesL1Start_array[z])\n nud.seek(8,1)\n texSomethingCount = readu16be(nud)\n texPropCount = readu16be(nud)\n nud.seek(20,1)\n for x in range(texPropCount):\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n if(texNum > 0 and texNum <= 127):\n texNum = texNum\n elif(texNum >= 128 and texNum <= 255):\n texNum = texNum - 128\n if(typeNum >= 0x40):\n nud.seek(-4,1)\n nud.write(struct.pack(\"B\", mainID))\n nud.write(struct.pack(\"B\", fighterID))\n nud.write(struct.pack(\"B\", texID))\n nud.write(struct.pack(\"B\", offsetID + texNum))\n nud.seek(-4,1)\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n print(\"The main ID is: %i The fighter ID is: %i The skin ID is: %i The texture number is: %i\" %(typeNum,fightNum,playNum,texNum))\n nud.seek(20,1)\n headerRead = 0x20\n while headerRead == 0x20:\n headerRead = readu32be(nud)\n texPropNameStart = readu32be(nud)\n nud.seek(24,1)\n backUp = nud.tell()\n nud.seek(nameClumpStart + texPropNameStart)\n texPropName = getString(nud)\n nud.seek(backUp)\n if texturePropertiesL2Start_array[z] != 0:\n nud.seek(texturePropertiesL2Start_array[z])\n nud.seek(8,1)\n texSomethingCount = readu16be(nud)\n texPropCount = readu16be(nud)\n nud.seek(20,1)\n for x in range(texPropCount):\n typeNum = readByte(nud)\n fightNum = 
readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n if(texNum > 0 and texNum <= 127):\n texNum = texNum\n elif(texNum >= 128 and texNum <= 255):\n texNum = texNum - 128\n if(typeNum >= 0x40):\n nud.seek(-4,1)\n nud.write(struct.pack(\"B\", mainID))\n nud.write(struct.pack(\"B\", fighterID))\n nud.write(struct.pack(\"B\", texID))\n nud.write(struct.pack(\"B\", offsetID + texNum))\n nud.seek(-4,1)\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n print(\"The main ID is: %i The fighter ID is: %i The skin ID is: %i The texture number is: %i\" %(typeNum,fightNum,playNum,texNum))\n nud.seek(20,1)\n headerRead = 0x20\n while headerRead == 0x20:\n headerRead = readu32be(nud)\n texPropNameStart = readu32be(nud)\n nud.seek(24,1)\n backUp = nud.tell()\n nud.seek(nameClumpStart + texPropNameStart)\n texPropName = getString(nud)\n nud.seek(backUp)\n if texturePropertiesL3Start_array[z] != 0:\n nud.seek(texturePropertiesL2Start_array[z])\n nud.seek(8,1)\n texSomethingCount = readu16be(nud)\n texPropCount = readu16be(nud)\n nud.seek(20,1)\n for x in range(texPropCount):\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n if(texNum > 0 and texNum <= 127):\n texNum = texNum\n elif(texNum >= 128 and texNum <= 255):\n texNum = texNum - 128\n if(typeNum >= 0x40):\n nud.seek(-4,1)\n nud.write(struct.pack(\"B\", mainID))\n nud.write(struct.pack(\"B\", fighterID))\n nud.write(struct.pack(\"B\", texID))\n nud.write(struct.pack(\"B\", offsetID + texNum))\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n print(\"The main ID is: %i The fighter ID is: %i The skin ID is: %i The texture number is: %i\" %(typeNum,fightNum,playNum,texNum))\n nud.seek(20,1)\n headerRead = 0x20\n while headerRead == 0x20:\n headerRead = readu32be(nud)\n texPropNameStart = readu32be(nud)\n nud.seek(24,1)\n backUp = nud.tell()\n nud.seek(nameClumpStart + texPropNameStart)\n texPropName = getString(nud)\n nud.seek(backUp)\n if texturePropertiesL4Start_array[z] != 0:\n nud.seek(texturePropertiesL2Start_array[z])\n nud.seek(8,1)\n texSomethingCount = readu16be(nud)\n texPropCount = readu16be(nud)\n nud.seek(20,1)\n for x in range(texPropCount):\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n if(texNum > 0 and texNum <= 127):\n texNum = texNum\n elif(texNum >= 128 and texNum <= 255):\n texNum = texNum - 128\n if(typeNum >= 0x40):\n nud.seek(-4,1)\n nud.write(struct.pack(\"B\", mainID))\n nud.write(struct.pack(\"B\", fighterID))\n nud.write(struct.pack(\"B\", texID))\n nud.write(struct.pack(\"B\", offsetID + texNum))\n nud.seek(-4,1)\n typeNum = readByte(nud)\n fightNum = readByte(nud)\n playNum = readByte(nud)\n texNum = readByte(nud)\n print(\"The main ID is: %i The fighter ID is: %i The skin ID is: %i The texture number is: %i\" %(typeNum,fightNum,playNum,texNum))\n nud.seek(20,1)\n headerRead = 0x20\n while headerRead == 0x20:\n headerRead = readu32be(nud)\n texPropNameStart = readu32be(nud)\n nud.seek(24,1)\n backUp = nud.tell()\n nud.seek(nameClumpStart + texPropNameStart)\n texPropName = getString(nud)\n nud.seek(backUp)\n\n\nnud.close()\n","sub_path":"server/Mass TexIDFix/TexIDfix-NUD.py","file_name":"TexIDfix-NUD.py","file_ext":"py","file_size_in_byte":11541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"593313617","text":"from requests import 
Session\nimport re\nimport time\ns = Session()\ns.headers[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0\"\nout = open(\"happy(251-300).csv\",\"w\")\nf = open(\"all_link(1).txt\",\"r\").read().split(\"\\n\")\n\ndef scrap(i,t=1):\n time.sleep(t)\n try:\n r = s.get(i,proxies={\"https\":\"http://scraperapi.render=true:a6438ab03fee3e0af7053fbbcaa5c20c@proxy-server.scraperapi.com:8001\"},verify=False)\n ex = re.search(\"noon-express\",r.text)\n # ex = re.search(\"https://k.nooncdn.com/s/app/2019/noon-bigalog/472711e386dd707b927b9b7c1b43fb6e190ea5c4/static/images/noon-express-en.png\",r.text)\n if(ex):\n ex = \"Yes\"\n else:\n ex = \"No\"\n of = re.search('

(.*?)Offers Available

',r.text)\n if of:\n offer = of.group(1)\n else:\n offer = 1\n printPrice = 0\n p1 = re.search('riceCurrency\":\"AED\",\"price\":(.*?),',r.text).group(1)\n p2 = re.search('tPrice\">AED (.*?)p2):\n printPrice = p2\n else:\n printPrice = p1\n else:\n printPrice = p1\n\n\n out.write(\"{}|{}|{}|{}\\n\".format(r.url,ex,printPrice,offer))\n time.sleep(4)\n print(r.url)\n except:\n print(\"exception.....\")\n scrap(i,2)\n\n\n\n\nfor i in f[250:300]:\n scrap(i,2)","sub_path":"h2.py","file_name":"h2.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"134224731","text":"from datetime import datetime, timedelta\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.db import IntegrityError\nfrom django.test import TestCase\nfrom django.test.client import Client\nfrom django.utils.translation import ugettext as _\nfrom wouso.core import scoring\nfrom wouso.core.magic.templatetags.artifacts import artifact, spell_due, artifact_full\nfrom wouso.core.scoring.models import Coin, Formula\nfrom wouso.core.tests import WousoTest\nfrom wouso.core.user.models import Player\nfrom wouso.core.magic.models import Spell\nfrom wouso.interface.activity.models import Activity\nfrom models import *\nfrom manager import MagicManager\n\nclass ManagerTestCase(WousoTest):\n \"\"\" Test the core.magic.manager.Manager helper.\n \"\"\"\n def setUp(self):\n self.user = User.objects.create(username='test')\n self.player = self.user.get_profile()\n\n def test_manager_properties(self):\n self.assertTrue(self.player.magic)\n\n self.assertIsInstance(self.player.magic, MagicManager)\n\n self.assertEqual(self.player.magic.spells.count(), 0)\n self.assertEqual(self.player.magic.spells_cast.count(), 0)\n self.assertEqual(self.player.magic.spells_available.count(), 0)\n self.assertEqual(self.player.magic.artifact_amounts.count(), 0)\n self.assertEqual(self.player.magic.spell_amounts.count(), 0)\n\n self.assertFalse(self.player.magic.has_modifier('inexistent-modifier'))\n self.assertEqual(self.player.magic.modifier_percents('inexistent-modifier'), 100) # should return 0\n\n def test_manager_use_modifier(self):\n Artifact.objects.create(name='modifier-name')\n self.player.magic.give_modifier('modifier-name', 1)\n self.assertTrue(self.player.magic.has_modifier('modifier-name'))\n\n self.player.magic.use_modifier('modifier-name', 1)\n self.assertFalse(self.player.magic.has_modifier('modifier-name'))\n\n def test_cast_spell(self):\n spell1 = Spell.objects.create(name='le-spell')\n spell2 = Spell.objects.create(name='le-spell2', mass=True, type='o')\n v = []\n for i in range(0,7):\n player = self._get_player(i+2)\n player.points = 10-i\n player.save()\n v.append(player)\n\n v[3].magic.add_spell(spell2)\n neigh = v[3].get_neighbours_from_top(2)\n neigh = v[3].magic.filter_players_by_spell(neigh, spell2)\n v[3].magic.mass_cast(spell2, neigh, datetime.now()+timedelta(days=1))\n\n for i in [1, 2, 4, 5]:\n self.assertTrue(v[i].magic.is_spelled)\n self.assertTrue(v[3].magic.is_spelled)\n\n v[6].magic.cast_spell(spell1, v[0], datetime.now()+timedelta(days=1))\n self.assertFalse(v[6].magic.is_spelled)\n\n v[0].magic.add_spell(spell1)\n v[6].magic.cast_spell(spell1, v[0], datetime.now()+timedelta(days=1))\n self.assertTrue(v[6].magic.is_spelled)\n\n\nclass ModifierTest(TestCase):\n def test_path_simple(self):\n m = Modifier(name='cici')\n self.assertTrue(m.path)\n self.assertEqual(m.path, 'cici')\n\n def 
test_path_image(self):\n m = Modifier(name='cici')\n m.image = 'test.jpg'\n self.assertTrue('test.jpg' in m.path)\n\n\nclass ArtifactTestCase(TestCase):\n\n def testArtifactCreateUnique(self):\n \"\"\" Test if we cannot create two artifacts with the same name in a group\n \"\"\"\n group = ArtifactGroup.objects.create(name='gigi')\n\n a1 = Artifact.objects.create(group=group, name='name')\n\n self.assertRaises(IntegrityError, Artifact.objects.create, group=group, name='name')\n\n def test_no_artifact_behavior(self):\n noartifact = NoArtifactLevel(1)\n\n self.assertTrue(artifact(noartifact))\n\nclass SpellTestCase(WousoTest):\n\n def test_buy_spell(self):\n Coin.add('gold')\n Formula.add('buy-spell', definition=\"gold=-{price}\")\n spell = Spell.objects.create(name='test-spell', available=True, price=10)\n player = User.objects.create_user('test', 'test@a.ro', password='test').get_profile()\n\n scoring.score_simple(player, 'gold', 100)\n self.assertEqual(player.coins['gold'], 100)\n\n # TODO: interface test should not be here\n response = self.client.get(reverse('bazaar_home'))\n self.assertTrue('test-spell' in response.content)\n\n self.client.login(username='test', password='test')\n response = self.client.get(reverse('bazaar_buy', kwargs={'spell': spell.id}))\n self.assertFalse('error' in response.content)\n\n player = Player.objects.get(user__username='test')\n self.assertEqual(player.coins['gold'], 90)\n\n def test_expired(self):\n player = self._get_player()\n spell = Spell.objects.create(name='test-spell', available=True, price=10)\n\n obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))\n self.assertFalse(PlayerSpellDue.get_expired(datetime.today()))\n\n obs.due = datetime.now() - timedelta(days=1)\n obs.save()\n\n self.assertTrue(PlayerSpellDue.get_expired(datetime.today()))\n self.assertIn(obs, PlayerSpellDue.get_expired(datetime.today()))\n\n obs.due = datetime.now() - timedelta(days=1)\n obs.save()\n\n # Run management task: should delete expired dues\n Bazaar.management_task()\n\n self.assertFalse(PlayerSpellDue.get_expired(datetime.today()))\n\n def test_cure(self):\n \"\"\"\n Test if cure works on a player\n \"\"\"\n player = self._get_player()\n player2 = self._get_player(2)\n\n spell = Spell.objects.create(name='test-spell', available=True, price=10, type='n')\n cure = Spell.objects.create(name='cure', available=True, price=10)\n obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))\n\n self.assertTrue(player.magic.spells) # There is test-spell cast on myself\n\n player2.magic.add_spell(cure)\n player.magic.cast_spell(cure, player2, datetime.now() + timedelta(days=1))\n\n self.assertFalse(player.magic.spells) # There isn't any spell left\n\n def test_disguise_simple(self):\n \"\"\"\n Test if top-disguise spell works\n \"\"\"\n player = self._get_player()\n Coin.add('points')\n scoring.score_simple(player, 'points', 10)\n\n self.assertEqual(player.points, 10)\n\n disguise = Spell.objects.create(name='top-disguise', available=True, price=10, percents=50, type='s')\n player.magic.add_spell(disguise)\n player.magic.cast_spell(disguise, player, datetime.now() + timedelta(days=1))\n\n self.assertTrue(player.magic.has_modifier('top-disguise'))\n\n self.assertEqual(player.points, 15)\n\n def test_disguise_expire_on_dispell(self):\n player = self._get_player()\n Coin.add('points')\n scoring.score_simple(player, 'points', 10)\n\n disguise = 
Spell.objects.create(name='top-disguise', available=True, price=10, percents=50, type='s')\n player.magic.add_spell(disguise)\n player.magic.cast_spell(disguise, player, datetime.now() + timedelta(days=1))\n\n self.assertEqual(player.points, 15)\n\n dispell = Spell.objects.create(name='dispell', available=True, price=10)\n player.magic.add_spell(dispell)\n player.magic.cast_spell(dispell, player)\n\n self.assertFalse(player.magic.has_modifier('top-disguise'))\n\n player = Player.objects.get(pk=player.pk)\n\n self.assertEqual(player.points, 10)\n\n\nclass TemplatetagsTest(WousoTest):\n def test_spell_due(self):\n player = self._get_player()\n spell = Spell.objects.create(name='test-spell', available=True, price=10)\n\n obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))\n\n self.assertTrue(spell_due(obs))\n\n def test_artifact_full(self):\n self.assertFalse(artifact_full(None))\n\n player = self._get_player()\n self.assertTrue(artifact_full(player.level))\n\nclass TestMagicViews(WousoTest):\n def setUp(self):\n super(TestMagicViews, self).setUp()\n self.p1 = self._get_player(1)\n self.p2 = self._get_player(2)\n self.p1.points = 500\n self.p1.save()\n self.spell_1 = Spell.objects.create(name='spell1', title='Spell no. 1')\n self.spell_2 = Spell.objects.create(name='spell2', title='Spell no. 2')\n self.c = Client()\n self.c.login(username='testuser1', password='test')\n self.activity = Activity.objects.create(user_from=self.p1, user_to=self.p2,\n action='gold-won')\n scoring.setup_scoring()\n\n def test_bazaar_view(self):\n response = self.c.get(reverse('bazaar_home'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Bazaar')\n self.assertContains(response, 'Exchange')\n self.assertContains(response, 'Rate')\n self.assertContains(response, 'testuser1')\n self.assertContains(response, 'testuser2')\n self.assertContains(response, 'Spell no. 1')\n self.assertContains(response, 'Spell no. 
2')\n\n def test_bazaar_exchange_success_message(self):\n data = {'points': 10}\n response = self.c.post(reverse('bazaar_exchange'), data)\n self.assertContains(response, _('Converted successfully'))\n \n def test_bazaar_exchange_error_message(self):\n data = {'points': 1000}\n response = self.c.post(reverse('bazaar_exchange'), data)\n self.assertContains(response, _('Insufficient points'))\n response = self.c.get(reverse('bazaar_exchange'))\n self.assertContains(response, _('Expected post'))\n\n def test_magic_cast_error_message(self):\n data = {'days': 10, 'spell': 1}\n self.p1.magic.add_spell(self.spell_1)\n response = self.c.post(reverse('magic_cast', args=[2]), data)\n self.assertContains(response, _('Invalid number of days'))\n","sub_path":"wouso/core/magic/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"426272675","text":"from django.http import HttpResponseForbidden, Http404\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom academic_groups.models import Exam, ExamResult, Student, AcademicGroup, EventGroup\n\nfrom pages.decorators import should_be_starosta, should_be_jury\n\n\n# Create your views here.\n@should_be_starosta\ndef students(request):\n user = request.user\n academic_group = user.academicgroup\n\n context = {\n 'academic_group': academic_group,\n 'exams': Exam.objects.all(),\n 'name': '{0} {1}'.format(user.first_name, user.last_name),\n }\n\n return render(request, 'academic_groups/students.html', context=context)\n\n\n@should_be_starosta\ndef add_student(request):\n user = request.user\n academic_group = user.academicgroup\n\n if request.method == 'GET':\n\n context = {\n 'academic_group': academic_group,\n 'name': '{0} {1}'.format(user.first_name, user.last_name),\n }\n\n return render(request, 'academic_groups/add_student.html', context=context)\n elif request.POST:\n student = Student()\n\n student.name = '{0} {1} {2}'.format(\n request.POST['last_name'],\n request.POST['first_name'],\n request.POST['father_first_name'],\n )\n\n student.academic_group = academic_group\n student.educational_form = request.POST['educational_form']\n student.save()\n\n exams = academic_group.exams.all()\n\n for exam in exams:\n exam_score = ExamResult()\n exam_score.student = student\n exam_score.exam = exam\n exam_score.score = int(request.POST['{0}'.format(exam.id)])\n exam_score.save()\n\n return redirect(reverse(\"groups:students\"))\n else:\n return Http404()\n\n\n@should_be_starosta\ndef student_show(request, student_id):\n student = Student.objects.get(pk=student_id)\n\n context = {\n 'student': student,\n 'student_exams': student.examresult_set.filter(student=student),\n 'name': '{0} {1}'.format(request.user.first_name, request.user.last_name),\n }\n\n return render(request, 'academic_groups/student.html', context)\n\n\n@should_be_starosta\ndef edit_student_exams(request, student_id):\n if request.POST:\n student = Student.objects.get(pk=student_id)\n\n student_exams = list(filter(lambda exam_result: exam_result.student == student, student.examresult_set.all()))\n\n for student_exam in student_exams:\n student_exam.score = request.POST['exam{0}'.format(student_exam.exam_id)]\n student_exam.save()\n\n return redirect(reverse(\"groups:student\", args={\n student_id: student.id,\n }))\n else:\n return Http404()\n\n\n@should_be_starosta\ndef delete_student(request, student_id):\n if request.POST:\n student = 
Student.objects.get(pk=student_id)\n student.delete()\n return redirect(reverse('groups:students'))\n\n return Http404()\n\n\n@should_be_starosta\ndef add_exam(request):\n if request.POST:\n exam = Exam.objects.get(pk=request.POST['exam_id'])\n\n request.user.academicgroup.exams.add(exam)\n\n for student in request.user.academicgroup.student_set.all():\n exam_result = ExamResult()\n exam_result.exam = exam\n exam_result.student = student\n exam_result.score = 0\n exam_result.save()\n\n return redirect(reverse('home'))\n\n\n@should_be_starosta\ndef delete_exam(request):\n if request.POST:\n academic_group = request.user.academicgroup\n academic_group.exams.remove(request.POST['exam_id'])\n academic_group.save()\n\n for student in academic_group.student_set.all():\n student_exam = ExamResult.objects.filter(student_id=student.id, exam_id=request.POST['exam_id'])\n student_exam.delete()\n\n return redirect(reverse('groups:students'))\n\n return Http404()\n\n\n@should_be_starosta\ndef events(request):\n\n context = {\n 'academic_group': request.user.academicgroup,\n 'name': '{0} {1}'.format(request.user.first_name, request.user.last_name),\n }\n\n return render(request, 'academic_groups/events.html', context)\n\n\n@should_be_starosta\ndef add_event(request):\n if request.POST:\n event_group = EventGroup()\n event_group.name = request.POST['group_name']\n event_group.event_name = request.POST['event_name']\n event_group.event_area = request.POST['event_area']\n event_group.event_level = request.POST['event_level']\n event_group.prize_winning_place = request.POST['place']\n event_group.academic_group = request.user.academicgroup\n event_group.save()\n return redirect(reverse('groups:events'))\n\n\n@should_be_starosta\ndef event_add_student(request):\n if request.POST:\n\n event_group = EventGroup.objects.get(pk=request.POST['event_group_id'])\n student = Student.objects.get(pk=request.POST['student'])\n event_group.student_event.add(student)\n event_group.save()\n\n return redirect(reverse('groups:events'))\n\n\n@should_be_starosta\ndef edit_event_group(request):\n if request.POST:\n event_group = EventGroup.objects.get(pk=request.POST['event_id'])\n event_group.name = request.POST['group_name']\n event_group.event_name = request.POST['event_name']\n event_group.event_level = request.POST['event_level']\n event_group.prize_winning_place = request.POST['place']\n event_group.save()\n\n return redirect(reverse('groups:events'))\n\n\n@should_be_starosta\ndef delete_event_group(request):\n if request.POST:\n event_group = EventGroup.objects.get(pk=request.POST['event_group_id'])\n event_group.delete()\n\n return redirect(reverse('groups:events'))\n\n\n@should_be_jury\ndef jury(request):\n\n context = {\n 'academic_groups': AcademicGroup.objects.all(),\n 'name': '{0} {1}'.format(request.user.first_name, request.user.last_name),\n }\n\n return render(request, 'academic_groups/jury.html', context)\n","sub_path":"academic_groups/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"61957604","text":"from PyQt5 import QtGui, QtCore, QtWidgets\n\nfrom backend.repository import Repository\nfrom backend.commit import Commit\nfrom frontend.qcommit import QCommit, QLabelLabel\n\nimport random\nimport time\n\nCOLORS = [QtCore.Qt.blue, QtCore.Qt.black, QtCore.Qt.red, \n QtCore.Qt.darkRed, QtCore.Qt.green, QtCore.Qt.darkGreen, QtCore.Qt.darkBlue, \n QtCore.Qt.cyan, QtCore.Qt.darkCyan, 
QtCore.Qt.darkMagenta, QtCore.Qt.yellow, \n QtCore.Qt.darkYellow, QtCore.Qt.gray, QtCore.Qt.darkGray, QtCore.Qt.lightGray ]\n\nclass RepositoryWidget(QtWidgets.QWidget):\n def __init__(self, args):\n super().__init__(args)\n self.vbox = QtWidgets.QVBoxLayout()\n big_box = QOverlay(self)\n big_box.setLayout(self.vbox)\n\n scroll = QtWidgets.QScrollArea()\n scroll.setWidget(big_box)\n scroll.setWidgetResizable(True)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(scroll)\n\n self.last_drawn = 0\n# self.repository = Repository('/home/rixx/Projects/shack/augenkrebs')\n self.repository = Repository('/home/kunzefri/Projects/augenkrebs')\n self.commits = {}\n self.labels = []\n self.initUI()\n #self.repository.commit(message='Automated commit, will be reverted')\n #self.repository.revert(self.repository.HEAD)\n\n def initUI(self):\n self._gather_commits()\n self._sort_commits()\n #self._add_labels()\n self.drawCommits()\n\n def _gather_commits(self):\n for commit in self.repository.walk_full():\n qcommit = QCommit(Commit(commit.oid, self.repository), is_visible=True) \n\n for commit in self.repository[commit.oid].parents:\n qcommit.prev_nodes.append(self.commits[commit.oid])\n\n for parent in qcommit.prev_nodes:\n parent.next_nodes.append(qcommit)\n\n self.commits[qcommit.oid] = qcommit\n\n def _sort_commits(self):\n x = -1\n y = 1\n maxindent = 0\n\n for commit in self.repository.walk_topological():\n qcommit = self.commits[commit.oid]\n\n if len(qcommit.next_nodes) == 0:\n qcommit.color = random.choice(COLORS)\n maxindent += 1\n x = maxindent\n else:\n if len(qcommit.next_nodes) == 1:\n parent = qcommit.next_nodes[0]\n else:\n parent = sorted(qcommit.next_nodes, key=lambda child: child.position[0])[0]\n\n if len(parent.prev_nodes) == 1:\n x = parent.position[0]\n qcommit.color = parent.color\n elif parent.prev_nodes.index(qcommit) == 0:\n x = parent.position[0]\n qcommit.color = parent.color\n else:\n maxindent += 1\n x = maxindent\n qcommit.color = random.choice([c for c in COLORS if c != parent.color])\n\n maxindent -= (len(qcommit.next_nodes) -1)\n qcommit.textindent = maxindent + 1\n qcommit.position = (x, y)\n y+=1\n\n def _add_labels(self):\n for commit in self.commits.values():\n for label in commit.labels:\n self.labels.append(QLabelLabel(label, commit))\n\n\n def paintEvent(self, event):\n if time.time() > self.last_drawn + 100:\n self.last_drawn = time.time()\n while self.vbox.takeAt(0):\n b = self.vbox.takeAt(0)\n del b\n\n self.drawCommits()\n\n \n def drawCommits(self):\n for commit in sorted(self.commits.values(), key=lambda commit: commit.position[1]):\n self.vbox.addWidget(commit.get_label())\n\n\nclass QOverlay(QtWidgets.QWidget):\n def __init__(self, p, *args):\n super().__init__(*args)\n self.window = p\n\n def paintEvent(self, event):\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.setRenderHints(qp.renderHints() | QtGui.QPainter.Antialiasing)\n\n for commit in self.window.commits.values():\n for child in commit.next_nodes:\n self._draw_connection(commit, child, qp)\n\n for label in self.window.labels:\n self._draw_label(label, qp)\n\n qp.end()\n\n def _draw_label(self, label, qp):\n pen = QtGui.QPen()\n pen.setColor(QtCore.Qt.blue)\n pen.setWidth(1)\n pen.setJoinStyle(QtCore.Qt.RoundJoin)\n\n qp.setPen(pen)\n commitlabel = label.qcommit.get_label()\n x = commitlabel.x() + 2\n y = commitlabel.y()\n qp.fillRect(x,y, 10, 10, QtCore.Qt.blue)\n\n def _draw_connection(self, commit1, commit2, qp):\n pen = QtGui.QPen()\n pen.setColor(commit1.get_label()._color)\n 
pen.setWidth(2)\n pen.setJoinStyle(QtCore.Qt.RoundJoin)\n\n qp.setPen(pen)\n\n l1 = commit1.get_label()\n l2 = commit2.get_label()\n \n middle = l1._point_size[0]/2\n x1 = l1.x() + l1._point[0] + middle\n y1 = l1.y() + l1._point[1] + middle\n x2 = l2.x() + l2._point[0] + middle\n y2 = l2.y() + l2._point[1] + middle\n\n if x1 == x2:\n qp.drawLine(x1, y1, x2, y2)\n elif ((x1 > x2) and (y1 < y2)) or ((x1 < x2) and (y1 > y2)):\n if (y1 > y2):\n y1, y2 = y2, y1\n x1, x2 = x2, x1\n\n pen.setColor(commit2.get_label()._color)\n qp.setPen(pen)\n\n between_y = y2 - commit2.get_label().height()\n qp.drawLine(x1, y1, x1, between_y)\n qp.drawLine(x1, between_y, x2, y2)\n else:\n qp.drawLine(x1, y1, x2, y2)\n\n\n\n","sub_path":"git-gui/frontend/qrepository.py","file_name":"qrepository.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"184331497","text":"import cv2\n\nimg = cv2.imread(\"F:\\Final Project\\Train Images\\Afridi\\\\1.jpg\")\n\nresized = cv2.resize(img, (int(img.shape[1]/2), int(img.shape[0]/2)))\n\ncv2.imshow(\"Afridi\", resized)\n\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()","sub_path":"Practice/resize_div.py","file_name":"resize_div.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"109310274","text":"# lab03_test.py\n# PUT YOUR NAME AND NETID HERE\n# PUT THE DATE YOU LAST CHANGED THIS FILE HERE\n# Skeleton by Walker M. White (wmw2), Lillian Lee (LJL2), Feb 2018\n\n\n\"\"\"(Skeleton of) tests for lab03.py\"\"\"\n\nimport cornellasserts # For assert_equals and assert_true\nimport lab03 # This is what we are testing\n\n\ndef test_replace_first():\n \"\"\"Testing function for lab03.replace_first\"\"\"\n\n print(\"Testing lab03.replace_first\")\n\n print(\"first test case\")\n result = lab03.replace_first('methos', 's', 'd')\n cornellasserts.assert_equals('method', result)\n \n print(\"second test case\")\n result = lab03.replace_first('Misissippi', 's', 'ss')\n cornellasserts.assert_equals('Mississippi', result)\n \n print(\"third test case\")\n result = lab03.replace_first('decrepif', 'f', 't')\n cornellasserts.assert_equals('decrepit', result)\n \n print(\"fourth test case\")\n result = lab03.replace_first('aggreived', 'ei', 'ie')\n cornellasserts.assert_equals('aggrieved', result)\n \n print(\"fifth test case\")\n result = lab03.replace_first('em', 'em', 'umm')\n cornellasserts.assert_equals('umm', result)\n \n print(\"sixth test case\")\n result = lab03.replace_first('judgement', 'e', '')\n cornellasserts.assert_equals('judgment', result)\n\n###########\n# Calls to testing functions go here\n\ntest_replace_first()\n\nprint('Module lab03: all tests passed')\n","sub_path":"CS_1110/lab03_string_manipulation/lab03_test.py","file_name":"lab03_test.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"341439526","text":"from tkinter import *\r\nimport tkinter as tk\r\nfrom PIL import Image, ImageTk\r\n# pillow (PIL) library used for jpeg image background compatibility with tkinter\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom pandas import DataFrame\r\n# matplotlib used for creating the graph and Pandas is used to display it\r\n\r\n\r\n# this is the function called when the \"show my results\" button is clicked\r\ndef resultsButton():\r\n # 
results.insert() future use\r\n print('clicked')\r\n displayGraph()\r\n\r\n# this is the function to update the graph with the output from the jar file\r\ndef displayGraph():\r\n yearlyAmounts = [100, 200, 300, 400, 500, 600, 100]\r\n graph = Tk()\r\n graph.title(\"Retirement Planner\")\r\n years= []\r\n for i in range(len(yearlyAmounts)-1):\r\n years.append(2020+i)\r\n requiredSaving = yearlyAmounts.pop(len(yearlyAmounts) - 1)\r\n data = {'Year':[],'Total Savings': []}\r\n\r\n # Make the data the actual inputs\r\n data.get('Year').extend(years)\r\n data.get('Total Savings').extend(yearlyAmounts)\r\n\r\n df = DataFrame(data, columns=['Year','Total Savings'])\r\n\r\n figure1 = plt.Figure(figsize=(6,5),dpi=100)\r\n ax1 = figure1.add_subplot(111)\r\n line1 = FigureCanvasTkAgg(figure1, graph)\r\n line1.get_tk_widget().pack(side=tk.LEFT,fill=tk.BOTH)\r\n df = df[['Year','Total Savings']].groupby('Year').sum()\r\n df.plot(kind='line', legend=True,ax=ax1,color='r',marker='o', fontsize=10)\r\n ax1.set_title('Retirement Planner')\r\n graph.mainloop()\r\n\r\n\r\n\r\nroot = Tk()\r\nimage = Image.open(\"C:/Users/garvi/PycharmProjects/ITCS4102Project-master/images/retirement4.jpg\") #need to change based on your file directory\r\nphoto_image = ImageTk.PhotoImage(image)\r\nlabel = Label(root, image=photo_image)\r\nlabel.pack()\r\n# This is the section of code which creates the main window\r\nroot.geometry('800x500')\r\n# root.resizable(0, 0)\r\nroot.configure(background='#7FFFD4')\r\nroot.title('Retirement Report')\r\n\r\n# Creates label to give user direction\r\nLabel(root, text='Please Answer The Questions Below.', bg='#7FFFD4', font=('courier', 12, 'normal')).place(x=8, y=9)\r\n\r\n# Creates current age label\r\nLabel(root, text='Current Age:', bg='#7FFFD4', font=('courier', 10, 'normal')).place(x=8, y=39)\r\n\r\n# Accepts current age input\r\ncurrentAgeInput = StringVar()\r\ncurrentAgeInput = Entry(root, textvariable=currentAgeInput)\r\ncurrentAgeInput.place(x=118, y=39)\r\n\r\n# Creates goal retirement age label\r\nLabel(root, text='Goal Retirement Age:', bg='#7FFFD4', font=('courier', 10, 'normal')).place(x=8, y=69)\r\n\r\n# Accepts retirement age input\r\nretirementAgeInput = StringVar()\r\nretirementAgeInput = Entry(root, textvariable=retirementAgeInput)\r\nretirementAgeInput.place(x=188, y=69)\r\n\r\n# Creates gross income label\r\nLabel(root, text='Annual Gross Income:', bg='#7FFFD4', font=('courier', 10, 'normal')).place(x=8, y=99)\r\n\r\n# Accepts gross income input\r\ngrossIncomeInput = StringVar()\r\ngrossIncomeInput = Entry(root, textvariable=grossIncomeInput)\r\ngrossIncomeInput.place(x=188, y=99)\r\n\r\n# Creates total amount in savings label\r\nLabel(root, text='Total Amount In Savings:', bg='#7FFFD4', font=('courier', 10, 'normal')).place(x=8, y=129)\r\n\r\n# accepts savings input\r\nsavingsInput = StringVar()\r\nsavingsInput = Entry(root, textvariable=savingsInput)\r\nsavingsInput.place(x=218, y=129)\r\n\r\n# creates annual expenses label\r\nLabel(root, text='Average Annual Expenses:', bg='#7FFFD4', font=('courier', 10, 'normal')).place(x=8, y=159)\r\n\r\n# accepts annual expenses text entry\r\nexpensesInput = StringVar()\r\nexpensesInput = Entry(root, textvariable=expensesInput)\r\nexpensesInput.place(x=218, y=159)\r\n\r\n# creates results label\r\nLabel(root, text='Results:', bg='#7FFFD4', font=('courier', 12, 'normal')).place(x=8, y=261)\r\n\r\n# text box for displaying results\r\nresults = Text(root, height=7, width=20)\r\nresults.place(x=98, y=269)\r\n\r\n# creates results 
button, calls resultsButton() function\r\nButton(root, text='Display My Results!', bg='#FAEBD7', font=('courier', 12, 'normal'),\r\n command=resultsButton).place(x=8, y=199)\r\n\r\nroot.mainloop()\r\n","sub_path":"retirementReport.py","file_name":"retirementReport.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"295916548","text":"# -*- coding: utf-8 -*-\n\"\"\"Implementation of the ``variant_calling`` step\n\nThe ``variant_calling`` step takes as the input the results of the ``ngs_mapping`` step\n(aligned reads in BAM format) and performs germline variant calling. The result are variant files\nwith germline variants (bgzip-ed and indexed VCF files).\n\nUsually, the variant calling step is followed by the ``variant_annotation`` step.\n\n==========\nStability\n==========\n\nThe HaplotypeCaller and UnifiedGenotyper from the Genome Analysis Toolkit (GATK) are considered\nstable.\n\nThe other supported callers are still in the experimental stage and may not be stable.\n\n==========\nStep Input\n==========\n\nThe variant calling step uses Snakemake sub workflows for using the result of the ``ngs_mapping``\nstep.\n\n===========\nStep Output\n===========\n\nFor all pedigrees, variant calling will be performed on the primary DNA NGS libraries of all\nmembers, separately for each configured read mapper and variant caller. The name of the primary\nDNA NGS library of the index will be used as an identification token in the output file. For each\nread mapper, variant caller, and pedigree, the following files will be generated:\n\n- ``{mapper}.{var_caller}.{lib_name}.vcf.gz``\n- ``{mapper}.{var_caller}.{lib_name}.vcf.gz.tbi``\n- ``{mapper}.{var_caller}.{lib_name}.vcf.gz.md5``\n- ``{mapper}.{var_caller}.{lib_name}.vcf.gz.tbi.md5``\n\nFor example, it might look as follows for the example from above:\n\n::\n\n output/\n +-- bwa.freebayes.P001-N1-DNA1-WES1\n | `-- out\n | |-- bwa.freebayes.P001-N1-DNA1-WES1.vcf.gz\n | |-- bwa.freebayes.P001-N1-DNA1-WES1.vcf.gz.tbi\n | |-- bwa.freebayes.P001-N1-DNA1-WES1.vcf.gz.md5\n | `-- bwa.freebayes.P001-N1-DNA1-WES1.vcf.gz.tbi.md5\n [...]\n\nGenerally, these files will be unfiltered, i.e., contain low-quality variants.\n\n====================\nGlobal Configuration\n====================\n\n- If GATK HaplotypeCaller or GATK UnifiedGenotyper are activated then\n ``static_data_config/dbsnp/path`` must be properly configured\n- ``static_data_config/reference/path`` must be set appropriately\n\n=====================\nDefault Configuration\n=====================\n\nThe default configuration is as follows.\n\n.. include:: DEFAULT_CONFIG_variant_calling.rst\n\n==================================\nAvailable Germline Variant Callers\n==================================\n\nThe following germline variant callers are currently available\n\n- ``\"bcftools\"`` -- samtools mpileup plus bcftools\n- ``\"freebayes\"``\n- ``\"gatk_hc\"`` -- GATK HaplotypeCaller\n- ``\"gatk_hc_gvcf\"`` -- GATK HaplotypeCaller via GVCF files\n- ``\"gatk_ug\"`` -- GATK UnifiedGenotyper\n- ``\"platypus\"``\n\n=======\nReports\n=======\n\nCurrently, the following reports are generated (and are linked from the output directory):\n\n- bcftools_stats (txt) is always generated by default.\n Within this file, stats are broken down separately for known and novel events.\n Report contents depend on the version of bcftools used. 
With version 1.3.1 the report includes\n the following details:\n\n - SN, Summary numbers\n - TSTV, Transitions/transversions\n - SiS, Singleton stats\n - AF, Stats by non-reference allele frequency\n - QUAL, Stats by quality\n - IDD, InDel distribution\n - ST, Substitution types\n - DP, Depth distribution\n - PSC, Per-sample counts\n - PSI, Per-Sample indels\n - HWE, Hardy-Weinberg equilibrium\n\n- jannovar_statistics (txt) is always generated by default.\n Requires jannovar_statistics/path_ser to be set to a \".ser\" file.\n Using jannovar-cli and htslib version 1.3.2 the report includes the following details:\n\n - putative_impacts (counts by type)\n - variant_effects (counts by type)\n - genome_regions (counts by type)\n - ts_tv_count (count TS, count TV)\n - alt_allele_count (counts for each number of alleles)\n - filter_count (counts by type, if any)\n - is_filtered_count (count passed and failed, if filtering is used)\n - contig_counts (count events per chromosome)\n\n.. _variant_calling_parallel_execution:\n\n==================\nParallel Execution\n==================\n\nFor many of the variant callers, cluster-parallel execution has been implemented (indicated by\nhaving a ``drmaa_snippet`` configuration setting). Here, a temporary directory with a Snakemake\nworkflow is written out and then executed. The default behaviour that the temporary files are\nremoved in the case of an error. This behaviour can be changed by setting the ``keep_tmpdir``\nsetting to ``\"onerror\"`` or ``\"always\"``. Further, for debugging, the number of windows to\ncreate can be limited using ``debug_trunc_tokens`` (the default of ``0``) leads to the processing\nof all windows. Resource requirements in terms of memory or running time can be boosted using\n``job_mult_memory`` and ``job_mult_time`` (similarly for the joining step and ``merge_mult_*``).\n\nWhen the temporary directory is kept, a failed execution can be restarted by calling ``snakemake``\nin the temporary directory with the command line written to the file ``snakemake_call.sh``.\n\"\"\"\n\nfrom collections import OrderedDict\nimport os\nimport os.path\nimport sys\n\nfrom biomedsheets.shortcuts import GermlineCaseSheet, is_not_background\nfrom snakemake.io import expand\n\nfrom snappy_pipeline.utils import dictify, listify\nfrom snappy_pipeline.workflows.abstract import (\n BaseStep,\n BaseStepPart,\n LinkOutStepPart,\n WritePedigreeStepPart,\n)\nfrom snappy_pipeline.workflows.ngs_mapping import NgsMappingWorkflow\nfrom snappy_wrappers.tools.genome_windows import yield_regions\n\n__author__ = \"Manuel Holtgrewe \"\n\n#: Extensions of files to create as main payload\nEXT_VALUES = (\".vcf.gz\", \".vcf.gz.tbi\", \".vcf.gz.md5\", \".vcf.gz.tbi.md5\")\n\n#: Names of the files to create for the extension\nEXT_NAMES = (\"vcf\", \"tbi\", \"vcf_md5\", \"tbi_md5\")\n\n#: Available germline variant callers\nVARIANT_CALLERS = (\n \"bcftools\",\n \"freebayes\",\n \"gatk_hc\",\n \"gatk_hc_gvcf\",\n \"gatk_ug\",\n \"platypus\",\n \"varscan\",\n)\n\n#: Callers that support cohort-wide calling.\nCOHORT_WIDE_CALLERS = (\"gatk_hc_gvcf\", \"varscan\")\n\n#: Default configuration for the variant_calling step\nDEFAULT_CONFIG = r\"\"\"\n# Default configuration variant_calling\nstep_config:\n variant_calling:\n drmaa_snippet: '' # default, you can override by step below\n path_ngs_mapping: ../ngs_mapping # REQUIRED\n tools: ['gatk_ug']\n jannovar_statistics:\n path_ser: REQUIRED # REQUIRED\n platypus:\n num_threads: 16\n ignore_chroms: # patterns of chromosome names to 
ignore\n - NC_007605 # herpes virus\n - hs37d5 # GRCh37 decoy\n - chrEBV # Eppstein-Barr Virus\n - '*_decoy' # decoy contig\n - 'HLA-*' # HLA genes\n bcftools:\n max_depth: 4000\n max_indel_depth: 4000\n window_length: 10000000\n num_threads: 16\n ignore_chroms: # patterns of chromosome names to ignore\n - NC_007605 # herpes virus\n - hs37d5 # GRCh37 decoy\n - chrEBV # Eppstein-Barr Virus\n - '*_decoy' # decoy contig\n - 'HLA-*' # HLA genes\n freebayes:\n use_standard_filters: true\n window_length: 10000000\n num_threads: 16\n ignore_chroms: # patterns of chromosome names to ignore\n - NC_007605 # herpes virus\n - hs37d5 # GRCh37 decoy\n - chrEBV # Eppstein-Barr Virus\n - '*_decoy' # decoy contig\n - 'HLA-*' # HLA genes\n gatk_hc:\n # Parallelization configuration\n drmaa_snippet: '' # value to pass in as additional DRMAA arguments\n num_threads: 2 # number of cores to use locally\n window_length: 5000000 # split input into windows of this size, each triggers a job\n num_jobs: 500 # number of windows to process in parallel\n use_drmaa: true # use DRMAA for parallel processing\n restart_times: 0 # number of times to re-launch jobs in case of failure\n max_jobs_per_second: 10 # throttling of job creation\n max_status_checks_per_second: 10 # throttling of status jobs\n debug_trunc_tokens: 0 # truncation to first N tokens (0 for none)\n keep_tmpdir: never # keep temporary directory, {always, never, onerror}\n job_mult_memory: 1 # memory multiplier\n job_mult_time: 1 # running time multiplier\n merge_mult_memory: 1 # memory multiplier for merging\n merge_mult_time: 1 # running time multiplier for merging\n ignore_chroms: # patterns of chromosome names to ignore\n - NC_007605 # herpes virus\n - hs37d5 # GRCh37 decoy\n - chrEBV # Eppstein-Barr Virus\n - '*_decoy' # decoy contig\n - 'HLA-*' # HLA genes\n # GATK HC--specific configuration\n allow_seq_dict_incompatibility: false\n annotations:\n - BaseQualityRankSumTest\n - FisherStrand\n - GCContent\n - HaplotypeScore\n - HomopolymerRun\n - MappingQualityRankSumTest\n - MappingQualityZero\n - QualByDepth\n - ReadPosRankSumTest\n - RMSMappingQuality\n - DepthPerAlleleBySample\n - Coverage\n - ClippingRankSumTest\n - DepthPerSampleHC\n gatk_hc_gvcf:\n # Enable cohort-wide calling\n cohort_wide: true\n # Enable pedigree-wise calling\n pedigree_wise: true\n # Parallelization configuration\n drmaa_snippet: '' # value to pass in as additional DRMAA arguments\n num_threads: 2 # number of cores to use locally\n window_length: 5000000 # split input into windows of this size, each triggers a job\n num_jobs: 500 # number of windows to process in parallel\n num_jobs_combine_gvcf_cort: 0\n num_jobs_genotype_cohort: 0\n use_drmaa: true # use DRMAA for parallel processing\n restart_times: 10 # number of times to re-launch jobs in case of failure\n max_jobs_per_second: 10 # throttling of job creation\n max_status_checks_per_second: 10 # throttling of status jobs\n ignore_chroms: # patterns of chromosome names to ignore\n - NC_007605 # herpes virus\n - hs37d5 # GRCh37 decoy\n - chrEBV # Eppstein-Barr Virus\n - '*_decoy' # decoy contig\n - 'HLA-*' # HLA genes\n # GATK HC--specific configuration\n allow_seq_dict_incompatibility: false\n annotations:\n - BaseQualityRankSumTest\n - FisherStrand\n - GCContent\n - HaplotypeScore\n - HomopolymerRun\n - MappingQualityRankSumTest\n - MappingQualityZero\n - QualByDepth\n - ReadPosRankSumTest\n - RMSMappingQuality\n - DepthPerAlleleBySample\n - Coverage\n - ClippingRankSumTest\n - DepthPerSampleHC\n gatk_ug:\n # 
Parallelization configuration\n drmaa_snippet: '' # value to pass in as additional DRMAA arguments\n num_threads: 2 # number of cores to use locally\n window_length: 5000000 # split input into windows of this size, each triggers a job\n num_jobs: 500 # number of windows to process in parallel\n use_drmaa: true # use DRMAA for parallel processing\n restart_times: 0 # number of times to re-launch jobs in case of failure\n max_jobs_per_second: 10 # throttling of job creation\n max_status_checks_per_second: 10 # throttling of status jobs\n debug_trunc_tokens: 0 # truncation to first N tokens (0 for none)\n keep_tmpdir: never # keep temporary directory, {always, never, onerror}\n job_mult_memory: 1 # memory multiplier\n job_mult_time: 1 # running time multiplier\n merge_mult_memory: 1 # memory multiplier for merging\n merge_mult_time: 1 # running time multiplier for merging\n ignore_chroms: # patterns of chromosome names to ignore\n - NC_007605 # herpes virus\n - hs37d5 # GRCh37 decoy\n - chrEBV # Eppstein-Barr Virus\n - '*_decoy' # decoy contig\n - 'HLA-*' # HLA genes\n # GATK UG--specific configuration\n allow_seq_dict_incompatibility: false\n downsample_to_coverage: 250\n annotations:\n - BaseQualityRankSumTest\n - FisherStrand\n - GCContent\n - HaplotypeScore\n - HomopolymerRun\n - MappingQualityRankSumTest\n - MappingQualityZero\n - QualByDepth\n - ReadPosRankSumTest\n - RMSMappingQuality\n - DepthPerAlleleBySample\n - Coverage\n - ClippingRankSumTest\n - DepthPerSampleHC\n # Configuration for cohort-wide variant calling using Varscan.\n varscan:\n # TODO: only cohort wide used and tested so far, settings should probably\n # be switched around\n # Enable cohort-wide calling\n cohort_wide: true\n # Enable pedigree-wise calling\n pedigree_wise: false\n # Divisor for window length in case of cohort-wide\n cohort_window_divisor: 50\n # Parallelization configuration\n drmaa_snippet: '' # value to pass in as additional DRMAA arguments\n num_threads: 2 # number of cores to use locally\n window_length: 5000000 # split input into windows of this size, each triggers a job\n num_jobs: 500 # number of windows to process in parallel\n use_drmaa: true # use drmaa for parallel processing\n restart_times: 0 # number of times to re-launch jobs in case of failure\n max_jobs_per_second: 10 # throttling of job creation\n max_status_checks_per_second: 10 # throttling of status jobs\n debug_trunc_tokens: 0 # truncation to first N tokens (0 for none)\n keep_tmpdir: never # keep temporary directory, {always, never, onerror}\n job_mult_memory: 1 # memory multiplier\n job_mult_time: 1 # running time multiplier\n merge_mult_memory: 1 # memory multiplier for merging\n merge_mult_time: 1 # running time multiplier for merging\n ignore_chroms: # patterns of chromosome names to ignore\n - nc_007605 # herpes virus\n - hs37d5 # grch37 decoy\n - chrebv # eppstein-barr virus\n - '*_decoy' # decoy contig\n - 'hla-*' # hla genes\n # Configuration for samtools mpileup\n max_depth: 4000\n max_indel_depth: 4000\n min_bq: 13\n no_baq: True\n # Configuration for Varscan\n min_coverage: 8\n min_reads2: 2\n min_avg_qual: 15\n min_var_freq: 0.01\n min_freq_for_hom: 0.75\n p_value: 99e-02\n\"\"\"\n\n\nclass VariantCallingStepPart(BaseStepPart):\n \"\"\"Base class for germline variant calling step parts\n\n Variant calling is performed on a per-pedigree level. 
The (one) index individual is used\n for naming the output file.\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self.base_path_out = (\n \"work/{{mapper}}.{var_caller}.{{index_library_name}}/out/\"\n \"{{mapper}}.{var_caller}.{{index_library_name}}{ext}\"\n )\n self.base_path_tmp = self.base_path_out.replace(\"/out/\", \"/tmp/\")\n # Build shortcut from index library name to pedigree\n self.index_ngs_library_to_pedigree = OrderedDict()\n for sheet in self.parent.shortcut_sheets:\n self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)\n\n def get_input_files(self, action):\n @listify\n def input_function(wildcards):\n \"\"\"Helper wrapper function\"\"\"\n # TODO: Actually, the structure should be {'ped': ..., 'bam': [], 'bai': []}\n # Get shortcut to NGS mapping sub workflow\n ngs_mapping = self.parent.sub_workflows[\"ngs_mapping\"]\n # Get names of primary libraries of the selected pedigree. The pedigree is selected\n # by the primary DNA NGS library of the index.\n pedigree = self.index_ngs_library_to_pedigree[wildcards.index_library_name]\n if not pedigree.index or not pedigree.index.dna_ngs_library:\n msg = \"INFO: pedigree without index (names: {})\" # pragma: no cover\n donor_names = list(sorted(d.name for d in pedigree.donors))\n print(msg.format(donor_names), file=sys.stderr) # pragma: no cover\n return\n if pedigree.index.dna_ngs_library:\n yield \"work/write_pedigree.{library_name}/out/{library_name}.ped\".format(\n library_name=pedigree.index.dna_ngs_library.name, **wildcards\n )\n for donor in pedigree.donors:\n if donor.dna_ngs_library:\n for ext in (\".bam\", \".bam.bai\"):\n tpl = \"output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}\"\n yield ngs_mapping(\n tpl.format(\n library_name=donor.dna_ngs_library.name, ext=ext, **wildcards\n )\n )\n\n assert action == \"run\", \"Unsupported action\"\n return input_function\n\n def get_output_files(self, action):\n \"\"\"Return output files that all germline variant calling sub steps must\n return (VCF + TBI file)\n \"\"\"\n assert action == \"run\"\n return dict(\n zip(EXT_NAMES, expand(self.base_path_out, var_caller=[self.name], ext=EXT_VALUES))\n )\n\n @dictify\n def _get_log_file(self, action):\n \"\"\"Return dict of log files.\"\"\"\n prefix = (\n \"work/{{mapper}}.{caller}.{{index_library_name}}/log/\"\n \"{{mapper}}.{caller}.{{index_library_name}}\"\n ).format(caller=self.__class__.name)\n key_ext = (\n (\"log\", \".log\"),\n (\"conda_info\", \".conda_info.txt\"),\n (\"conda_list\", \".conda_list.txt\"),\n )\n for key, ext in key_ext:\n yield key, prefix + ext\n\n\nclass BcftoolsStepPart(VariantCallingStepPart):\n \"\"\"Germline variant calling with bcftools\"\"\"\n\n name = \"bcftools\"\n\n def update_cluster_config(self, cluster_config):\n cluster_config[\"variant_calling_bcftools_run\"] = {\n \"mem\": int(3.75 * 1024 * 16),\n \"time\": \"48:00\",\n \"ntasks\": 16,\n }\n\n\nclass FreebayesStepPart(VariantCallingStepPart):\n \"\"\"Germline variant calling with freebayes\"\"\"\n\n name = \"freebayes\"\n\n def update_cluster_config(self, cluster_config):\n cluster_config[\"variant_calling_freebayes_run\"] = {\n \"mem\": int(3.75 * 1024 * 16),\n \"time\": \"48:00\",\n \"ntasks\": 16,\n }\n\n\nclass GatkCallerStepPartBase(VariantCallingStepPart):\n \"\"\"Germline variant calling with GATK caller\"\"\"\n\n def check_config(self):\n if self.__class__.name not in self.config[\"tools\"]:\n return # caller not enabled, skip # pragma: no cover\n self.parent.ensure_w_config(\n
(\"static_data_config\", \"dbsnp\", \"path\"),\n \"dbSNP not configured but required for {}\".format(self.__class__.name),\n )\n\n def update_cluster_config(self, cluster_config):\n cluster_config[\"variant_calling_{}_run\".format(self.__class__.name)] = {\n \"mem\": 14 * 1024,\n \"time\": \"80:00\",\n \"ntasks\": 1,\n }\n\n\nclass GatkHaplotypeCallerStepPart(GatkCallerStepPartBase):\n \"\"\"Germline variant calling with GATK HaplotypeCaller\"\"\"\n\n name = \"gatk_hc\"\n\n\nclass GatkUnifiedGenotyperStepPart(GatkCallerStepPartBase):\n \"\"\"Germline variant calling with GATK UnifiedGenotyper\"\"\"\n\n name = \"gatk_ug\"\n\n\nclass PlatypusStepPart(VariantCallingStepPart):\n \"\"\"Germline variant calling with Platypus\"\"\"\n\n name = \"platypus\"\n\n def update_cluster_config(self, cluster_config):\n cluster_config[\"variant_calling_platypus_run\"] = {\n \"mem\": int(3.75 * 1024 * 16),\n \"time\": \"20:00\",\n \"ntasks\": 16,\n }\n\n\nclass GatkHaplotypeCallerGvcfStepPart(BaseStepPart):\n \"\"\"Base class for germline variant calling step parts\n\n Variant calling is performed on a per-pedigree level. The (one) index individual is used\n for naming the output file.\n \"\"\"\n\n name = \"gatk_hc_gvcf\"\n\n #: Actions in GATK HC GVCF workflow\n actions = (\"discover\", \"genotype_pedigree\", \"combine_gvcf\", \"genotype_cohort\")\n\n #: Directory infixes\n dir_infixes = {\n \"discover\": \"{mapper}.gatk_hc_gvcf.discover.{library_name}\",\n \"genotype_pedigree\": \"{mapper}.gatk_hc_gvcf.{index_library_name,[^\\.]+}\",\n \"combine_gvcf\": \"{mapper}.gatk_hc_gvcf.combine_gvcf\",\n \"genotype_cohort\": \"{mapper}.gatk_hc_gvcf.whole_cohort\",\n }\n\n def __init__(self, parent):\n super().__init__(parent)\n self.base_path_out = (\n \"work/{{mapper}}.{var_caller}.{{ngs_library}}/out/\"\n \"{{mapper}}.{var_caller}.{{ngs_library}}{ext}\"\n )\n self.base_path_tmp = self.base_path_out.replace(\"/out/\", \"/tmp/\")\n # Build shortcut from index library name to pedigree\n self.index_ngs_library_to_pedigree = OrderedDict()\n for sheet in self.parent.shortcut_sheets:\n self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)\n\n def get_input_files(self, action):\n \"\"\"Return appropriate input function for the given action\"\"\"\n assert action in self.actions\n mapping = {\n \"discover\": self._get_input_files_discover,\n \"genotype_pedigree\": self._get_input_files_genotype_pedigree,\n \"combine_gvcf\": self._get_input_files_combine_gvcf,\n \"genotype_cohort\": self._get_input_files_genotype_cohort,\n }\n return mapping[action]\n\n @listify\n def _get_input_files_discover(self, wildcards):\n \"\"\"Return input files for \"discover\" action\"\"\"\n ngs_mapping = self.parent.sub_workflows[\"ngs_mapping\"]\n for ext in (\".bam\", \".bam.bai\"):\n tpl = \"output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}\"\n yield ngs_mapping(tpl.format(ext=ext, **wildcards))\n\n @listify\n def _get_input_files_genotype_pedigree(self, wildcards):\n \"\"\"Return input files for \"genotype_pedigree\" action\"\"\"\n pedigree = self.index_ngs_library_to_pedigree[wildcards.index_library_name]\n for donor in pedigree.donors:\n paths = self.get_output_files(\"discover\")\n yield paths[\"vcf\"].format(library_name=donor.dna_ngs_library.name, **wildcards)\n\n @listify\n def _get_input_files_combine_gvcf(self, wildcards):\n \"\"\"Return input files for \"combine_gvcf\" action\"\"\"\n for sheet in self.parent.shortcut_sheets:\n for donor in sheet.donors:\n paths = 
self.get_output_files(\"discover\")\n yield paths[\"vcf\"].format(library_name=donor.dna_ngs_library.name, **wildcards)\n\n @listify\n def _get_input_files_genotype_cohort(self, wildcards):\n \"\"\"Return input files for \"genotype_cohort\" action\"\"\"\n for path in self.get_output_files(\"combine_gvcf\")[\"vcf\"]:\n yield path.format(**wildcards)\n\n def get_args(self, action):\n \"\"\"Return function that maps wildcards to dict for input files\"\"\"\n assert action == \"combine_gvcf\", \"Unsupported actions\"\n return {\"genome_regions\": self._get_args_gvcf_regions()}\n\n def _get_args_gvcf_regions(self):\n \"\"\"Return list of regions to use for GVCF parallelization\n\n Returns ``OrderedDict`` with grouping by chromsome name.\n \"\"\"\n fai_path = self.w_config[\"static_data_config\"][\"reference\"][\"path\"] + \".fai\"\n window_length = max(\n 10 * 1000 * 1000, self.parent.config[\"gatk_hc_gvcf\"][\"window_length\"] // 10\n )\n ignore_chroms = self.parent.config[\"gatk_hc_gvcf\"][\"ignore_chroms\"]\n result = OrderedDict()\n with open(fai_path, \"rt\") as fai_file:\n for region in yield_regions(fai_file, window_length, ignore_chroms=ignore_chroms):\n # Note that we have to convert GenomeRegion to dict here via vars\n result.setdefault(region.chrom, []).append(vars(region))\n return result\n\n @dictify\n def get_output_files(self, action):\n \"\"\"Return output files that all germline variant calling sub steps must\n return (VCF + TBI file)\n \"\"\"\n assert action in self.actions\n if action != \"combine_gvcf\":\n for name, ext in {\"vcf\": \".vcf.gz\", \"tbi\": \".vcf.gz.tbi\"}.items():\n if action == \"discover\":\n ext = \".g\" + ext\n infix = self.dir_infixes[action].replace(r\",[^\\.]+\", \"\")\n yield name, \"work/\" + infix + \"/out/\" + infix + ext\n yield name + \"_md5\", \"work/\" + infix + \"/out/\" + infix + ext + \".md5\"\n else:\n result = OrderedDict()\n for chrom, _ in self._get_args_gvcf_regions().items():\n chrom = chrom.replace(\".\", \"_\")\n for name, ext in {\"vcf\": \".g.vcf.gz\", \"tbi\": \".g.vcf.gz.tbi\"}.items():\n infix = self.dir_infixes[action].replace(r\",[^\\.]+\", \"\")\n result.setdefault(name, []).append(\n \"work/\" + infix + \"/out/\" + infix + \".\" + chrom + ext\n )\n result.setdefault(name + \"_md5\", []).append(\n \"work/\" + infix + \"/out/\" + infix + \".\" + chrom + ext + \".md5\"\n )\n yield from result.items()\n\n def get_log_file(self, action):\n assert action in self.actions\n infix = self.dir_infixes[action].replace(r\",[^\\.]+\", \"\")\n return \"work/\" + infix + \"/log/snakemake.log\"\n\n def update_cluster_config(self, cluster_config):\n for action in self.actions:\n if action == \"combine_gvcf\":\n cluster_config[\"variant_calling_gatk_hc_gvcf_{action}\".format(action=action)] = {\n \"mem\": 10 * 1024,\n \"time\": \"240:00\",\n \"ntasks\": 1,\n }\n else:\n cluster_config[\"variant_calling_gatk_hc_gvcf_{action}\".format(action=action)] = {\n \"mem\": 10 * 1024,\n \"time\": \"80:00\",\n \"ntasks\": 1,\n }\n\n\nclass VarscanStepPart(BaseStepPart):\n \"\"\"Variant calling using Varscan.\n\n Variants are called in a whole-pedigree or whole-cohort fashion.\n \"\"\"\n\n name = \"varscan\"\n\n #: Actions in GATK HC GVCF workflow\n actions = (\"call_pedigree\", \"call_cohort\")\n\n #: Directory infixes\n dir_infixes = {\n \"call_pedigree\": \"{mapper}.varscan.{index_library_name,[^\\.]+}\",\n \"call_cohort\": \"{mapper}.varscan.whole_cohort\",\n }\n\n def __init__(self, parent):\n super().__init__(parent)\n self.base_path_out = (\n 
\"work/{{mapper}}.{var_caller}.{{ngs_library}}/out/\"\n \"{{mapper}}.{var_caller}.{{ngs_library}}{ext}\"\n )\n self.base_path_tmp = self.base_path_out.replace(\"/out/\", \"/tmp/\")\n # Build shortcut from index library name to pedigree\n self.index_ngs_library_to_pedigree = OrderedDict()\n for sheet in self.parent.shortcut_sheets:\n self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)\n\n def get_input_files(self, action):\n \"\"\"Return appropriate input function for the given action\"\"\"\n assert action in self.actions\n mapping = {\n \"call_pedigree\": self._get_input_files_call_pedigree,\n \"call_cohort\": self._get_input_files_call_cohort,\n }\n return mapping[action]\n\n @listify\n def _get_input_files_call_pedigree(self, wildcards):\n \"\"\"Return input files for \"call_pedigree\" action\"\"\"\n ngs_mapping = self.parent.sub_workflows[\"ngs_mapping\"]\n pedigree = self.index_ngs_library_to_pedigree[wildcards.index_library_name]\n for donor in pedigree.donors:\n for ext in (\".bam\", \".bam.bai\"):\n tpl = \"output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}\"\n library_name = donor.dna_ngs_library.name\n yield ngs_mapping(tpl.format(ext=ext, library_name=library_name, **wildcards))\n\n @listify\n def _get_input_files_call_cohort(self, wildcards):\n \"\"\"Return input files for \"combine_gvcf\" action\"\"\"\n ngs_mapping = self.parent.sub_workflows[\"ngs_mapping\"]\n for library_name in sorted(self.index_ngs_library_to_pedigree.keys()):\n for ext in (\".bam\", \".bam.bai\"):\n tpl = \"output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}\"\n yield ngs_mapping(tpl.format(ext=ext, library_name=library_name, **wildcards))\n\n @dictify\n def get_output_files(self, action):\n \"\"\"Return output files that all germline variant calling sub steps must\n return (VCF + TBI file)\n \"\"\"\n assert action in self.actions\n for name, ext in {\"vcf\": \".vcf.gz\", \"tbi\": \".vcf.gz.tbi\"}.items():\n infix = self.dir_infixes[action].replace(r\",[^\\.]+\", \"\")\n yield name, \"work/\" + infix + \"/out/\" + infix + ext\n yield name + \"_md5\", \"work/\" + infix + \"/out/\" + infix + ext + \".md5\"\n\n @dictify\n def _get_log_file(self, action):\n \"\"\"Return dict of log files.\"\"\"\n infix = self.dir_infixes[action].replace(r\",[^\\.]+\", \"\")\n prefix = os.path.join(\"work\", infix, \"log\", infix)\n key_ext = (\n (\"log\", \".log\"),\n (\"conda_info\", \".conda_info.txt\"),\n (\"conda_list\", \".conda_list.txt\"),\n )\n for key, ext in key_ext:\n yield key, prefix + ext\n\n def update_cluster_config(self, cluster_config):\n for action in self.actions:\n if action == \"call_pedigree\":\n cluster_config[\"variant_calling_varscan_call_pedigree\".format(action=action)] = {\n \"mem\": 4 * 1024,\n \"time\": \"168:00\",\n \"ntasks\": 1,\n }\n else:\n cluster_config[\"variant_calling_varscan_call_cohort\".format(action=action)] = {\n \"mem\": 16 * 1024,\n \"time\": \"168:00\",\n \"ntasks\": 1,\n }\n\n\nclass BcftoolsStatsStepPart(BaseStepPart):\n \"\"\"Base class for VCF statistics computation with \"bcftools stats\"\n\n Statistics are computed overall and per-sample\n \"\"\"\n\n # TODO: maybe we need to use \"--stats\" anyway and can handle pedigree VCF files then...\n\n name = \"bcftools_stats\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self.base_path_out = (\n \"work/{mapper}.{var_caller}.{index_ngs_library}/report/bcftools_stats/\"\n \"{mapper}.{var_caller}.{index_ngs_library}.{donor_ngs_library}\"\n )\n # Build shortcut from 
index library name to pedigree\n self.index_ngs_library_to_pedigree = OrderedDict()\n for sheet in self.parent.shortcut_sheets:\n self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)\n\n @dictify\n def get_input_files(self, action):\n \"\"\"Return path to input files\"\"\"\n assert action == \"run\", \"Unsupported actions\"\n # Return path to input VCF file\n yield \"vcf\", (\n \"work/{mapper}.{var_caller}.{index_ngs_library}/out/\"\n \"{mapper}.{var_caller}.{index_ngs_library}.vcf.gz\"\n )\n\n @dictify\n def get_output_files(self, action):\n \"\"\"Return output files that all germline variant calling sub steps must return (VCF +\n TBI file)\n \"\"\"\n assert action == \"run\"\n EXT_NAMES = {\"txt\": \".txt\", \"txt_md5\": \".txt.md5\"}\n for key, ext in EXT_NAMES.items():\n yield key, self.base_path_out + ext\n\n def get_log_file(self, action):\n assert action == \"run\"\n return (\n \"work/{mapper}.{var_caller}.{index_ngs_library}/log/bcftools_stats/\"\n \"{mapper}.{var_caller}.{index_ngs_library}.{donor_ngs_library}.log\"\n )\n\n def update_cluster_config(self, cluster_config):\n cluster_config[\"variant_calling_bcftools_stats_report\"] = {\n \"mem\": 1024,\n \"time\": \"02:00\",\n \"ntasks\": 1,\n }\n\n\nclass JannovarStatisticsStepPart(BaseStepPart):\n \"\"\"Base class for VCF statistics computation with \"jannovar statistics\"\n\n Statistics are computed overall and per-sample\n \"\"\"\n\n name = \"jannovar_statistics\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self.base_path_out = (\n \"work/{mapper}.{var_caller}.{index_ngs_library}/report/jannovar_statistics/\"\n \"{mapper}.{var_caller}.{index_ngs_library}\"\n )\n # Build shortcut from index library name to pedigree\n self.index_ngs_library_to_pedigree = OrderedDict()\n for sheet in self.parent.shortcut_sheets:\n self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)\n\n @dictify\n def get_input_files(self, action):\n \"\"\"Return path to input files\"\"\"\n assert action == \"run\", \"Unsupported actions\"\n # Return path to input VCF file\n yield \"vcf\", (\n \"work/{mapper}.{var_caller}.{index_ngs_library}/out/\"\n \"{mapper}.{var_caller}.{index_ngs_library}.vcf.gz\"\n )\n\n @dictify\n def get_output_files(self, action):\n \"\"\"Return output files that all germline variant calling sub steps must return (VCF +\n TBI file)\n \"\"\"\n assert action == \"run\"\n EXT_NAMES = {\"report\": \".txt\", \"report_md5\": \".txt.md5\"}\n for key, ext in EXT_NAMES.items():\n yield key, self.base_path_out + ext\n\n def get_log_file(self, action):\n assert action == \"run\"\n return (\n \"work/{mapper}.{var_caller}.{index_ngs_library}/log/jannovar_statistics/\"\n \"{mapper}.{var_caller}.{index_ngs_library}.log\"\n )\n\n def update_cluster_config(self, cluster_config):\n cluster_config[\"variant_calling_jannovar_statistics_report\"] = {\n \"mem\": int(3.75 * 1024 * 2),\n \"time\": \"04:00\",\n \"ntasks\": 2,\n }\n\n\nclass VariantCallingWorkflow(BaseStep):\n \"\"\"Perform germline variant calling\"\"\"\n\n name = \"variant_calling\"\n sheet_shortcut_class = GermlineCaseSheet\n\n @classmethod\n def default_config_yaml(cls):\n \"\"\"Return default config YAML, to be overwritten by project-specific one\"\"\"\n return DEFAULT_CONFIG\n\n def __init__(\n self, workflow, config, cluster_config, config_lookup_paths, config_paths, workdir\n ):\n super().__init__(\n workflow,\n config,\n cluster_config,\n config_lookup_paths,\n config_paths,\n workdir,\n (NgsMappingWorkflow,),\n )\n # 
Register sub step classes so the sub steps are available\n self.register_sub_step_classes(\n (\n WritePedigreeStepPart,\n BcftoolsStepPart,\n FreebayesStepPart,\n GatkHaplotypeCallerStepPart,\n GatkHaplotypeCallerGvcfStepPart,\n GatkUnifiedGenotyperStepPart,\n PlatypusStepPart,\n VarscanStepPart,\n BcftoolsStatsStepPart,\n JannovarStatisticsStepPart,\n LinkOutStepPart,\n )\n )\n # Register sub workflows\n self.register_sub_workflow(\"ngs_mapping\", self.config[\"path_ngs_mapping\"])\n\n @listify\n def get_result_files(self):\n \"\"\"Return list of result files for the NGS mapping workflow\n\n We will process all primary DNA libraries and perform joint calling within pedigrees\n \"\"\"\n name_pattern = \"{mapper}.{caller}.{index_library.name}\"\n for caller in self.config[\"tools\"]:\n if self.config[caller].get(\"pedigree_wise\", True):\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"),\n mapper=self.w_config[\"step_config\"][\"ngs_mapping\"][\"tools\"][\"dna\"],\n caller=[caller],\n ext=EXT_VALUES,\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", name_pattern + \".{ext}\"),\n mapper=self.w_config[\"step_config\"][\"ngs_mapping\"][\"tools\"][\"dna\"],\n caller=[caller],\n ext=(\n \"log\",\n \"log.md5\",\n \"conda_info.txt\",\n \"conda_info.txt.md5\",\n \"conda_list.txt\",\n \"conda_list.txt.md5\",\n ),\n )\n # Yield result files of whole-cohort genotyping\n for caller in COHORT_WIDE_CALLERS:\n if caller in self.config[\"tools\"] and self.config[caller][\"cohort_wide\"]:\n name_pattern = \"{mapper}.%s.whole_cohort\" % caller\n yield from expand(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"),\n mapper=self.w_config[\"step_config\"][\"ngs_mapping\"][\"tools\"][\"dna\"],\n caller=[caller],\n ext=EXT_VALUES,\n )\n # Yield report files\n yield from self._yield_bcftools_report_files()\n yield from self._yield_jannovar_report_files()\n\n def _yield_result_files(self, tpl, **kwargs):\n \"\"\"Build output paths from path template and extension list\"\"\"\n for sheet in filter(is_not_background, self.shortcut_sheets):\n for pedigree in sheet.cohort.pedigrees:\n if not pedigree.index:\n msg = \"INFO: pedigree without index (names: {})\" # pragma: no cover\n print(\n msg.format( # pragma: no cover\n list(sorted(d.name for d in pedigree.donors))\n ),\n file=sys.stderr,\n )\n continue # pragma: no cover\n elif not pedigree.index.dna_ngs_library: # pragma: no cover\n msg = \"INFO: pedigree index without DNA NGS library (names: {})\"\n print(\n msg.format( # pragma: no cover\n list(sorted(d.name for d in pedigree.donors))\n ),\n file=sys.stderr,\n )\n continue # pragma: no cover\n yield from expand(tpl, index_library=[pedigree.index.dna_ngs_library], **kwargs)\n\n def _yield_bcftools_report_files(self):\n name_pattern = \"{mapper}.{caller}.{index_library.name}\"\n tpl = (\n \"output/\"\n + name_pattern\n + \"/report/bcftools_stats/\"\n + name_pattern\n + \".{donor_library.name}.{ext}\"\n )\n for sheet in filter(is_not_background, self.shortcut_sheets):\n for caller in self.config[\"tools\"]:\n if self.config[caller].get(\"pedigree_wise\", True):\n for pedigree in sheet.cohort.pedigrees:\n if not pedigree.index:\n msg = \"INFO: pedigree without index (names: {})\" # pragma: no cover\n print(\n msg.format( # pragma: no cover\n list(sorted(d.name for d in pedigree.donors))\n ),\n file=sys.stderr,\n )\n continue # pragma: no cover\n elif not pedigree.index.dna_ngs_library: 
# pragma: no cover\n msg = \"INFO: pedigree index without DNA NGS library (names: {})\"\n print(\n msg.format( # pragma: no cover\n list(sorted(d.name for d in pedigree.donors))\n ),\n file=sys.stderr,\n )\n continue # pragma: no cover\n for donor in pedigree.donors:\n if donor.dna_ngs_library:\n yield from expand(\n tpl,\n mapper=self.w_config[\"step_config\"][\"ngs_mapping\"][\"tools\"][\n \"dna\"\n ],\n caller=[caller],\n index_library=[pedigree.index.dna_ngs_library],\n donor_library=[donor.dna_ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n\n def _yield_jannovar_report_files(self):\n name_pattern = \"{mapper}.{caller}.{index_library.name}\"\n tpl = \"output/\" + name_pattern + \"/report/jannovar_statistics/\" + name_pattern + \".{ext}\"\n for sheet in filter(is_not_background, self.shortcut_sheets):\n for caller in self.config[\"tools\"]:\n if self.config[caller].get(\"pedigree_wise\", True):\n for pedigree in sheet.cohort.pedigrees:\n if not pedigree.index:\n msg = \"INFO: pedigree without index (names: {})\" # pragma: no cover\n print(\n msg.format( # pragma: no cover\n list(sorted(d.name for d in pedigree.donors))\n ),\n file=sys.stderr,\n )\n continue # pragma: no cover\n elif not pedigree.index.dna_ngs_library: # pragma: no cover\n msg = \"INFO: pedigree index without DNA NGS library (names: {})\"\n print(\n msg.format( # pragma: no cover\n list(sorted(d.name for d in pedigree.donors))\n ),\n file=sys.stderr,\n )\n continue # pragma: no cover\n yield from expand(\n tpl,\n mapper=self.w_config[\"step_config\"][\"ngs_mapping\"][\"tools\"][\"dna\"],\n caller=[caller],\n index_library=[pedigree.index.dna_ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n # Statistics for whole-cohort files\n for caller in COHORT_WIDE_CALLERS:\n if caller in self.config[\"tools\"] and self.config[caller][\"cohort_wide\"]:\n name_pattern = \"{mapper}.%s.whole_cohort\" % caller\n tpl = (\n \"output/\"\n + name_pattern\n + \"/report/jannovar_statistics/\"\n + name_pattern\n + \".{ext}\"\n )\n yield from expand(\n tpl,\n mapper=self.w_config[\"step_config\"][\"ngs_mapping\"][\"tools\"][\"dna\"],\n ext=[\"txt\", \"txt.md5\"],\n )\n\n def check_config(self):\n \"\"\"Check that the path to the NGS mapping is present\"\"\"\n self.ensure_w_config(\n (\"step_config\", \"variant_calling\", \"path_ngs_mapping\"),\n \"Path to NGS mapping not configured but required for variant calling\",\n )\n self.ensure_w_config(\n (\"static_data_config\", \"reference\", \"path\"),\n \"Path to reference FASTA not configured but required for variant calling\",\n )\n # Check that only valid tools are selected\n selected = set(self.w_config[\"step_config\"][\"variant_calling\"][\"tools\"])\n invalid = selected - set(VARIANT_CALLERS)\n if invalid:\n raise Exception(\n \"Invalid variant callers selected: {}\".format( # pragma: no cover\n list(sorted(invalid))\n )\n )\n","sub_path":"snappy_pipeline/workflows/variant_calling/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":44349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"209483398","text":"#!/usr/bin/python3\n\nfrom anytree import Node\nimport utils\nimport time\nimport requests\nimport json\nimport main\nimport config\nimport api\nimport ratio\n\n\ndef get_total_amount_assets(list_assets, currency) -> float:\n amount = 0\n for (a_id, q) in list_assets:\n curr = 1\n if utils.get_asset_currency_by_id(a_id) != currency:\n curr = utils.rate_tgt_curr(a_id, currency)\n
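 # price times quantity, converted into the target currency (divide by the FX rate)\n amount += utils.get_last_price_by_id(a_id) 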
* q / curr\n return amount\n\n\n# compute the (unweighted) return\n# list_id = list of asset ids\ndef compute_rend_without_weight(list_id: list) -> float:\n r = 0\n w = len(list_id)\n for a_id in list_id:\n r += ratio.get_rendement_by_id(a_id) / w\n\n return r\n\n# compute the weighted return\n# list_assets = list of (asset_id, quantity) pairs\ndef compute_rend_with_weight(list_assets: list, currency='EUR') -> float:\n r = 0\n for (a_id, q) in list_assets:\n r += ratio.get_rendement_by_id(a_id) * \\\n (q * utils.get_last_price_by_id(a_id) / \\\n get_total_amount_assets(list_assets, currency))\n\n return r\n\n\nclass Wallet():\n id = 0 # contains the id of the portfolio\n label = 'epita_ptf_6' # portfolio label \n assets = list() # contains pair (assets_id, quantity)\n currency = 'EUR' # currency\n ret = -1 # the return of the wallet\n volatility = -1 # the volatility of the wallet\n cur_tree = None # contains the actual tree\n best_tree = None # contains the tree which gives the best ret for the wallet\n\n\n def __init__(self, id=0, assets=list(), currency='EUR', ret=-1, volatility=-1):\n\n main.print_notif(\"Creating the portfolio\")\n\n self.id = id \n self.assets = assets\n self.currency = currency\n self.ret = ret\n self.volatility = volatility\n self.cur_tree = None\n self.best_tree = None\n\n if id == 0:\n for a in config.assets:\n if utils.get_asset_type(a) == 'PORTFOLIO':\n self.id = utils.get_asset_id(a)\n if self.id == 0:\n raise RuntimeError(\"Error: portfolio ID not found\")\n\n\n def set_ret(self, ret):\n self.ret = ret\n\n\n def add_asset(self, asset: int, quantity: int):\n if type(asset) == type(dict()):\n asset = utils.get_asset_id(asset)\n self.assets.append((asset, quantity))\n\n\n def rm_asset(self, asset: int):\n for i, (a, q) in enumerate(self.assets, 0):\n if a == asset:\n self.assets.pop(i)\n\n\n def calc_ret(self):\n pass\n\n\n def calc_volatility(self):\n pass\n\n\n def submit_to_server(self):\n list_asset = []\n for (asset, quantity) in self.assets:\n a = {}\n a[\"asset\"] = {\n \"asset\": asset,\n \"quantity\": quantity\n }\n list_asset.append(a)\n \n payload = {\n \"currency\": {\"code\": self.currency},\n \"label\": self.label,\n \"type\": \"front\",\n \"values\": {\"2012-01-02\": list_asset}\n }\n \n main.print_notif(\"Submitting the portfolio to the server...\")\n result = requests.put(utils.URL + '/portfolio/' + str(self.id) + \n '/dyn_amount_compo',\n auth=utils.AUTH,\n data=json.dumps(payload))\n if result.status_code != 200:\n print (\"Error submitting portfolio: code \" + str(result.status_code))\n return 1\n main.print_notif(\" Portfolio submitted successfully!\")\n\n return 0\n\n def print(self):\n s = \"\\nPortfolio \" + self.label + \":\\n\" + 25 * '-' + \"\\n\"\n for (a_id, q) in self.assets:\n s += \"\\tid:\\t\\t\" + str(a_id) + \"\\n\\tlabel:\\t\\t\" + \\\n utils.get_asset_label_by_id(a_id) + \\\n \"\\n\\tquantity:\\t\" + str(q) + \"\\n\\n\"\n \n s += '-' * 25 + \"\\n\"\n s += \"\\nTotal amount: \" + str(self.get_total_amount()) + \" \" + self.currency\n s += \"\\nReturn: \" + str(self.get_rend() * 100) + \" %\\n\"\n print (s)\n \n def print_portfolio_server(self):\n self.submit_to_server()\n print (json.dumps(api.get_portfolio(self.id), indent=2))\n\n\n def get_rend(self) -> float:\n return compute_rend_with_weight(self.assets, self.currency)\n\n\n def get_total_amount(self) -> float:\n return get_total_amount_assets(self.assets, self.currency)\n\n\n # share of the given currency in the portfolio\n def get_amount_in_currency(self, 
currency):\n pass \n\n\n def get_sharp(self) -> float:\n self.submit_to_server()\n return float(api.compute_ratio([ratio.sharp], [self.id])\\\n [str(self.id)][str(ratio.sharp)]['value'].replace(',', '.'))\n","sub_path":"src/wallet.py","file_name":"wallet.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"349994451","text":"#\n# Memento\n# Backend\n# Unit Tests for IAM Models \n#\n\nfrom unittest import TestCase\nfrom ...app import db\nfrom ...models import *\n\n# unit tests for IAM model\nclass TestIAMModels(TestCase):\n    def create_test_data(self):\n        # create IAM models for testing \n        self.organisation = Organisation(name=\"kompany\")\n        db.session.add(self.organisation)\n        db.session.commit()\n\n        self.team = Team(name=\"designers\",\n                organisation=self.organisation)\n        db.session.add(self.team)\n        db.session.commit()\n\n        self.supervisor = User(kind=User.Kind.Supervisor,\n                organisation=self.organisation,\n                name=\"Joel\",\n                password=\"Pa$$w0rd\",\n                email=\"joel@email.com\")\n        db.session.add(self.supervisor)\n        db.session.commit()\n\n        self.worker = User(kind=User.Kind.Worker,\n                organisation=self.organisation,\n                team=self.team,\n                name=\"James\",\n                password=\"Pa$$w0rd\",\n                email=\"james@email.com\")\n        db.session.add(self.worker)\n        db.session.commit()\n\n        self.management = Management(kind=Management.Kind.User,\n                target_id=self.worker.id,\n                manager=self.supervisor)\n\n        db.session.add(self.management)\n        db.session.commit()\n\n    def delete_test_data(self):\n        db.session.delete(self.management)\n        db.session.commit()\n        db.session.delete(self.worker)\n        db.session.commit()\n        db.session.delete(self.supervisor)\n        db.session.commit()\n        db.session.delete(self.team)\n        db.session.commit()\n        db.session.delete(self.organisation)\n        db.session.commit()\n\n    def test_create_delete(self):\n        self.create_test_data()\n        self.delete_test_data()\n\n    def test_validate_user(self):\n        self.create_test_data()\n\n        got_exception = False\n        try:\n            self.user = User(\n                name=\"Joe\",\n                password=\"Pa$$wrd\",\n                kind=User.Kind.Admin,\n                organisation=self.organisation,\n                email=\"joe@gmail.com\"\n            )\n            db.session.add(self.user)\n            db.session.commit()\n        except AssertionError:\n            got_exception = True\n\n        self.assertTrue(got_exception)\n\n        self.delete_test_data()\n\n","sub_path":"workshops/containers/src/backend/tests/models/iam.py","file_name":"iam.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"335667256","text":"import numpy as np \nimport pandas as pd \nimport os \nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\n\nPATH = os.getcwd()\n\nN = np.load(PATH + '/data/N_features.npy')\nF = np.load(PATH + '/data/F_features.npy')\n\nd = np.vstack((N,F))\ncol = ['Kur','Skew','Var','Eng']\ndata = pd.DataFrame(data=d,columns=col)\n\ny = np.hstack((np.repeat('Non-focal',len(d)//2),np.repeat('Focal',len(d)//2)))\ndata['Type'] = y\n# test\n\n# Boxplot -----------------------------------------------\nfig = plt.figure()\ngs = GridSpec(ncols=2,nrows=2,figure=fig)\n\nax1 = fig.add_subplot(gs[0,0])\nsns.boxplot(x='Type', y='Kur',\n            hue=\"Type\", data=data,ax=ax1)\n\nax2 = fig.add_subplot(gs[0,1])\nsns.boxplot(x='Type', y='Skew',\n            hue=\"Type\", data=data,ax=ax2)\n\nax3 = fig.add_subplot(gs[1,0])\nsns.boxplot(x='Type', y='Var',\n            hue=\"Type\", data=data,ax=ax3)\n
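\n# fourth panel: distribution of the 'Eng' feature (presumably signal energy) by class\nax4 = 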
fig.add_subplot(gs[1,1])\nsns.boxplot(x='Type', y='Eng',\n            hue=\"Type\", data=data,ax=ax4)\n\nplt.show()","sub_path":"plot_features_test.py","file_name":"plot_features_test.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"206150586","text":"# Time: O(n)\n# Space: O(w)\n\n# 1302 biweekly contest 16 12/28/2019\n\n# Given a binary tree, return the sum of values of its deepest leaves.\n\n# Constraints:\n# The number of nodes in the tree is between 1 and 10^4.\n# The value of nodes is between 1 and 100.\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution(object):\n    def deepestLeavesSum(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        curr = [root]\n        while curr:\n            prev, curr = curr, [child for p in curr for child in [p.left, p.right] if child]\n        return sum(node.val for node in prev)\n\n# NOTE: 'null' is not defined in Python, so it is replaced with None below; the list is\n# LeetCode's serialized tree form and would still need to be built into a TreeNode first.\nprint(Solution().deepestLeavesSum([1,2,3,4,5,None,6,7,None,None,None,None,8])) # 15","sub_path":"Python/deepest-leaves-sum.py","file_name":"deepest-leaves-sum.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"478367927","text":"from django.contrib import admin\nfrom qa_app.models import Tag, Question, Answer, Vote, User\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom sorl.thumbnail.admin import AdminImageMixin\n\n\n@admin.register(Tag)\nclass TagAdmin(admin.ModelAdmin):\n    list_display = ('name',)\n\n\n@admin.register(Question)\nclass QuestionAdmin(admin.ModelAdmin):\n    list_display = ('author', 'title', 'created_at')\n\n\n@admin.register(Answer)\nclass AnswerAdmin(admin.ModelAdmin):\n    list_display = ('question', 'content_short', 'is_right', 'author', 'created_at')\n\n\nclass UserAdmin(AdminImageMixin, BaseUserAdmin):\n    add_form = UserCreationForm\n    add_fieldsets = (\n        (None, {\n            'classes': ('wide',),\n            'fields': ('email', 'first_name', 'last_name', 'avatar', 'password1', 'password2')}\n        ),\n    )\n\n    def __init__(self, *args, **kwargs):\n        self.fieldsets[1][1]['fields'] += ('avatar',)\n        super().__init__(*args, **kwargs)\n\n\n# admin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\n\n\n@admin.register(Vote)\nclass VoteAdmin(admin.ModelAdmin):\n    list_display = ('created_at', 'value', 'question', 'answer')\n","sub_path":"qa_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"387288479","text":"\nfrom Objects.GECode import GECode\nfrom ObjectWriters.ObjectWriter import ObjectWriter\n\nfrom DatabaseManager.database import Database\n\nclass GECodeObjectWriter(ObjectWriter):\n\tge_code = GECode(-1)\n\n\tge_code_insert_columns = ['ge_id','math098','math099',\n\t\t\t\t\t\t\t'freshmanfoundationcourse','globalstudy',\n\t\t\t\t\t\t\t'writteninquiry','artisticinquiry',\n\t\t\t\t\t\t\t'naturalscienceinquiry','quantitativeinquiry',\n\t\t\t\t\t\t\t'valuesethicsinquiry','socialinquiry']\n\tge_code_update_columns = ['math098','math099',\n\t\t\t\t\t\t\t\t'freshmanfoundationcourse','globalstudy',\n\t\t\t\t\t\t\t\t'writteninquiry','artisticinquiry',\n\t\t\t\t\t\t\t\t'naturalscienceinquiry','quantitativeinquiry',\n\t\t\t\t\t\t\t\t'valuesethicsinquiry','socialinquiry']
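\n\t# Hypothetical usage sketch (names from this module, not from the original source):\n\t#   GECodeObjectWriter(GECode(7), 'i')  # INSERTs a new ge row\n\t#   GECodeObjectWriter(GECode(7), 'u')  # UPDATEs the row whose ge_id matches\n\tdef __init__(self, 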
ge_code, operation):\n\t\tself.ge_code.copyGECode(ge_code)\n\t\tself.ge_code.prepareOutput()\n\t\tself.db = Database()\n\t\tif (operation == 'u'):\n\t\t\tself.update()\n\t\telif (operation == 'i'):\n\t\t\tself.insert()\n\n\n\tdef update(self):\n\t\tquery = \"UPDATE \" + \"`\" + self.ge_code.TABLE + \"`\" + \" SET \"\n\t\tge_code_arr = self.ge_code.getOutput()\n\t\tfor i in range(len(self.ge_code_update_columns)):\n\t\t\tquery += \"`\" + self.ge_code_update_columns[i] + \"`\"\n\t\t\tquery += \"=\"\n\t\t\tquery += str(ge_code_arr[i+1])\n\t\t\tquery += \",\"\n\t\tquery = query[:query.rfind(\",\")]\n\t\tquery += \" WHERE \"\n\t\tquery += \"`ge_id`=\" + str(ge_code_arr[0])\n\t\tself.db.query(query)\n\n\tdef insert(self):\n\t\tquery = \"INSERT INTO \" + \"`\" + self.ge_code.TABLE + \"`\" + \"(\"\n\t\tfor i in self.ge_code_insert_columns:\n\t\t\tquery += \"`\" + i + \"`\"\n\t\t\tquery += \",\"\n\t\tquery = query[:query.rfind(\",\")]\n\t\tquery += \") VALUES (\"\n\t\t\n\t\tfor i in self.ge_code.getOutput():\n\t\t\tquery += str(i) + \",\"\n\t\tquery = query[:query.rfind(\",\")]\n\t\tquery += \")\"\n\t\tself.db.query(query)\n\n","sub_path":"model/ObjectWriters/GECodeObjectWriter.py","file_name":"GECodeObjectWriter.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"489609148","text":"from django.urls import path\nfrom . import views\nfrom .views import (\n \n CheckoutView,\n \n OrderSummaryView,\n add_to_cart,\n remove_from_cart,\n remove_single_item_from_cart,\n PaymentView,\n AddCouponView,\n BkashPaymentView,\n OrderSuccessView,\n RequestRefundView\n)\n\napp_name = 'core'\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('samsung/', views.samsung, name='samsung'),\n path('xiaomi/', views.xiaomi, name='xiaomi'),\n path('nokia/', views.nokia, name='nokia'),\n path('apple/', views.apple, name='apple'),\n path('realme/', views.realme, name='realme'),\n path('oppo/', views.oppo, name='oppo'),\n path('oneplus/', views.oneplus, name='oneplus'),\n \n path('search/', views.search, name='search'),\n path('confirmed/', views.confirmed, name='confirmed'),\n path('bkash/', views.bkash, name='bkash'),\n \n path('contact/', views.contact, name='contact'),\n\n\n path('checkout/', CheckoutView.as_view(), name='checkout'),\n path('order-summary/', OrderSummaryView.as_view(), name='order-summary'),\n path('bkash-payment/', BkashPaymentView.as_view(), name='bkash-payment'),\n path('order-success/', OrderSuccessView.as_view(), name='order-success'),\n\n path('product//', views.product_details, name='product'),\n path('add-to-cart//', add_to_cart, name='add-to-cart'),\n path('add-coupon/', AddCouponView.as_view(), name='add-coupon'),\n path('remove-from-cart//', remove_from_cart, name='remove-from-cart'),\n path('remove-item-from-cart//', remove_single_item_from_cart,\n name='remove-single-item-from-cart'),\n path('payment//', PaymentView.as_view(), name='payment'),\n path('request-refund/', RequestRefundView.as_view(), name='request-refund')\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"25609568","text":"import unittest\nfrom check_list_loop import Node\nfrom check_list_loop import check_list_loop\n\nclass TestCheckListLoop(unittest.TestCase):\n\t\n\tdef setUp(self):\n\t\tpass\n\n\tdef test_loop(self):\n\t\t\"\"\" test on a loop list \"\"\"\n\t\thead = 
self.create_list(10) # create a list\n\t\tself.get_last(head).next = self.get_n_th(head, 5) # create a loop\n\t\t# run check\n\t\thas_loop = check_list_loop(head)\n\t\tassert(has_loop == True)\n\n\tdef test_non_loop(self):\n\t\t\"\"\" test on a non-loop list \"\"\"\n\t\thead = self.create_list(10) # create a list\n\t\t# run check\n\t\thas_loop = check_list_loop(head)\n\t\tassert(has_loop == False)\n\n\tdef get_last(self, head):\n\t\t\"\"\" get last node of the list \"\"\"\n\t\twhile head.next:\n\t\t\thead = head.next\n\t\treturn head\n\n\tdef get_n_th(self, head, n):\n\t\t\"\"\" get n-th node of the list \"\"\"\n\t\tfor i in range(n):\n\t\t\thead = head.next\n\t\treturn head\n\n\tdef create_list(self, n):\n\t\t\"\"\" create a n size list \"\"\"\n\t\thead = pilot = Node(0)\n\t\tfor i in range(1, n):\n\t\t\tpilot.next = Node(i)\n\t\t\tpilot = pilot.next\n\t\treturn head\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"tests/test_check_list_loop.py","file_name":"test_check_list_loop.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"637644050","text":"import csv\nimport glob\nimport os\nimport re\n\nimport PIL.ImageOps\nimport progressbar\nimport pytesseract\nfrom PIL import Image\n\nfolder = \"Screen\"\ndirectory = folder + \"/\"\n\npytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'\n\nfile_count = os.listdir(directory)\nfile_count = len(file_count)\nbar = progressbar.ProgressBar(max_value=file_count)\nbar2 = progressbar.ProgressBar(max_value=file_count)  # separate bar for the analysis pass; reusing a finished bar can misbehave\n\nlast_result = []\nresult_arrey = []\nimage_list = []\ni = 0\ncompare_first = 0\n\nfor filename in glob.glob(folder + \"/*.jpg\"): # First thing: open all the files\n    im = Image.open(filename)\n    image_list.append(im) # store them in the list called \"image_list\"\n    bar.update(i)\n    i += 1\nbar.finish()\n\ni = 0\nresult = 0\nc = 0\nfirst = True\n\n# Now I need to extract the text, there are tons of ways to do it\n# I'm gonna use pytesseract, cut some rectangles and then\n# read each text\n\n# Let's define each box, we actually need only 6 since\n# the text is the same, we don't need to read it, just\n# the number changes\n\nrow1 = 890\nrow2 = 952\n\ncolumn1 = 120\ncolumn2 = 371\ncolumn3 = 620\n\nsize_x = 60\nsize_xL = 80\nsize_y = 35\n\nkill_box = [column1, row1, size_x, size_y]\nobj_kill_box = [column2, row1, size_x, size_y]\nobj_time_box = [column3, row1, size_xL, size_y] # These boxes are larger\ndmg_done_box = [column1, row2, size_xL, size_y]\nheal_done_box = [column2, row2, size_xL, size_y]\ndeaths_box = [column3, row2, size_x, size_y]\n\n# Make the boxes\n\nboxes = []\nboxes.extend([kill_box, obj_kill_box, obj_time_box, dmg_done_box, heal_done_box, deaths_box])\n\n# Put them in an array for easier access\n\nfor box in boxes: # PIL crops with (left, upper, right, lower) corner coordinates\n    box[2] = box[0] + box[2]\n    box[3] = box[1] + box[3]\n\n# This way the boxes can be defined as (x, y, width, height) and converted to PIL's corner format here\n\n\nfor image in image_list: # Let's analyse each image!\n    i += 1\n    bar2.update(i)\n    for j in range(0, len(boxes)): # We have 6 elements to analyse\n        area = image.crop(boxes[j])\n        # area = area.convert('L')\n        area = PIL.ImageOps.invert(area) # Convert because pytesseract is supposed to read black on white\n        # We create two areas to make this as accurate as we can\n\n        a = area.resize([int((boxes[j][2] - boxes[j][0]) * 2.1), int((boxes[j][3] - boxes[j][1]) * 2.1)], 0)\n
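        # resample=0 is PIL's NEAREST filter; this second, slightly larger upscale gives an independent OCR read for the consensus check below\n        b = area.resize([int((boxes[j][2] - 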
boxes[j][0]) * 2.3), int((boxes[j][3] - boxes[j][1]) * 2.3)], 0)\n\n        a = re.sub(\"[^0-9]\", \"\", pytesseract.image_to_string(a, lang='eng', config='--psm 10'))\n        b = re.sub(\"[^0-9]\", \"\", pytesseract.image_to_string(b, lang='eng', config='--psm 10'))\n\n        # Remove everything that is not a number\n\n        a_error = b_error = False # We set all the error flags to False, I prefer when my program works lol\n\n        if a != \"\":\n            a = int(a) # convert into int\n        else:\n            a_error = True # There is an error, we'll save b, but if there is a b error too it's gonna be crazy!\n\n        if b != \"\":\n            b = int(b)\n        else:\n            b_error = True\n\n        if a == b: # If a == b it means both analyses read the same\n            result = a\n            if a_error and b_error: # But if they didn't read ANYTHING AT ALL\n                print(\"\\r EMERGENCY NOTHING HAS BEEN READ at : {}, let's save the last value\".format(j))\n                result = last_result[j] # We then save the result of the last analysis, pretty clever right :)\n\n        elif c != 0 and not a == b: # If a != b we'll then keep the closest value of the LAST result\n            delta = []\n            delta_a = abs(a - c)\n            delta_b = abs(b - c)\n            delta.extend([delta_a, delta_b])\n            result = min(delta)\n            # NOTE: 'c' is never updated after its initialization to 0, so this branch never runs\n            # as written, and min(delta) would return the smaller difference, not the closer value\n\n        if a == b and first:\n            compare_first += 1 # just checking that the first analysis is working\n\n        if not first:\n            try:\n                if result < last_result[j] and (\n                        abs(result - last_result[j]) / last_result[j]) < 0.3: # if abs is <0.3 (physics thing)\n                    # and the result < last result (it's easy to understand, you can't do 2 kills on frame 1 and 0 on 2\n                    # then we'll save the last result\n                    result = last_result[j]\n            except ZeroDivisionError:\n                pass\n        # Let's build up the results array\n        result_arrey.append(result)\n        # Save this array and print it\n\n    last_result = result_arrey\n    #print(last_result)\n\n    result_arrey = []\n    first = False\n    # Make sure the first analysis worked\n    assert compare_first == len(boxes), \"The first picture failed, maybe increase the quality\"\n    # hey hey, let's write now!\n    with open('cvs/ex{}.csv'.format(str(i)), mode='w') as ex:\n        ex = csv.writer(ex, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        ex.writerow(['eliminations', str(last_result[0])])\n        ex.writerow(['obj kill', str(last_result[1])])\n        ex.writerow(['obj time', str(last_result[2])])\n        ex.writerow(['hero dmg done', str(last_result[3])])\n        ex.writerow(['healing done', str(last_result[4])])\n        ex.writerow(['deaths', str(last_result[5])])\n\nbar2.finish()\n# Congratz, you just analysed your first sequence, yeaaah :D\n\ninput(\"Press enter to exit, everything's gonna be saved the moment you leave!\")\n","sub_path":"Homework/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"613719430","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('episodes', '0001_initial'),\n        ('reactions', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='imagereaction',\n            name='episode',\n            field=models.ForeignKey(null=True, to='episodes.Episode'),\n        ),\n        migrations.AddField(\n            model_name='tweetreaction',\n            name='episode',\n            field=models.ForeignKey(null=True, to='episodes.Episode'),\n        ),\n
]\n","sub_path":"testsite/reactions/migrations/0002_auto_20170213_1043.py","file_name":"0002_auto_20170213_1043.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"460952056","text":"\"\"\"Basic building block for implementing BuildSteps and Pipelines\"\"\"\nimport argparse\nimport logging\nimport sys\nfrom abc import ABC, abstractmethod\nfrom typing import List, Callable, Set, cast, Optional\n\nimport configargparse\n\nfrom step_exec_lib.errors import Error\nfrom step_exec_lib.types import Context, StepType, STEP_ALL\nfrom step_exec_lib.utils import config as abs_config\nfrom step_exec_lib.utils import files\n\nlogger = logging.getLogger(__name__)\n\n\nclass BuildStep(ABC):\n \"\"\"\n BuildStep is an abstract base class that defines interface for any real build steps.\n\n All the BuildSteps are executed by the main logic in such a way that first all initialize_config\n methods are called, then sequentially pre_run methods of all the BuildSteps, then all run calls\n and after that all cleanup steps. Therefore, you should use the methods as follows:\n - initialize_config must be used for adding BuildStep specific config options only\n - pre_run, if defined, is used for \"fail fast\" logic; check any assumptions and validations\n you can check *quickly* at this stage; failing here will fail the whole pipeline and not\n even get to the run step, providing immediate feedback that the build can't be done\n - run is the only method expected to run long lasting jobs and to execute actual build steps\n that can be later re-used by next steps\n - since results of executing run of one BuildStep can be later re-used by a subsequent BuildStep,\n yet you might want to do a proper cleanup after the build is done, the cleanup method is called\n only after the run method of all BuildSteps is executed.\n \"\"\"\n\n @property\n def name(self) -> str:\n \"\"\"\n The name of the step.\n :return: By default returns the name of the implementing class.\n \"\"\"\n return self.__class__.__name__\n\n @property\n @abstractmethod\n def steps_provided(self) -> Set[StepType]:\n \"\"\"\n This defines types of steps this BuildStep should be executed for. If a user filters the set of steps\n and the steps listed here don't match any of the steps selected by the user, the whole BuildStep\n won't be executed for this run.\n :return: Returns a list with elements from ALL_STEPS.\n \"\"\"\n raise NotImplementedError\n\n def initialize_config(self, config_parser: configargparse.ArgParser) -> None:\n \"\"\"\n Provide configuration options supported by this BuildStep. 
Needs to only act on ArgParser and can't\n run any blocking/long operations.\n :param config_parser: configargparse.ArgParser to add the configuration options to.\n :return: None\n \"\"\"\n pass # pragma: no cover\n\n def pre_run(self, config: argparse.Namespace) -> None:\n \"\"\"\n Execute any pre-run validation or assertion logic.\n :param config: Ready (parsed) configuration Namespace object.\n :return: None\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def run(self, config: argparse.Namespace, context: Context) -> None:\n \"\"\"\n Execute actual build action of the BuildStep.\n :param context: A context where different components can save data to share with other components.\n :param config: Ready (parsed) configuration Namespace object.\n :return: None\n \"\"\"\n raise NotImplementedError\n\n def cleanup(\n self,\n config: argparse.Namespace,\n context: Context,\n has_build_failed: bool,\n ) -> None:\n \"\"\"\n Clean up any resources used during the BuildStep.\n :param context: A context where different components can save data to share with other components.\n :param has_build_failed: A boolean set to True if the cleanup is run after any of the BuildSteps\n failed their run step\n :param config: Ready (parsed) configuration Namespace object.\n :return: None\n \"\"\"\n pass # pragma: no cover\n\n def _assert_binary_present_in_path(self, bin_name: str) -> None:\n \"\"\"\n Checks if binary is available in the system. Raises ValidationError if not found.\n :param bin_name: The name of the binary executable.\n :return: None.\n \"\"\"\n files.assert_binary_present_in_path(self.name, bin_name)\n\n def _assert_version_in_range(self, app_name: str, version: str, min_version: str, max_version_exc: str) -> None:\n \"\"\"\n Checks if the given app_name with a string version falls in between specified min and max\n versions (min_version <= version < max_version). Raises ValidationError.\n :param app_name: The name of the app (used just for logging purposes).\n :param version: The version string (semver, might start with optional 'v' prefix).\n :param min_version: proper semver version string to check for (includes this version)\n :param max_version_exc: proper semver version string to check for (excludes this version)\n :return:\n \"\"\"\n abs_config.assert_version_in_range(self.name, app_name, version, min_version, max_version_exc)\n\n\nclass BuildStepsFilteringPipeline(BuildStep):\n \"\"\"\n A base class to provide sets (pipelines) of BuildSteps that can be later executed as a single BuildStep.\n Implement your BuildStepsPipeline by inheriting from this class and overriding self._pipeline members.\n This class handles BuildSteps filtering based on configured \"--steps\" flags.\n \"\"\"\n\n def __init__(self, pipeline: List[BuildStep], config_group_desc: str):\n \"\"\"\n Create new instance using the BuildSteps passed.\n :param pipeline: The list of BuildSteps to be included in this pipeline.\n :param config_group_desc: All options provided by BuildSteps included in\n BuildStepsPipeline all included in the application's help message as\n a separate config options group. 
This sets its description.\n \"\"\"\n self._config_group_desc = config_group_desc\n self._pipeline = pipeline\n self._config_parser_group: Optional[configargparse.ArgParser] = None\n self._all_pre_runs_skipped = False\n self._all_runs_skipped = False\n self._all_cleanups_skipped = False\n\n @property\n def steps_provided(self) -> Set[StepType]:\n all_steps: Set[StepType] = set()\n for build_step in self._pipeline:\n all_steps.update(build_step.steps_provided)\n return all_steps\n\n def initialize_config(self, config_parser: configargparse.ArgParser) -> None:\n self._config_parser_group = cast(\n configargparse.ArgParser,\n config_parser.add_argument_group(self._config_group_desc),\n )\n for build_step in self._pipeline:\n build_step.initialize_config(self._config_parser_group)\n\n def pre_run(self, config: argparse.Namespace) -> None:\n self._all_pre_runs_skipped = self._iterate_steps(config, \"pre-run\", lambda step: step.pre_run(config))\n\n def run(self, config: argparse.Namespace, context: Context) -> None:\n self._all_runs_skipped = self._iterate_steps(config, \"build\", lambda step: step.run(config, context))\n\n def cleanup(\n self,\n config: argparse.Namespace,\n context: Context,\n has_build_failed: bool,\n ) -> None:\n self._all_cleanups_skipped = self._iterate_steps(\n config,\n \"cleanup\",\n lambda step: step.cleanup(config, context, has_build_failed),\n )\n\n def _iterate_steps(\n self,\n config: configargparse.Namespace,\n stage: str,\n step_function: Callable[[BuildStep], None],\n ) -> bool:\n all_steps_skipped = True\n for step in self._pipeline:\n execute_all = STEP_ALL in config.steps\n is_requested_step = any(s in step.steps_provided for s in config.steps)\n is_requested_skip = any(s in step.steps_provided for s in config.skip_steps)\n if (execute_all or is_requested_step) and not is_requested_skip:\n logger.info(f\"Running {stage} step for {step.name}\")\n all_steps_skipped = False\n try:\n step_function(step)\n except Error as e:\n logger.error(f\"Error when running {stage} step for {step.name}: {e.msg}\")\n raise\n else:\n logger.info(f\"Skipping {stage} step for {step.name} as it was not configured to run.\")\n return all_steps_skipped\n\n\nclass Runner:\n \"\"\"\n A class used to run all the steps of a build pipeline. Expects to get a list of configured\n BuildSteps and a config. Provides context object.\n \"\"\"\n\n def __init__(self, config: configargparse.Namespace, steps: List[BuildStep]):\n self._config = config\n self._steps = steps\n self._context: Context = {}\n self._failed_build = False\n\n @property\n def context(self):\n return self._context\n\n def run(self) -> None:\n self.run_pre_steps()\n self.run_build_steps()\n self.run_cleanup()\n if self._failed_build is True:\n logger.error(\"Exit 1 due to failed build step.\")\n sys.exit(1)\n\n def run_pre_steps(self) -> None:\n try:\n for step in self._steps:\n step.pre_run(self._config)\n except Error as e:\n logger.error(f\"Error when running pre-steps: {e}. Exiting.\")\n sys.exit(1)\n\n def run_build_steps(self) -> None:\n try:\n for step in self._steps:\n step.run(self._config, self._context)\n except Error as e:\n logger.error(f\"Error when running build: {e}. No further build steps will be performed, moving to cleanup.\")\n self._failed_build = True\n\n def run_cleanup(self) -> None:\n for step in self._steps:\n try:\n step.cleanup(self._config, self._context, self._failed_build)\n except Error as e:\n logger.error(f\"Last cleanup step failed: {e}. 
Moving to the next one.\")\n","sub_path":"step_exec_lib/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":10077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"522445559","text":"import os, random\nimport sys, getch\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n#returns path of random card\ndef getRandomCard(cmc):\n if cmc > 16 or cmc == 14:\n return 'insert garbage here.'\n else:\n root = 'C:\\\\Users\\\\Cody\\\\Documents\\\\GitHub\\\\Images\\\\' + str(cmc) + '\\\\'\n return root + random.choice(os.listdir(root))\n\n#send file to printer\ndef rcptPrint(imgPath):\n img = mpimg.imread(imgPath)\n imgplot = plt.imshow(img)\n plt.show()\n # print(imgPath)\n\n#send characers to display\ndef updateDisplay(cmc):\n print(cmc)\n\n#handles keypad inputs\ndef processKeypress(keyPress, lastKey, lastLastKey):\n keyPress = str(keyPress).replace('b\\'', '').replace('\\'','')\n\n #enter\n if keyPress == '\\\\r':\n rcptPrint(getRandomCard(int(str(lastLastKey + lastKey).replace(' ', '0'))))\n return (' ', ' ')\n\n #backspace\n elif 'x08' in keyPress:\n updateDisplay(' ' + lastLastKey)\n return(lastLastKey, ' ')\n\n #number key(input space available)\n elif lastLastKey == ' ':\n updateDisplay(lastKey + keyPress)\n return (keyPress.replace('0', ' '), lastKey)\n\n #number key(input space not available)\n else:\n updateDisplay(lastLastKey + lastKey)\n return (lastKey, lastLastKey)\n\nkeyPress = ' '\nlastKey = ' '\n\nwhile True:\n\n lastLastKey = lastKey\n lastKey = keyPress\n keyPress = getch.getch()\n (keyPress, lastKey) = processKeypress(keyPress, lastKey, lastLastKey)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479321717","text":"# Copyright 2023 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport pytest\n\nimport mindspore.nn as nn\nfrom mindspore.ops.operations import _sequence_ops as seq\nfrom mindspore import context\nfrom mindspore.common import mutable\nfrom mindspore.ops.composite import GradOperation\nfrom sequence_help import context_prepare\n\ncontext.set_context(mode=context.GRAPH_MODE)\ncontext_prepare()\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super().__init__()\n self.seq_mul = seq.SequenceMul()\n\n def construct(self, x, y):\n return self.seq_mul(x, y)\n\n\n@pytest.mark.level1\n@pytest.mark.platform_x86_gpu\n@pytest.mark.env_onecard\ndef test_seq_mul_tuple_dy():\n \"\"\"\n Feature: test sequence_mul op\n Description: first input is dynamic sequence\n Expectation: the result match with tuple result\n \"\"\"\n x = mutable((1, 2, 3), True)\n y = 2\n expect = (1, 2, 3, 1, 2, 3)\n net = Net()\n res = net(x, y)\n assert res == expect\n\n\n@pytest.mark.level1\n@pytest.mark.platform_x86_gpu\n@pytest.mark.env_onecard\ndef 
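To make the BuildStep lifecycle in step_exec_lib/steps.py above concrete, here is a minimal sketch of a custom step driven by Runner. The step class, the "hello" step type string and the Namespace fields are illustrative, and the import path is assumed from the record's sub_path; BuildStep, Runner and the pre_run/run/cleanup ordering are as defined in the module above:

import argparse
from step_exec_lib.steps import BuildStep, Runner

class HelloStep(BuildStep):
    @property
    def steps_provided(self):
        return {"hello"}               # the StepTypes this step answers to

    def run(self, config, context):
        context["greeting"] = "hi"     # results shared with later steps via the context dict

config = argparse.Namespace(steps=["hello"], skip_steps=[])
runner = Runner(config, [HelloStep()])
runner.run()  # pre_run for every step, then run, then cleanup

Note that the steps/skip_steps fields are only consulted when steps are wrapped in a BuildStepsFilteringPipeline; a bare Runner executes every step it is given.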
test_seq_mul_scalar_dy():\n    \"\"\"\n    Feature: test sequence_mul op\n    Description: second input is dynamic scalar\n    Expectation: the result match with tuple result\n    \"\"\"\n    x = (0, 1, 1, 2)\n    y = mutable(1)\n    expect = (0, 1, 1, 2)\n    net = Net()\n    res = net(x, y)\n    assert res == expect\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu\n@pytest.mark.env_onecard\ndef test_seq_mul_all_dy():\n    \"\"\"\n    Feature: test sequence_mul op\n    Description: two inputs are dynamic sequence\n    Expectation: the result match with tuple result\n    \"\"\"\n    x = mutable((1, 2, 3), True)\n    y = mutable(3)\n    expect = (1, 2, 3, 1, 2, 3, 1, 2, 3)\n    net = Net()\n    res = net(x, y)\n    assert res == expect\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu\n@pytest.mark.env_onecard\ndef test_seq_mul_grad():\n    \"\"\"\n    Feature: test sequence_mul grad op\n    Description: two inputs are dynamic sequence\n    Expectation: the result match with tuple result\n    \"\"\"\n    x = mutable((1, 2, 3), True)\n    y = mutable(2)\n    dout = mutable((4, 5, 6, 7, 8, 9), True)\n    net = Net()\n    grad_func = GradOperation(get_all=True, sens_param=True)(net)\n    grad_func(x, y, dout)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu\n@pytest.mark.env_onecard\ndef test_seq_mul_grad_mutable_scalar():\n    \"\"\"\n    Feature: test sequence_mul grad op\n    Description: two inputs are dynamic sequence\n    Expectation: the result match with tuple result\n    \"\"\"\n    x = (1, mutable(2), 3)\n    y = mutable(2)\n    dout = mutable((4, 5, 6, 7, 8, 9), True)\n    net = Net()\n    grad_func = GradOperation(get_all=True, sens_param=True)(net)\n    grad_func(x, y, dout)\n","sub_path":"tests/st/ops/dynamic_sequence/test_dynamic_sequence_mul.py","file_name":"test_dynamic_sequence_mul.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"182048177","text":"# by using the length function\n\nl=[1,2,3,4,5,6,7,8]\nprint(len(l))\n\n# 2.\n\nh=[]\nh.append (\"mahesh\")\nh.append (\"deepak\")\nh.append (\"sushant\")\nh.append (\"Datta\")\nprint (len(h))\n\n#using naive method\n\nListName = [ \"Hello\", \"Edureka\", 1,2,3 ]\nprint (\"The list is : \" + str(ListName))\ncounter = 0\nfor i in ListName:\n    counter = counter + 1\nprint (\"Length of list using naive method is : \" + str(counter))\n","sub_path":"length of list.py","file_name":"length of list.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"638606459","text":"from collections import Counter\nimport math\n######################## wordcut\nimport sys\nsys.path.append('./wordcutpy-master')\npath = './wordcutpy-master/'\nfrom wordcut import Wordcut\nwith open(path+'bigthai.txt', encoding=\"UTF-8\") as dict_file:\n    word_thai_list = list(set([w.rstrip() for w in dict_file.readlines()]))\n    wordcut = Wordcut(word_thai_list)\n########################\nspecial_character_file = open('./special_character_dict.txt', 'r')\nspecial_character = special_character_file.read()\nspecial_character_list = special_character.split('\\n')\n########################\nenglish_character_file = open('./english_character_dict.txt', 'r')\nenglish_character = english_character_file.read()\nenglish_character_list = english_character.split('\\n')\n########################\nstopword_file = open('./stopword.txt', 'r')\nstopword = stopword_file.read()\nstopword_list = stopword.split('\\n')\n########################\nans_file = open('./ans.txt', 'r')\nans = 
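The expected tuples in the SequenceMul tests above are plain Python sequence repetition, so a new case can be sanity-checked without MindSpore at all; a tiny reference sketch:

def expect_seq_mul(seq, n):
    # sequence * int is the reference behaviour SequenceMul mirrors for dynamic tuples
    return tuple(seq) * n

assert expect_seq_mul((1, 2, 3), 2) == (1, 2, 3, 1, 2, 3)
assert expect_seq_mul((0, 1, 1, 2), 1) == (0, 1, 1, 2)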
ans_file.read()\nans_list = ans.split('\\n')\n########################\nN = 1084\ndictionary = {}\ndf = {}\nsort_freq_list = []\nvector_list = []\n\n\n\ndef remove_stopword(word_l):\n\tremoving_list = []\n\tfor i in word_l:\n\t\tif i in stopword_list:\n\t\t\tremoving_list.append(i)\n\tfor i in removing_list:\n\t\tword_l.pop(i)\n\treturn word_l\n\ndef remove_word_has_no_meaning(word_l):\n\tremoving_list = []\n\tfor i,j in word_l.items():\n\t\tif not i in word_thai_list:\n\t\t\tif not is_english_language(i):\n\t\t\t\tremoving_list.append(i)\n\tfor i in removing_list:\n\t\tword_l.pop(i)\n\treturn word_l\n\ndef remove_special_character(post):\n\tfor i in special_character_list:\n\t\tpost = post.replace(i, '')\n\tpost = post.replace(' ','')\n\tpost = post.replace('\\n','')\n\treturn post\n\ndef is_english_language(post):\n\tcheck = False\n\tfor i in english_character_list:\n\t\tif i in post:\n\t\t\tcheck = True\n\treturn check\n\ndef readfile(filename):\n\tf = open(filename, 'r')\n\treturn f.read()\n\ndef writefile(filename,data):\n f = open(filename, 'w')\n f.write(data)\n f.close()\n return True\n\ndef convert_post_to_vector(post):\n\tpost = remove_special_character(post)\n\tword = wordcut.tokenize(post)\n\tword = Counter(word)\n\tword = remove_word_has_no_meaning(word)\n\tword = remove_stopword(word)\n\n\tfor i in word:\n\t\tif not i in df:\n\t\t\tdf[i] = 1\n\t\telse:\n\t\t\tdf[i] += 1\n\treturn word \t# {'word1':1, 'word2':2}\n\n\n\ndef main():\n\ttmp_vector_list = []\n\tdata = '@RELATION NEWS\\n'\n\tfor i in range(1, N+1):\n\t\tpost = readfile('./topic/'+str(i)+'.txt')\n\t\tvector = convert_post_to_vector(post)\t# {'a':1,'b':1,'c':2}\n\t\ttmp_vector_list.append(vector) \t\t\t#[{'a':1},{'a':1,b':1}]\n\t\t# attach vector to dict {'a':'1'} -> {'a':'1', 'b':'2'}\n\t\tdictionary.update(vector)\t\n\ttmp_freq_list = []\n\tfor i in range(0,N+1):\n\t\ttmp_freq_list.append([])\n\tfor word,freq in df.items():\n\t\ttmp_freq_list[freq].append(word)\t# [[],['a','b'],['c']] 'a','b' freq = 1\n\t\n\tcount = 0\t# to check limit dimension\n\tcount_2 = 0\t# we don't want the word which freq has just 1\n\tat_most_freq = 2000\t#limit dimension\n\tat_least_freq = 2 # if only accept n freq at least\n\n\tfor i in tmp_freq_list:\n\t\tif count_2 >= at_least_freq:\n\t\t\tfor word in i:\n\t\t\t\tsort_freq_list.append(word)\t\t# ['a','b','c']\n\t\t\t\tcount += 1\n\t\t\t\tif count == at_most_freq:\n\t\t\t\t\tbreak\n\t\t\tif count == at_most_freq:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tcount_2+=1\n\n\tfor word in sort_freq_list:\n\t\tdata += '@ATTRIBUTE ' + word + \" NUMERIC\\n\"\n\tdata += \"@ATTRIBUTE class {act-g,ann-a,etc}\\n\"\n\tdata += \"@DATA\\n\"\n\n\tcount_acth = 0;\n\tlimit_acth_to_act_g = 100;\n\tfor i in range(0, N):\n\t\tif ans_list[i] in ['act-g', 'ann-a', 'etc']:\n\t\t\tfor word in sort_freq_list:\n\t\t\t\tif word in tmp_vector_list[i]:\n\t\t\t\t\tdata += str(tmp_vector_list[i][word]*math.log(N/df[word])) + ','\n\t\t\t\telse:\n\t\t\t\t\tdata += '0' + ','\n\t\t\tdata += ans_list[i] + '\\n'\n\t\t# increasing data convert act-h to act-g\n\t\tif ans_list[i] == 'act-h' and count_acth',deletepersonview,name='delete'),\n path('update/',updatepersonview,name='update'),\n\n path('addPeople/',addFakePeople,name='addPeople'),\n\n\n\n]\n\n","sub_path":"Project_Pagination/Page_App/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"595304700","text":"import argparse\nimport family\nimport person 
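The ARFF writer above weights every attribute with a plain TF-IDF score, tf * log(N / df), mirroring its tmp_vector_list[i][word]*math.log(N/df[word]) line. A self-contained sketch of that weighting (the toy corpus is illustrative):

import math
from collections import Counter

docs = [["hotel", "pool"], ["hotel", "spa"], ["spa", "pool", "spa"]]
N = len(docs)
df = Counter(word for doc in docs for word in set(doc))  # document frequency per term

def tfidf(doc):
    tf = Counter(doc)
    return {w: tf[w] * math.log(N / df[w]) for w in tf}

print(tfidf(docs[2]))  # 'spa' scores highest: frequent in this doc, rarer in the corpus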
\n\npeople = {}\nfamilies = {}\n\n#given an array of strings parse the GEDCOM information\n#there is no return but will print to console\ndef parse(lines):\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n print_in(line)\n split = line.split(\" \", 2)\n if len(split) == 2: #if it has no extra args, add an empty one\n split.append(\"\")\n\n if split[0] == \"0\":\n if split[2] == \"INDI\":\n print_out(0, \"INDI\", True, split[1])\n elif split[2] == \"FAM\":\n print_out(0, \"FAM\", True, split[1])\n elif split[1] == \"HEAD\":\n print_out(0, \"HEAD\", True, split[2])\n elif split[1] == \"TRLR\":\n print_out(0, \"TRLR\", True, split[2])\n elif split[1] == \"NOTE\":\n print_out(0, \"NOTE\", True, split[2])\n else:\n print_out(0, split[1], False, split[2])\n elif split[0] == \"1\":\n possible_tags = [\n \"NAME\", \n \"SEX\", \n \"BIRT\", \n \"DEAT\", \n \"FAMC\", \n \"FAMS\", \n \"MARR\", \n \"HUSB\", \n \"WIFE\",\n \"CHIL\",\n \"DIV\"\n ]\n if split[1] in possible_tags:\n print_out(1, split[1], True, split[2])\n else:\n print_out(1, split[1], False, split[2])\n elif split[0] == \"2\":\n if split[1] == \"DATE\":\n print_out(2, \"DATE\", True, split[2])\n else:\n print_out(2, split[1], False, split[2])\n else: \n print_out(split[0], split[1], False, split[2])\ndef print_in(str):\n print(\"-->\", str)\n\ndef print_out(level, tag, valid, arg):\n print(\"<--\", str(level) + \"|\" + str(tag) + \"|\" + (\"Y\" if valid else \"N\") + \"|\" + str(arg))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Homemade GEDCOM reader')\n parser.add_argument('GEDCOM_file', type=str, help='The file to read (must be in GEDCOM format')\n args = parser.parse_args()\n \n with open(args.GEDCOM_file) as file:\n lines = file.readlines()\n parse(lines)\n\n\n","sub_path":"src/project_02_parser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"450631732","text":"import scrapy\nfrom scrapy.selector import Selector\nfrom Hue.basepro import ZhengFuBaseSpider\n\n\nclass YantaiSpider(ZhengFuBaseSpider):\n name = 'Yantai'\n allowed_domains = ['yantai.gov.cn']\n start_urls = ['http://http://www.yantai.gov.cn//']\n api = \"http://www.yantai.gov.cn/jsearchfront/interfaces/cateSearch.do\"\n method = \"POST\"\n keywords = [\"社保\"]\n data = {\n \"websiteid\": \"370600000000000\",\n \"q\": \"{keyword}\",\n \"p\": \"{page}\",\n \"pg\": \"20\",\n \"cateid\": \"5\",\n \"pos\": \"\",\n \"pq\": \"\",\n \"oq\": \"\",\n \"eq\": \"\",\n \"begin\": \"\",\n \"end\": \"\",\n \"tpl\": \"82\",\n \"sortFields\": \"\"\n }\n\n def edit_data(self, data, keyword, page):\n data[\"q\"] = str(keyword)\n data[\"p\"] = str(page)\n return data\n\n def edit_items_box(self, response):\n raw_data = response.json()\n items_box = raw_data.get(\"result\", None)\n return items_box\n\n def edit_items(self, items_box):\n items = [Selector(text=item, type=\"html\") for item in items_box]\n return items\n\n def edit_item(self, item):\n data = {}\n data[\"title\"] = ''.join([w.strip() for w in item.css(\"div.jcse-news-title\").css(\"a::text,em::text\").getall()])\n data['url'] = item.css(\"div.jcse-news-title a::attr(href)\").get()\n data['date'] = item.css(\"span.jcse-news-date::text\").get()\n return data\n\n def edit_page(self, response):\n raw_data = response.json()\n total_items_num = raw_data.get(\"total\", 0)\n total_page_num = int(total_items_num) // 20 + 1\n return 
total_page_num\n","sub_path":"Hue/Hue/spiders/Yantai.py","file_name":"Yantai.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"201651332","text":"\r\nimport os\r\nos.chdir('C:\\\\ENEA_CAS_WORK\\\\Catania_RAFAEL\\\\postprocessing')\r\nos.getcwd()\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom geopandas import GeoDataFrame\r\nfrom shapely.geometry import Point\r\nimport folium\r\nimport osmnx as ox\r\nimport networkx as nx\r\nimport math\r\nimport momepy\r\n# from funcs_network_FK import roads_type_folium\r\nfrom shapely import geometry\r\nfrom shapely.geometry import Point, Polygon\r\nimport psycopg2\r\n# import db_connect\r\nimport datetime\r\nimport seaborn as sns\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as mcolors\r\nfrom folium_stuff_FK_map_matching import plot_graph_folium_FK\r\n\r\n\r\n## reload data (to be used later on...)\r\ngdf_all_EDGES = gpd.read_file(\"all_EDGES.geojson\")\r\n# gdf_all_EDGES = gpd.read_file(\"all_EDGES_archived.geojson\")\r\n\r\nAAA = pd.DataFrame(gdf_all_EDGES)\r\n\r\n## select only columns 'u' and 'v'\r\ngdf_all_EDGES_sel = gdf_all_EDGES[['u', 'v']]\r\n###################\r\n#### GROUP BY #####\r\n###################\r\n## count how many times an edge ('u', 'v') occur in the geodataframe\r\ndf_all_EDGES_sel = gdf_all_EDGES.groupby(gdf_all_EDGES_sel.columns.tolist()).size().reset_index().rename(columns={0:'records'})\r\n\r\n\r\ndf_all_EDGES_records = df_all_EDGES_sel\r\n# select only columns with records > N\r\ndf_all_EDGES_sel = df_all_EDGES_sel[df_all_EDGES_sel.records >= 10]\r\n# add colors based on 'records'\r\nvmin = min(df_all_EDGES_records.records)\r\nvmax = max(df_all_EDGES_records.records)\r\n# df_all_EDGES_records.iloc[-1] = np.nan\r\n# Try to map values to colors in hex\r\nnorm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)\r\nmapper = plt.cm.ScalarMappable(norm=norm, cmap=plt.cm.Reds) # scales of reds\r\ndf_all_EDGES_records['color'] = df_all_EDGES_records['records'].apply(lambda x: mcolors.to_hex(mapper.to_rgba(x)))\r\nrecords = df_all_EDGES_sel[['records']]\r\n\r\ndf_all_EDGES_sel = df_all_EDGES_sel[['u','v']]\r\n\r\n# filter recover_all_EDGES (geo-dataframe) with df_recover_all_EDGES_sel (dataframe)\r\nkeys = list(df_all_EDGES_sel.columns.values)\r\nindex_recover_all_EDGES = gdf_all_EDGES.set_index(keys).index\r\nindex_df_all_EDGES_sel = df_all_EDGES_sel.set_index(keys).index\r\nclean_edges_matched_route = gdf_all_EDGES[index_recover_all_EDGES.isin(index_df_all_EDGES_sel)]\r\n\r\n\r\n# get same color name according to the same 'u' 'v' pair\r\nclean_edges_matched_route[['u', 'v']].head()\r\n# merge records and colors into the geodataframe\r\nMERGED_clean_EDGES = pd.merge(clean_edges_matched_route, df_all_EDGES_records, on=['u', 'v'], how='inner')\r\n# remove duplicates nodes\r\nMERGED_clean_EDGES.drop_duplicates(['u', 'v'], inplace=True)\r\n\r\n\r\n'''\r\nAAA = MERGED_clean_EDGES[MERGED_clean_EDGES['u'] == 33589436]\r\nAAA.u\r\nAAA.v\r\nAAA.records\r\nAAA.color\r\n'''\r\n\r\n#############################################################################################\r\n# create basemap\r\nave_LAT = 37.53988692816245\r\nave_LON = 15.044971594798902\r\nmy_map = folium.Map([ave_LAT, ave_LON], zoom_start=11, 
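One caveat on edit_page above: int(total) // 20 + 1 reports one page too many whenever the hit count is an exact multiple of the page size, so the final request comes back with an empty result list. A hedged alternative using ceiling division (page size 20 as in the spider):

def total_pages(total_items, page_size=20):
    # ceiling division: exact multiples no longer add a trailing empty page
    return max(1, -(-int(total_items) // page_size))

assert total_pages(41) == 3   # 20 + 20 + 1
assert total_pages(40) == 2   # the '// 20 + 1' form would report 3 here
assert total_pages(0) == 1    # still issue at least one request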
tiles='cartodbpositron')\r\n#############################################################################################\r\n\r\n'''\r\nclean_edges_matched_route.geometry.to_file(filename='clean_matched_route.geojson', driver='GeoJSON')\r\nfolium.GeoJson('clean_matched_route.geojson').add_to(my_map)\r\nmy_map.save(\"clean_matched_route.html\")\r\n'''\r\n\r\n# add colors to map\r\nmy_map = plot_graph_folium_FK(MERGED_clean_EDGES, graph_map=None, popup_attribute=None,\r\n zoom=1, fit_bounds=True, edge_width=4, edge_opacity=1) # tiles='cartodbpositron'\r\nmy_map.save(\"clean_matched_route.html\")\r\n\r\n\r\n######################################################################\r\n######################## COLORBAR ####################################\r\n######################################################################\r\n\r\nimport matplotlib as mpl\r\nCOLORS_by_records = pd.DataFrame( MERGED_clean_EDGES.drop_duplicates(['records', 'color']))[['records', 'color']]\r\n# sort by ascending order of the column records\r\nCOLORS_by_records = COLORS_by_records.sort_values(by=['records'])\r\nlen(COLORS_by_records)\r\n# keep same order...\r\ncolor_list = COLORS_by_records.color.drop_duplicates().tolist()\r\n# display colorbar based on hex colors:\r\n\r\nfig, ax = plt.subplots(figsize=(6, 1))\r\nfig.subplots_adjust(bottom=0.5)\r\n# cmap = matplotlib.colors.ListedColormap(color_list)\r\ncmap = mpl.cm.Reds\r\nMAX = max(COLORS_by_records.records)\r\nMIN = min(COLORS_by_records.records)\r\ncmap.set_over(str(MAX + 5))\r\ncmap.set_under(str(MIN -5))\r\n\r\ncmap.set_over('k')\r\ncmap.set_under('white')\r\n\r\n# make a sequence list of records\r\nbounds = np.arange(MIN, MAX, 10).tolist()\r\n\r\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N)\r\ncb2 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,\r\n norm=norm,\r\n boundaries=[5] + bounds + [MAX+5],\r\n extend='both',\r\n ticks=bounds,\r\n spacing='uniform',\r\n orientation='horizontal')\r\ncb2.set_label('travel frequency (a.u.)')\r\n# fig.show()\r\n# save colorbar (map-matching frequency)\r\nfig.savefig('colorbar_map_matched.png')\r\n\r\n\r\nmerc = os.path.join('colorbar_map_matched.png')\r\n# overlay colorbar to my_map\r\nfolium.raster_layers.ImageOverlay(merc, bounds = [[37.822617, 15.734203], [37.768644,15.391770]], interactive=True, opacity=1).add_to(my_map)\r\n# re-save map\r\nmy_map.save(\"clean_matched_route.html\")\r\n\r\n\r\n'''\r\n# MERGED_clean_EDGES.plot(alpha=0.5,figsize=(20,40),edgecolor='black')\r\n# remove all the boundaries\r\nfinal_EDGES_CATANIA = MERGED_clean_EDGES.dissolve(by ='id')\r\nfinal_EDGES_CATANIA = final_EDGES_CATANIA[['geometry']] # keep only 'geometry'\r\n# final_EDGES_CATANIA.plot(alpha=0.5,edgecolor='black',figsize=(20,40))\r\n# save as geojson file\r\nfinal_EDGES_CATANIA.geometry.to_file(filename='final_EDGES_CATANIA.geojson', driver='GeoJSON')\r\n'''\r\n\r\n###########################################################################\r\n###########################################################################\r\n###########################################################################\r\n###### //////////////////////////////////////////// #######################\r\n###########################################################################\r\n###########################################################################\r\n\r\n'''\r\n# chose a specific ID ID (from all_EDGES)\r\nAAA = pd.DataFrame(gdf_all_EDGES)\r\n# BBB = AAA[AAA.id == '577']\r\n\r\ndf_all_EDGES_records.head()\r\n# which ID crossed the same edge?? 
(11 records)\r\nBBB = AAA[AAA.u == 33589436]\r\nBBB = BBB[BBB.v == 254098470]\r\n# get all the \"story of the 11 records\r\nID_list = list(BBB.id)\r\n\r\n# filter gdf_all_EDGES based on a list of index\r\nCCC = gdf_all_EDGES[gdf_all_EDGES.index.isin( ID_list )]\r\n\r\n###########################################################################\r\n# create basemap\r\nave_LAT = 37.53988692816245\r\nave_LON = 15.044971594798902\r\nmy_map = folium.Map([ave_LAT, ave_LON], zoom_start=11, tiles='cartodbpositron')\r\n#############################################################################\r\n\r\n\r\n# filter recover_all_EDGES (geo-dataframe) with df_recover_all_EDGES_sel (dataframe)\r\n\r\nCCC.geometry.to_file(filename='trip_by_ID.geojson', driver='GeoJSON')\r\nfolium.GeoJson('trip_by_ID.geojson').add_to(my_map)\r\nmy_map.save(\"trip_by_ID.html\")\r\n'''\r\n\r\n###########################################################################\r\n###########################################################################\r\n###########################################################################\r\n###### //////////////////////////////////////////// #######################\r\n###########################################################################\r\n###########################################################################\r\n\r\n","sub_path":"postprocessing/paths_cleaning.py","file_name":"paths_cleaning.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"258629144","text":"\nimport os, pip\n\n# from pip.req import parse_requirements\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\ndef read_version():\n with open(os.path.join(os.path.dirname(__file__), 'recorder/__init__.py')) as f:\n for line in f:\n if 'VERSION' in line:\n version = line.split('=')[1].replace(\"\\\"\", \"\").strip()\n return version\n\ninstall_reqs = pip.req.parse_requirements('requirements.txt', session=pip.download.PipSession())\n\nrequirements = [str(ir.req) for ir in install_reqs if ir is not None]\n\nsetup(name = \"ece-recorder\",\n author = \"Aljosha Friemann\",\n author_email = \"aljosha.friemann@gmail.com\",\n license = \"\",\n version = read_version(),\n description = \"EclipseConEurope recording script\",\n url = \"www.bitbucket.org/afriemann/ece-recorder.git\",\n keywords = [],\n # download_url = \"\",\n install_requires = requirements,\n long_description = read('README.rst'),\n classifiers = [],\n packages = [\"recorder\"],\n scripts = [\"scripts/recorder\"]\n)\n","sub_path":"pypi_install_script/ece-recorder-0.0.7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"403552379","text":"import math\nimport uuid\nfrom datetime import datetime, timedelta\nfrom typing import Any, Dict, List, Tuple, Union\n\nfrom dateutil import parser\n\nfrom services.mysql import SqlHandler\n\n\nclass Cities:\n def __init__(self, sql: SqlHandler) -> None:\n self.sql = sql\n self.spatial_columns = [\"timestamp\", \"longitude\", \"latitude\"]\n\n def describe(self, city_id: str) -> Dict:\n city_description = self.sql.get_unique(\n f\"SELECT city, state, max_date, department FROM Cities WHERE id=?\",\n (city_id,),\n )\n # reformat the name of the associated table\n table_name = 
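The colouring recipe in paths_cleaning.py above (group repeated (u, v) edges, count records, normalise, map through the Reds colormap) works on any dataframe. A small self-contained sketch using the same Normalize / ScalarMappable / to_hex chain; the toy edge list is illustrative:

import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

edges = pd.DataFrame({'u': [1, 1, 2, 1], 'v': [2, 2, 3, 2]})
counts = edges.groupby(['u', 'v']).size().reset_index(name='records')

norm = mcolors.Normalize(vmin=counts.records.min(), vmax=counts.records.max(), clip=True)
mapper = plt.cm.ScalarMappable(norm=norm, cmap=plt.cm.Reds)
counts['color'] = counts['records'].apply(lambda r: mcolors.to_hex(mapper.to_rgba(r)))
print(counts)  # the (1, 2) edge, travelled three times, gets the darkest red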
\"\".join(city_description.get(\"city\").strip().split(\" \"))\n        table_name = f\"{city_description.get('state').upper()}_{table_name}\"\n        if city_description.get(\"department\") != \"police\":\n            table_name += f\"_{city_description.get('department').capitalize()}\"\n        # update description\n        city_description.update({\"table\": table_name})\n\n        return city_description\n\n    @staticmethod\n    def record_to_key(\n        record: Dict, keys: List[str], intervals: List[Any]\n    ) -> Union[None, List[str]]:\n        date = parser.parse(f\"{record.get('date')} {record.get('time', '00:00')}\")\n        if date < intervals[0]:\n            return None\n        elif date > intervals[-1]:\n            return None\n        else:\n            gaps = [interval - date for interval in intervals]\n            indexes = len([gap for gap in gaps if gap.total_seconds() < 0]) - 1\n            return keys[indexes]\n\n    @staticmethod\n    def has_null(record: Dict) -> bool:\n        def is_null(value: Any) -> bool:\n            if value is None or value != value:\n                return True\n            elif isinstance(value, str):\n                return (\n                    value == \"nan\"\n                    or value == \"None\"\n                    or value == \"NaN\"\n                    or value.strip() == \"\"\n                )\n            elif isinstance(value, float):\n                return math.isnan(value)\n            else:\n                return False\n\n        return any([is_null(v) for v in record.values()])\n\n    @staticmethod\n    def build_geojson(records: List[Dict], info_column: str) -> Dict:\n        return {\n            \"type\": \"FeatureCollection\",\n            \"features\": [\n                {\n                    \"type\": \"Feature\",\n                    \"properties\": {\n                        \"id\": uuid.uuid4().hex,\n                        info_column: record.get(info_column),\n                    },\n                    \"geometry\": {\n                        \"type\": \"Point\",\n                        \"coordinates\": [\n                            record.get(\"longitude\"),\n                            record.get(\"latitude\"),\n                            0.0,\n                        ],\n                    },\n                }\n                for record in records\n            ],\n        }\n\n    @staticmethod\n    def list_dates(\n        start: str, end: str, formatted_for_query: bool = False\n    ) -> List[str]:\n        if end is None:\n            end = start\n        start_date, end_date = parser.parse(start), parser.parse(end)\n        days_difference = (end_date - start_date).days\n        list_dates = [\n            (start_date + timedelta(days=days)).strftime(\"%Y-%m-%d\")\n            for days in range(days_difference + 1)\n        ]\n        if formatted_for_query:\n            list_dates = [r\"'{}'\".format(date) for date in list_dates]\n            list_dates = f\"({', '.join(list_dates)})\"\n\n        return list_dates\n\n    @staticmethod\n    def dates_to_timestamps(start: str, end: str) -> Tuple[int, int]:\n        if end is None:\n            end = start\n        start_date, end_date = parser.parse(start), parser.parse(end)\n        start_timestamp = datetime.combine(\n            start_date, datetime.min.time()\n        ).timestamp()\n        end_timestamp = datetime.combine(\n            end_date, datetime.max.time()\n        ).timestamp()\n\n        return int(start_timestamp), int(end_timestamp)\n\n    def valid_dates(self, table: str) -> Dict:\n        dates_extrema = self.sql.get_unique(\n            f\"SELECT min(date) as minDate, max(date) as maxDate FROM {table} WHERE date IS NOT NULL AND date<>'None'\"\n        )\n        if dates_extrema is None:\n            minimum, maximum = None, None\n        else:\n            minimum = dates_extrema.get(\"minDate\", None)\n            maximum = dates_extrema.get(\"maxDate\", None)\n\n        return {\"minDate\": minimum, \"maxDate\": maximum}\n","sub_path":"server/services/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"214399226","text":"import random\nimport sys\nimport os\n\n#print (\"Hello World\")\n\n#Comment\n\n'''\nMultiline Comment\n'''\n\nname='Panshul'\n#print(name)\n\n#Number Strings Lists Tuples Dictionaries\n#+ - * / % ** //\nquote= \"\\\" this is a quote\\\"\"\n\nmultiline = ''' 
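Cities.list_dates above is an inclusive day range with an optional SQL-style rendering. Restated outside the class for a quick check (the dates are illustrative; dateutil and the strftime format are the ones used above):

from datetime import timedelta
from dateutil import parser

def list_dates(start, end=None):
    start_d, end_d = parser.parse(start), parser.parse(end or start)
    days = (end_d - start_d).days
    return [(start_d + timedelta(days=d)).strftime('%Y-%m-%d') for d in range(days + 1)]

assert list_dates('2021-03-30', '2021-04-01') == ['2021-03-30', '2021-03-31', '2021-04-01']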
Hello\nWorld\nkjc'''\n#print (\"%s %s %s\" %('this is a string','another string',quote),end=\"\")\n#print (multiline)\n\nrandom_list=['r0','r1','r2','r3','r4','r5']\n#print (random_list[0])\nrandom_list[0]='R0'\n\n#print (random_list[0])\n#print(random_list[1:3])\n\nother_list=['o0','o1','o2','o3']\n\nnew_list=[random_list,other_list]\n#print (new_list)\n#print (new_list[0])\n#print (new_list[1][2])\nrandom_list.insert(2,'r0')\nrandom_list.reverse()\nrandom_list.sort()\n#print(random_list)\n\nnew_list1=random_list+other_list\n#print(new_list1)\n\n\n# Tuples\n\npi_tuple = (1,3,'hello world')\nnew_tuple= list(pi_tuple)\nnew_list=tuple(new_tuple)\n#print (pi_tuple)\n#print (new_tuple)\n#print (new_list)\n#print(len(pi_tuple)) \n#print(max(new_tuple)) \n#print(min(new_tuple))\nnew_dict={1:0,2:5,'a1':'b1',1:'b1'}\n#print (new_dict)\n#print (new_dict[1])\n#pi_tuple[2]=5\nkeys=new_dict.keys()\nvalues=new_dict.values()\n#print (keys)\n#print (values)\n\nif len(pi_tuple)>4:\n\tprint(\"len>4\")\nelif len(pi_tuple)==4:\n\tprint(\"len=4\")\nelse:\n\t#print(\"len<4\")\n\tpass\n\n\nfor i in range(len(pi_tuple)):\n\tpass\n\t#print(pi_tuple[i])\nfor i in new_dict:\n\tpass\n\t#print (str(i)+' '+str(new_dict[i]))\n\ni=0\nwhile(i<=20):\n\tif i%2==0 :\n\t\t#print i\n\t\tbreak\n\ti+=1\n\ndef func1(dict):\n\tdict[1]=55\n\n\ni=5\n#print new_dict\nfunc1(new_dict)\n#print new_dict\n\n\ntest_file =open(\"Commands\",\"r+\")\n#print(test_file.mode)\n#print(test_file.name)\nread_file=test_file.read()\n#print (read_file)\n\nclass Animal:\n\t#__name=None\n\theight=0\n\t__sound=0\n\t__weight=0\n\n\tdef __init__(self,name,h,w,s):\n\t\tself.__name=name\n\t\tself.height=h\n\t\tself.__sound=s\n\t\tself.__weight=w\n\tdef set_name(self,name):\n\t\tself.__name=name\n\tdef get_name(self):\n\t\treturn self.__name\n\n\tdef __str__(self):\n\t\treturn ('{} = name {} =weight {}=height {}=sound'.format(self.__name,self.__weight,self.height,self.__sound))\n\nanimal = Animal('hello',5,25,'bulla')\nprint (animal.get_name())\nprint (animal)\n\n\nclass Dog(Animal):\n\towner=\"\"\n\tdef __init__(self,name,owner,height,weight,sound):\n\t\tself.owner=owner\n\t\tsuper().__init__(name,height,weight,sound)\n\n\tdef get_name(self):\n\t\treturn self.owner\n\n#dog=Dog('jacky','panshul','50','25','bark')\n\n\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"636070995","text":"import time\nfrom time import strftime, sleep\nimport subprocess\nimport digitalio\nimport board\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\nimport qwiic_joystick\n\n\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = None\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 64000000\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(\n    spi,\n    cs=cs_pin,\n    dc=dc_pin,\n    rst=reset_pin,\n    baudrate=BAUDRATE,\n    width=135,\n    height=240,\n    x_offset=53,\n    y_offset=40,\n)\n\n# Create blank image for drawing.\n# Make sure to create image with mode 'RGB' for full color.\nheight = disp.width # we swap height/width to rotate it to landscape!\nwidth = disp.height\nimage = Image.new(\"RGB\", (width, height))\nrotation = 90\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw a black 
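On the double-underscore attributes in start.py's Animal class above: Python rewrites __sound inside the class body to _Animal__sound, which is why the constructor has to assign through self for the value to land on the instance. A tiny illustration:

class Animal:
    __sound = 0                      # class-level default, mangled to _Animal__sound

    def __init__(self, s):
        self.__sound = s             # instance attribute, same mangled name

a = Animal('bark')
assert a._Animal__sound == 'bark'    # the mangled name is the real attribute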
filled box to clear the image.\ndraw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\ndisp.image(image, rotation)\n# Draw some shapes.\n# First define some constants to allow easy resizing of shapes.\npadding = -2\ntop = padding\nbottom = height - padding\n# Move left to right keeping track of the current x position for drawing shapes.\nx = 0\n\n# Alternatively load a TTF font. Make sure the .ttf font file is in the\n# same directory as the python script!\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\nfont = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 18)\n\n# Turn on the backlight\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\n\n# Enable the buttons for demo\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\n\n# Define default coordination\nx, y = 3, 5\n\n# Enable the joystick\njoystick = qwiic_joystick.QwiicJoystick()\n\nif joystick.is_connected() == False:\n print(\"The Qwiic Joystick device isn't connected to the system. Please check your connection\", \\\n file=sys.stderr)\n\njoystick.begin()\n\nprint(\"Initialized. Firmware Version: %s\" % joystick.get_version())\n\nwhile True:\n # Draw a black filled box to clear the image.\n draw.rectangle((0, 0, width, height), outline=0, fill=0)\n\n #TODO: fill in here. You should be able to look in cli_clock.py and stats.py \n\n # Get current time hour \n hour = int(strftime(\"%H\"))\n\n print(\"X: %d, Y: %d, Button: %d\" % ( \\\n joystick.get_horizontal(), \\\n joystick.get_vertical(), \\\n joystick.get_button()))\n\n time.sleep(0.1)\n\n # Button Push\n while joystick.get_button() == 0:\n ma_img = Image.open(\"mawen.jpeg\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n\n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Left\n while 500 < joystick.get_horizontal() <= 600 and joystick.get_vertical() == 0:\n ma_img = Image.open(\"1.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Upper Left\n while joystick.get_horizontal() == 1023 and 0 <= joystick.get_vertical() < 100:\n ma_img = Image.open(\"2.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Up\n while joystick.get_horizontal() == 1023 and 500 <= joystick.get_vertical() < 600:\n ma_img = Image.open(\"3.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Upper Right\n while joystick.get_horizontal() == 1023 and 1000 <= joystick.get_vertical() < 1024:\n ma_img = Image.open(\"4.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Right\n while 500 <= joystick.get_horizontal() < 600 and 0 <= joystick.get_vertical() == 1023:\n ma_img = Image.open(\"5.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Lower Right\n while joystick.get_horizontal() == 0 and 1000 <= joystick.get_vertical() < 1024:\n ma_img = Image.open(\"6.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Down\n while joystick.get_horizontal() == 0 and 500 <= joystick.get_vertical() < 600:\n ma_img = Image.open(\"7.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n # Lower Left\n while 0 <=joystick.get_horizontal() < 100 and 0 <= 
joystick.get_vertical() < 100:\n ma_img = Image.open(\"8.png\")\n ma_img = ma_img.resize((240, 135), Image.BICUBIC)\n \n disp.image(ma_img, rotation)\n time.sleep(0.1)\n","sub_path":"Lab 2/screen_clock.py","file_name":"screen_clock.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"113276895","text":"import pickle\r\nimport traceback\r\nfrom datetime import datetime\r\nfrom typing import List\r\n\r\nimport praw\r\nimport time\r\n\r\nfrom common_wordx import common_words\r\n\r\nr = praw.Reddit('TvC data collection')\r\ncomments_file = 'comments %s to %s.pck'\r\n\r\n\r\ndef ingest_submissions(submissions, subreddit, all_submissions):\r\n n = 0\r\n s = 0\r\n for submission in submissions:\r\n try:\r\n if submission.score < 25:\r\n continue\r\n print(\"next submission\")\r\n if hasattr(submission, '_comments'):\r\n date = datetime.utcfromtimestamp(submission.created_utc)\r\n print(date)\r\n current_submission = Submission(submission.title, submission.score, sub_id=submission.name)\r\n print('replacing more')\r\n submission.replace_more_comments(limit=2)\r\n print('flattening tree')\r\n comments = praw.helpers.flatten_tree(submission.comments)\r\n for comment in comments:\r\n n += 1\r\n if hasattr(comment, \"body\"):\r\n current_submission.add_comment(comment)\r\n\r\n all_submissions.add(current_submission)\r\n print(str(len(current_submission.comments)) + ' comments ')\r\n print(str(submission.score) + \" score\")\r\n if s % 100 == 0:\r\n all_submissions.dump(comments_file)\r\n s += 1\r\n except KeyboardInterrupt:\r\n all_submissions.dump(comments_file)\r\n raise\r\n except:\r\n traceback.print_exc()\r\n continue\r\n all_submissions.dump(comments_file)\r\n print(\"Done\")\r\n\r\n\r\ndef get_data(start_time: datetime, end_time: datetime, subreddit='politics'):\r\n # unix_start = 1462060800 5/1\r\n # unix_start = 1463674480 5/19\r\n # unix_start = 1470105157 8/2/16\r\n # unix_start = 1454284800 # 2/1/16\r\n\r\n unix_start = time.mktime(start_time.timetuple())\r\n unix_end = time.mktime(end_time.timetuple())\r\n\r\n # Latest 1470592542\r\n # unix_stop = 1462060800\r\n global comments_file\r\n\r\n comments_file = comments_file % (\r\n str(start_time), str(datetime.now()))\r\n comments_file = comments_file.replace(':', '.')\r\n submissions = praw.helpers.submissions_between(r, subreddit=subreddit, lowest_timestamp=unix_start,\r\n highest_timestamp=unix_end, newest_first=False )\r\n all_submissions = Submissions(subreddit)\r\n\r\n while True:\r\n\r\n try:\r\n ingest_submissions(submissions, subreddit, all_submissions)\r\n break\r\n except KeyboardInterrupt:\r\n all_submissions.dump(comments_file)\r\n raise\r\n except:\r\n try:\r\n next(submissions)\r\n except StopIteration:\r\n break\r\n traceback.print_exc()\r\n\r\n return comments_file\r\n\r\n\r\nclass Submissions(object):\r\n def __init__(self, subreddit):\r\n self.subreddit = subreddit\r\n self.submissions = [] # type: List[Submission]\r\n\r\n def add(self, sub):\r\n self.submissions.append(sub)\r\n\r\n def dump(self, filename):\r\n print(\"Writing \" + str(len(self.submissions)) + \" submissions to file\")\r\n with open(filename, 'wb') as file:\r\n pickle.dump(self, file)\r\n\r\n @staticmethod\r\n def load(filename):\r\n print(\"Reading submissions\")\r\n with open(filename, 'rb') as f:\r\n sub = pickle.load(f) # type: Submissions\r\n return sub\r\n\r\n def __iter__(self):\r\n return self.submissions.__iter__()\r\n\r\n def __len__(self):\r\n 
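The eight while blocks in Lab 2/screen_clock.py above all do the same thing: classify the raw (horizontal, vertical) reading into a direction and show that direction's image. A sketch of a table-driven version; the 0..1023 range matches the script, but the thresholds, axis orientation and file names here are illustrative:

def classify(x, y, lo=100, hi=900):
    # collapse each 0..1023 axis reading to -1 / 0 / +1
    col = -1 if x < lo else (1 if x > hi else 0)
    row = -1 if y < lo else (1 if y > hi else 0)
    images = {(0, 1): 'up.png', (0, -1): 'down.png',
              (-1, 0): 'left.png', (1, 0): 'right.png',
              (-1, 1): 'upper_left.png', (1, 1): 'upper_right.png',
              (-1, -1): 'lower_left.png', (1, -1): 'lower_right.png'}
    return images.get((col, row))    # None means the stick is centred

assert classify(1023, 500) == 'right.png'
assert classify(50, 50) == 'lower_left.png'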
return self.submissions.__len__()\r\n\r\n\r\nclass Submission(object):\r\n def __init__(self, title, karma, sub_id):\r\n self.sub_id = sub_id\r\n self.karma = karma\r\n self.title = title # type: str\r\n self.comments = [] # type: List[Submission.Comment]\r\n self.weight = 1\r\n\r\n @property\r\n def num_comments(self):\r\n return len(self.comments)\r\n\r\n def add_comment(self, comment):\r\n self.comments.append(self.Comment(comment))\r\n\r\n class Comment:\r\n def __init__(self, comment):\r\n self.body = comment.body # type: str\r\n self.utc_timestamp = datetime.utcfromtimestamp(comment.created_utc)\r\n self.karma = comment.score\r\n self.author = comment.author\r\n self.weight = 1\r\n\r\n\r\nif __name__ == '__main__':\r\n get_data()\r\n","sub_path":"data_collector.py","file_name":"data_collector.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"487733310","text":"from django import forms\nfrom .models import Article\n\n\nclass ArticleModelForm(forms.ModelForm):\n\n title = forms.CharField(\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"Your title\",\n }\n )) #required is TRUE by default\n\n content = forms.CharField(\n required=False,\n widget=forms.Textarea(\n attrs={\n \"placeholder\": \"Define your products\",\n \"rows\": 20,\n \"cols\": 30\n }\n )\n )\n \n class Meta:\n model = Article\n fields = [\n 'title',\n 'content',\n 'active'\n ]\n\n def clean_title(self, *args, **kwargs):\n title = self.cleaned_data.get(\"title\")\n if \"küfür\" in title:\n raise forms.ValidationError(\"This is not a valid Title\")\n \n return title\n\n def clean_content(self, *args, **kwargs):\n content = self.cleaned_data.get(\"content\")\n if \"küfür\" in content:\n raise forms.ValidationError(\"This is not a valid content\")\n return content\n \n","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"573101859","text":"import heapq\nimport sys\ninput = lambda :sys.stdin.readline().rstrip()\n\ndef INPUT(mode=int):\n return list(map(mode, input().split()))\n\ndef Dijkstra_heap(s, edge):\n # 始点sから各頂点への最短距離#始点sから各頂点への最短距離\n d = [10**20] * n\n used = [False] * n\n d[s] = 0\n used[s] = True\n edgelist = []\n for i, j in edge[s]:\n heapq.heappush(edgelist, i+j*(10**6))\n while len(edgelist):\n minedge = heapq.heappop(edgelist)\n v = minedge % (10**6)\n # まだ使われてない頂点の中から最小の距離のものを探す\n if used[v]: continue\n d[v] = minedge // (10**6)\n used[v] = True\n for e in edge[v]:\n if not used[e[0]]:\n heapq.heappush(edgelist, e[0]+(e[1]+d[v]) * (10**6))\n return d\n\nn, m = INPUT()\nAB = [INPUT() for _ in range(m)]\nedge = [[] for _ in range(n)]\nfor a, b in AB:\n a -= 1\n b -= 1\n edge[a].append((b, 1))\n edge[b].append((a, 1))\n\ndist = Dijkstra_heap(0, edge)\nif dist[-1] <= 2:\n print(\"POSSIBLE\")\nelse:\n print(\"IMPOSSIBLE\")","sub_path":"Python_codes/p03645/s993556337.py","file_name":"s993556337.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"62946650","text":"import collections\nimport re\n\n\"\"\"\nA tokenizer or scanner analyzes a string to categorize groups of characters.\nThis is a useful first step in writing a compiler or interpreter.\n\nThe text categories are specified with regular expressions.\nThe technique is to combine those into a single master 
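The Dijkstra solution above packs (distance, vertex) into one integer, vertex + distance * 10**6, so the heap orders by distance first. heapq compares tuples lexicographically, which gives the same ordering without the encode/decode arithmetic; a sketch on a tiny graph:

import heapq

def dijkstra(start, adj, n):
    dist = [float('inf')] * n
    dist[start] = 0
    heap = [(0, start)]          # (distance, vertex): distance compared first
    while heap:
        d, v = heapq.heappop(heap)
        if d > dist[v]:
            continue             # stale entry, vertex already settled
        for nxt, w in adj[v]:
            if d + w < dist[nxt]:
                dist[nxt] = d + w
                heapq.heappush(heap, (dist[nxt], nxt))
    return dist

adj = [[(1, 1)], [(0, 1), (2, 1)], [(1, 1)]]  # 3-node path graph
assert dijkstra(0, adj, 3) == [0, 1, 2]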
regular\nexpression and to loop over successive matches\n\"\"\"\n\nToken = collections.namedtuple('Token', ['typ', 'value', 'line', 'column'])\n\n\ndef tokenize(code):\n keywords = {'IF', 'THEN', 'ENDIF', 'FOR', 'NEXT', 'GOSUB', 'RETURN'}\n token_specification = [\n ('NUMBER', r'\\d+(\\.\\d*)?'), # Integer or decimal number\n ('ASSIGN', r':='), # Assignment operator\n ('END', r';'), # Statement terminator\n ('ID', r'[A-Za-z]+'), # Identifiers\n ('OP', r'[+\\-*/]'), # Arithmetic operators\n ('NEWLINE', r'\\n'), # Line endings\n ('SKIP', r'[ \\t]+'), # Skip over spaces and tabs\n ('MISMATCH',r'.'), # Any other character\n ]\n tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)\n line_num = 1\n line_start = 0\n\n for mo in re.finditer(tok_regex, code):\n kind = mo.lastgroup\n value = mo.group(kind)\n\n if kind == 'NEWLINE':\n line_start = mo.end()\n line_num += 1\n elif kind == 'SKIP':\n pass\n elif kind == 'MISMATCH':\n raise RuntimeError(f'{value!r} unexpected on line {line_num}')\n else:\n if kind == 'ID' and value in keywords:\n kind = value\n column = mo.start() - line_start\n yield Token(kind, value, line_num, column)\n\nstatements = '''\n IF quantity THEN\n total := total + price * quantity;\n tax := price * 0.05;\n ENDIF;\n'''\n\nfor token in tokenize(statements):\n print(token)\n\n# Token(typ='IF', value='IF', line=2, column=4)\n# Token(typ='ID', value='quantity', line=2, column=7)\n# Token(typ='THEN', value='THEN', line=2, column=16)\n# Token(typ='ID', value='total', line=3, column=8)\n# Token(typ='ASSIGN', value=':=', line=3, column=14)\n# Token(typ='ID', value='total', line=3, column=17)\n# Token(typ='OP', value='+', line=3, column=23)\n# Token(typ='ID', value='price', line=3, column=25)\n# Token(typ='OP', value='*', line=3, column=31)\n# Token(typ='ID', value='quantity', line=3, column=33)\n# Token(typ='END', value=';', line=3, column=41)\n# Token(typ='ID', value='tax', line=4, column=8)\n# Token(typ='ASSIGN', value=':=', line=4, column=12)\n# Token(typ='ID', value='price', line=4, column=15)\n# Token(typ='OP', value='*', line=4, column=21)\n# Token(typ='NUMBER', value='0.05', line=4, column=23)\n# Token(typ='END', value=';', line=4, column=27)\n# Token(typ='ENDIF', value='ENDIF', line=5, column=4)\n# Token(typ='END', value=';', line=5, column=9)\n","sub_path":"stdlib/src/re-example-3.py","file_name":"re-example-3.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"517552478","text":"import argparse\nimport json\nimport os\nimport requests\nimport socket\nfrom hashlib import blake2b\nfrom json.decoder import JSONDecodeError\nfrom operator import itemgetter\nfrom pathlib import Path\nfrom re import sub\n\nfrom pytezos import pytezos\nfrom base58 import b58decode_check, b58encode_check\nfrom nacl.signing import SigningKey\n\nACCOUNTS = json.loads(os.environ[\"ACCOUNTS\"])\nCHAIN_PARAMS = json.loads(os.environ[\"CHAIN_PARAMS\"])\nNODES = json.loads(os.environ[\"NODES\"])\n\nMY_POD_NAME = os.environ[\"MY_POD_NAME\"]\nMY_NODE_TYPE = MY_NODE = None\n# The chain initiator job does not have a MY_NODE_TYPE or MY_NODE. 
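The heart of the tokenizer above is two lines: join the specs into one alternation of named groups, then read match.lastgroup back to learn which spec fired. The same pattern works for any spec list; a toy two-token version:

import re

spec = [('NUM', r'\d+'), ('WORD', r'[A-Za-z]+'), ('SKIP', r'\s+')]
master = '|'.join('(?P<%s>%s)' % pair for pair in spec)

for mo in re.finditer(master, 'abc 42'):
    if mo.lastgroup != 'SKIP':
        print(mo.lastgroup, mo.group())   # WORD abc, then NUM 42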
Only\n# statefulsets.\nif os.environ.get(\"MY_NODE_TYPE\"):\n MY_NODE_TYPE = os.environ[\"MY_NODE_TYPE\"]\n MY_NODE = NODES[MY_NODE_TYPE][MY_POD_NAME]\n\n\nALL_NODES = { **NODES.get(\"baking\", {}), **NODES.get(\"regular\", {}) }\nBAKING_NODES = NODES[\"baking\"]\nNETWORK_CONFIG = CHAIN_PARAMS[\"network\"]\n\nSHOULD_GENERATE_UNSAFE_DETERMINISTIC_DATA = CHAIN_PARAMS.get(\n \"should_generate_unsafe_deterministic_data\"\n)\n\n# If there are no genesis params, this is a public chain.\nTHIS_IS_A_PUBLIC_NET = True if not NETWORK_CONFIG.get(\"genesis\") else False\n\ndef main():\n all_accounts = ACCOUNTS\n\n if SHOULD_GENERATE_UNSAFE_DETERMINISTIC_DATA:\n fill_in_missing_genesis_block()\n all_accounts = fill_in_missing_baker_accounts()\n fill_in_missing_keys(all_accounts)\n\n import_keys(all_accounts)\n\n if MY_POD_NAME in BAKING_NODES:\n # If this node is a baker, it must have an account with a secret key.\n verify_this_bakers_account(all_accounts)\n\n main_parser = argparse.ArgumentParser()\n main_parser.add_argument(\n \"--generate-parameters-json\",\n action=\"store_true\",\n help=\"generate parameters.json\",\n )\n main_parser.add_argument(\n \"--generate-config-json\", action=\"store_true\", help=\"generate config.json\"\n )\n main_args = main_parser.parse_args()\n\n # Create parameters.json\n if main_args.generate_parameters_json:\n print(\"Starting parameters.json file generation\")\n bootstrap_accounts_pubkey_hashes = get_bootstrap_accounts_pubkey_hashes(\n all_accounts\n )\n baker_bootstrap_accounts_pubkeys = get_bootstrap_baker_accounts_pubkeys(\n all_accounts\n )\n protocol_parameters = create_protocol_parameters_json(\n bootstrap_accounts_pubkey_hashes, baker_bootstrap_accounts_pubkeys\n )\n\n protocol_params_json = json.dumps(protocol_parameters, indent=2)\n with open(\"/etc/tezos/parameters.json\", \"w\") as json_file:\n print(protocol_params_json, file=json_file)\n\n # Create config.json\n if main_args.generate_config_json:\n print(\"\\nStarting config.json file generation\")\n bootstrap_peers = CHAIN_PARAMS.get(\"bootstrap_peers\", [])\n\n my_zerotier_ip = None\n zerotier_data_file_path = Path(\"/var/tezos/zerotier_data.json\")\n if is_chain_running_on_zerotier_net(zerotier_data_file_path):\n my_zerotier_ip = get_my_pods_zerotier_ip(zerotier_data_file_path)\n if bootstrap_peers == []:\n bootstrap_peers.extend(get_zerotier_bootstrap_peer_ips())\n\n if THIS_IS_A_PUBLIC_NET:\n with open(\"/etc/tezos/data/config.json\", \"r\") as f:\n bootstrap_peers.extend(json.load(f)[\"p2p\"][\"bootstrap-peers\"])\n else:\n local_bootstrap_peers = []\n for name, settings in ALL_NODES.items():\n print(\" -- is \" + name + \" a bootstrap peer?\\n\");\n my_pod_fqdn_with_port = f\"{socket.getfqdn()}:9732\"\n if (\n settings.get(\"is_bootstrap_node\", False)\n and name not in my_pod_fqdn_with_port\n ):\n # Construct the FBN of the bootstrap node for all node's bootstrap_peers\n print(\" -- YES!\\n\")\n bootstrap_peer_domain = sub(r\"-\\d+$\", \"\", name)\n bootstrap_peer_fbn_with_port = (\n f\"{name}.{bootstrap_peer_domain}:9732\"\n )\n local_bootstrap_peers.append(bootstrap_peer_fbn_with_port)\n bootstrap_peers.extend(local_bootstrap_peers)\n\n if not bootstrap_peers and not MY_NODE.get(\"is_bootstrap_node\", False):\n raise Exception(\n \"ERROR: No bootstrap peers found for this non-bootstrap node\"\n )\n\n config_json = json.dumps(\n create_node_config_json(\n bootstrap_peers,\n my_zerotier_ip,\n ),\n indent=2,\n )\n print(\"Generated config.json :\")\n print(config_json)\n with 
open(\"/etc/tezos/config.json\", \"w\") as json_file:\n print(config_json, file=json_file)\n\n\n# If NETWORK_CONFIG[\"genesis\"][\"block\"] hasn't been specified, we generate a\n# deterministic one.\ndef fill_in_missing_genesis_block():\n print(\"\\nEnsure that we have genesis_block\")\n genesis_config = NETWORK_CONFIG[\"genesis\"]\n genesis_block_placeholder = \"YOUR_GENESIS_BLOCK_HASH_HERE\"\n\n if (\n genesis_config.get(\"block\", genesis_block_placeholder)\n == genesis_block_placeholder\n ):\n print(\"Deterministically generating missing genesis_block\")\n seed = \"foo\"\n gbk = blake2b(seed.encode(), digest_size=32).digest()\n gbk_b58 = b58encode_check(b\"\\x01\\x34\" + gbk).decode(\"utf-8\")\n genesis_config[\"block\"] = gbk_b58\n\n\n# Secret and public keys are matches and need be processed together. Neither key\n# must be specified, as later code will fill in the details if they are not.\n#\n# We create any missing accounts that are refered to by a node at\n# BAKING_NODES to ensure that all named accounts exist.\ndef fill_in_missing_baker_accounts():\n print(\"\\nFilling in any missing baker accounts...\")\n new_accounts = {}\n for baker_name, baker_values in BAKING_NODES.items():\n baker_account_name = baker_values.get(\"bake_using_account\")\n\n if not baker_account_name or baker_account_name not in ACCOUNTS:\n new_baker_account_name = None\n if not baker_account_name:\n print(f\"A new account named {baker_name} will be created\")\n new_baker_account_name = baker_name\n else:\n print(\n f\"Specified account named {baker_account_name} is missing and will be created\"\n )\n new_baker_account_name = baker_account_name\n\n new_accounts[new_baker_account_name] = {\n \"bootstrap_balance\": CHAIN_PARAMS[\"default_bootstrap_mutez\"],\n \"is_bootstrap_baker_account\": True,\n }\n # Add to the baker the account name it will use to bake\n baker_values[\"bake_using_account\"] = new_baker_account_name\n\n return {**new_accounts, **ACCOUNTS}\n\n\n# Verify that the current baker has a baker account with secret key\ndef verify_this_bakers_account(accounts):\n account_using_to_bake = MY_NODE.get(\"bake_using_account\")\n if not account_using_to_bake:\n raise Exception(f\"ERROR: No account specified for baker {MY_POD_NAME}\")\n\n account = accounts.get(account_using_to_bake)\n if not account:\n raise Exception(\n f\"ERROR: No account named {account_using_to_bake} found for baker {MY_POD_NAME}\"\n )\n\n if account.get(\"type\") != \"secret\" or not account.get(\"key\"):\n raise Exception(\n \"ERROR: Either a secret key was not provided or the key type not specified\"\n f\", for account {account_using_to_bake} for baker {MY_POD_NAME}\"\n )\n\n\n#\n# import_keys() creates three files in /var/tezos/client which specify\n# the keys for each of the accounts: secret_keys, public_keys, and\n# public_key_hashs.\n#\n# We iterate over fill_in_missing_baker_accounts() which ensures that we\n# have a full set of accounts for which to write keys.\n#\n# If the account has a private key specified, we parse it and use it to\n# derive the public key and its hash. If a public key is also specified,\n# we check to ensure that it matches the secret key. If neither a secret\n# nor a public key are specified, then we derive one from a hash of\n# the account name and the gensis_block (which may be generated above.)\n#\n# Both specified and generated keys are stable for the same _values.yaml\n# files. The specified keys for obvious reasons. 
The generated keys\n# are stable because we take care not to use any information that is not\n# specified in the _values.yaml file in the seed used to generate them.\n\nedsk = b\"\\x0d\\x0f\\x3a\\x07\"\n\ndef fill_in_missing_keys(all_accounts):\n print(\"\\nFill in missing keys\")\n\n for account_name, account_values in all_accounts.items():\n account_key_type = account_values.get(\"type\")\n account_key = account_values.get(\"key\")\n\n if account_key == None and account_key_type != None:\n raise Exception(f\"ERROR: {account_name} specifies \" +\n f\"type {account_key_type} without \" +\n f\"a key\")\n\n if account_key == None:\n print(f\" Deriving secret key for account \" +\n f\"{account_name} from genesis_block\")\n seed = account_name + \":\" + NETWORK_CONFIG[\"genesis\"][\"block\"]\n sk = blake2b(seed.encode(), digest_size=32).digest()\n sk_b58 = b58encode_check(edsk + sk).decode(\"utf-8\")\n account_values[\"key\"] = sk_b58\n account_values[\"type\"] = \"secret\"\n\n\ndef import_keys(all_accounts):\n print(\"\\nImporting keys\")\n tezdir = \"/var/tezos/client\"\n secret_keys = []\n public_keys = []\n public_key_hashs = []\n\n for account_name, account_values in all_accounts.items():\n print(\"\\n Importing keys for account: \" + account_name)\n account_key_type = account_values.get(\"type\")\n account_key = account_values.get(\"key\")\n\n if account_key == None:\n raise Exception(f\"{account_name} defined w/o a key\")\n\n key = pytezos.key.from_encoded_key(account_key)\n try:\n key.secret_key()\n except ValueError:\n if account_key_type == \"secret\":\n raise ValueError(account_name + \"'s key marked as \" +\n \"secret, but it is public\")\n else:\n if account_key_type == \"public\":\n raise ValueError(account_name + \"'s key marked as \" +\n \"public, but it is secret\")\n\n try:\n sk_b58 = key.secret_key()\n print(\" Appending secret key\")\n secret_keys.append({\"name\": account_name,\n \"value\": \"unencrypted:\" + sk_b58})\n except ValueError:\n pass\n\n pk_b58 = key.public_key()\n print(f\" Appending public key: {pk_b58}\")\n public_keys.append(\n {\n \"name\": account_name,\n \"value\": {\"locator\": \"unencrypted:\" + pk_b58, \"key\": pk_b58},\n }\n )\n\n pkh_b58 = key.public_key_hash()\n print(f\" Appending public key hash: {pkh_b58}\")\n public_key_hashs.append({\"name\": account_name, \"value\": pkh_b58})\n\n # XXXrcd: fix this print!\n\n print(f\" Account key type: {account_values.get('type')}\")\n print(\n f\" Account bootstrap balance: \" +\n f\"{account_values.get('bootstrap_balance')}\"\n )\n print(\n f\" Is account a bootstrap baker: \" +\n f\"{account_values.get('is_bootstrap_baker_account', False)}\"\n )\n\n print(\"\\n Writing \" + tezdir + \"/secret_keys\")\n json.dump(secret_keys, open(tezdir + \"/secret_keys\", \"w\"), indent=4)\n print(\" Writing \" + tezdir + \"/public_keys\")\n json.dump(public_keys, open(tezdir + \"/public_keys\", \"w\"), indent=4)\n print(\" Writing \" + tezdir + \"/public_key_hashs\")\n json.dump(public_key_hashs, open(tezdir + \"/public_key_hashs\", \"w\"), indent=4)\n\n\ndef get_bootstrap_accounts(accounts, keys_list, is_getting_accounts_for_bakers):\n keys = {}\n for key in keys_list:\n key_name = key[\"name\"]\n bootstrap_balance = accounts[key_name].get(\"bootstrap_balance\", \"0\")\n # Don't add accounts with 0 tez to parameters.json\n if bootstrap_balance == \"0\":\n continue\n\n # If we are handling pubkeys for baker accounts\n if is_getting_accounts_for_bakers and accounts[key_name].get(\n \"is_bootstrap_baker_account\", False\n 
):\n keys[key_name] = {\n \"key\": key[\"value\"][\"key\"],\n \"bootstrap_balance\": bootstrap_balance,\n }\n elif ( # We are handling pubkey hashes for regular accounts\n not is_getting_accounts_for_bakers\n and not accounts[key_name].get(\"is_bootstrap_baker_account\", True)\n ):\n keys[key_name] = {\n \"key\": key[\"value\"],\n \"bootstrap_balance\": bootstrap_balance,\n }\n\n return keys\n\n\n# Get baking account's pubkeys for parameters.json bootstrap_accounts\ndef get_bootstrap_baker_accounts_pubkeys(accounts):\n with open(\"/var/tezos/client/public_keys\", \"r\") as f:\n pubkey_list = json.load(f)\n return get_bootstrap_accounts(\n accounts, pubkey_list, is_getting_accounts_for_bakers=True\n )\n\n\n# Get non-baking account's pubkey hashes for parameters.json bootstrap_accounts\ndef get_bootstrap_accounts_pubkey_hashes(accounts):\n with open(\"/var/tezos/client/public_key_hashs\", \"r\") as f:\n pubkey_hash_list = json.load(f)\n return get_bootstrap_accounts(\n accounts, pubkey_hash_list, is_getting_accounts_for_bakers=False\n )\n\n\ndef get_genesis_accounts_pubkey_and_balance(accounts):\n pubkey_and_balance_pairs = []\n\n for account_values in accounts.values():\n pubkey_and_balance_pairs.append(\n [account_values[\"key\"], account_values[\"bootstrap_balance\"]]\n )\n\n return pubkey_and_balance_pairs\n\n\n#\n# commitments and bootstrap_accounts are not part of\n# `CHAIN_PARAMS[\"protocol_parameters\"]`. The commitment size for Florence was\n# too large to load from Helm to k8s. So we are mounting a file containing them.\n# bootstrap accounts always needs massaging so they are passed as arguments.\ndef create_protocol_parameters_json(bootstrap_accounts, bootstrap_baker_accounts):\n \"\"\" Create the protocol's parameters.json file \"\"\"\n\n accounts = {**bootstrap_accounts, **bootstrap_baker_accounts}\n pubkeys_with_balances = get_genesis_accounts_pubkey_and_balance(accounts)\n\n protocol_activation = CHAIN_PARAMS[\"protocol_activation\"]\n protocol_params = protocol_activation[\"protocol_parameters\"]\n protocol_params[\"bootstrap_accounts\"] = pubkeys_with_balances\n\n print(json.dumps(protocol_activation, indent=4))\n\n # genesis contracts and commitments are downloaded from a http location (like a bucket)\n # they are typically too big to be passed directly to helm\n if protocol_activation.get(\"bootstrap_contract_urls\"):\n protocol_params[\"bootstrap_contracts\"] = []\n for url in protocol_activation[\"bootstrap_contract_urls\"]:\n print(f\"Injecting bootstrap contract from {url}\")\n protocol_params[\"bootstrap_contracts\"].append(requests.get(url).json())\n\n if protocol_activation.get(\"commitments_url\"):\n print(f\"Injecting commitments (faucet account precursors) from {protocol_activation['commitments_url']}\")\n protocol_params[\"commitments\"] = requests.get(protocol_activation[\"commitments_url\"]).json()\n\n return protocol_params\n\n\ndef is_chain_running_on_zerotier_net(file):\n return file.is_file()\n\n\ndef get_my_pods_zerotier_ip(zerotier_data_file_path):\n with open(zerotier_data_file_path, \"r\") as f:\n return json.load(f)[0][\"assignedAddresses\"][0].split(\"/\")[0]\n\n\ndef get_zerotier_bootstrap_peer_ips():\n with open(\"/var/tezos/zerotier_network_members.json\", \"r\") as f:\n network_members = json.load(f)\n return [\n n[\"config\"][\"ipAssignments\"][0]\n for n in network_members\n if \"ipAssignments\" in n[\"config\"]\n and n[\"name\"] == f\"{CHAIN_PARAMS['network']['chain_name']}_bootstrap\"\n ]\n\n\ndef get_genesis_pubkey():\n with 
open(\"/var/tezos/client/public_keys\", \"r\") as f:\n pubkeys = json.load(f)\n genesis_pubkey = None\n for _, pubkey in enumerate(pubkeys):\n if pubkey[\"name\"] == NETWORK_CONFIG[\"activation_account_name\"]:\n genesis_pubkey = pubkey[\"value\"][\"key\"]\n break\n if not genesis_pubkey:\n raise Exception(\"ERROR: Couldn't find the genesis_pubkey\")\n return genesis_pubkey\n\n\ndef create_node_config_json(\n bootstrap_peers,\n net_addr=None,\n):\n \"\"\" Create the node's config.json file \"\"\"\n\n node_config = {\n \"data-dir\": \"/var/tezos/node/data\",\n \"rpc\": {\n \"listen-addrs\": [f\"{os.getenv('MY_POD_IP')}:8732\", \"127.0.0.1:8732\"],\n },\n \"p2p\": {\n \"bootstrap-peers\": bootstrap_peers,\n \"listen-addr\": (net_addr + \":9732\" if net_addr else \"[::]:9732\"),\n },\n \"shell\": MY_NODE.get(\"config\", {}).get(\"shell\", {}),\n # \"log\": {\"level\": \"debug\"},\n }\n\n if THIS_IS_A_PUBLIC_NET:\n node_config[\"network\"] = NETWORK_CONFIG[\"chain_name\"]\n else:\n if CHAIN_PARAMS.get(\"expected-proof-of-work\") != None:\n node_config[\"p2p\"][\"expected-proof-of-work\"] = CHAIN_PARAMS[\n \"expected-proof-of-work\"\n ]\n\n node_config[\"network\"] = NETWORK_CONFIG\n node_config[\"network\"][\"sandboxed_chain_name\"] = \"SANDBOXED_TEZOS\"\n node_config[\"network\"][\"default_bootstrap_peers\"] = []\n node_config[\"network\"][\"genesis_parameters\"] = {\n \"values\": {\"genesis_pubkey\": get_genesis_pubkey()} }\n node_config[\"network\"].pop(\"activation_account_name\")\n\n return node_config\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/config-generator.py","file_name":"config-generator.py","file_ext":"py","file_size_in_byte":17963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"480100724","text":"import os\nimport sys\nimport rtconfig\n\nif os.getenv('RTT_ROOT'):\n RTT_ROOT = os.getenv('RTT_ROOT')\nelse:\n RTT_ROOT = os.path.normpath(os.getcwd() + '/../..')\n\nsys.path = sys.path + [os.path.join(RTT_ROOT, 'tools')]\nfrom building import *\n\nTARGET = 'rtthread-stm32f4xx.' 
+ rtconfig.TARGET_EXT\n\nenv = Environment(tools = ['mingw'],\n\tAS = rtconfig.AS, ASFLAGS = rtconfig.AFLAGS,\n\tCC = rtconfig.CC, CCFLAGS = rtconfig.CFLAGS,\n\tAR = rtconfig.AR, ARFLAGS = '-rc',\n\tLINK = rtconfig.LINK, LINKFLAGS = rtconfig.LFLAGS)\n\nif rtconfig.PLATFORM == 'armcc':\n    env[\"TEMPFILE\"] = SCons.Platform.TempFileMunge\n    # env[\"LINKCOM\"] = \"${TEMPFILE('$LINK -o $TARGET $SOURCES')}\"\n    env[\"LINKCOM\"] = \"$LINK -o $TARGET $LINKFLAGS ${TEMPFILE('--via $SOURCES')}\"\n    env[\"TEMPFILEPREFIX\"] = ' ' # arm tool chain\n\nExport('RTT_ROOT')\nExport('rtconfig')\n\n# prepare building environment\n#RTT_RTGUI = r'C:\\work\\rt-thread\\RTGUI_git\\components\\rtgui'\nRTT_RTGUI = os.getenv('RTT_RTGUI')\nif RTT_RTGUI:\n    # if the GUI dir is set to another place, don't use the one in RTT_ROOT\n    objs = PrepareBuilding(env, RTT_ROOT, has_libcpu=False, remove_components=['rtgui'])\n    objs += SConscript(os.path.join(RTT_RTGUI, 'SConscript'),\n                       variant_dir='build/components/rtgui',\n                       duplicate=0)\nelse:\n    objs = PrepareBuilding(env, RTT_ROOT, has_libcpu=False)\n\n# build program\nprogram = env.Program(TARGET, objs)\n\n# end building\nEndBuilding(TARGET, program)\n","sub_path":"software/realtouch/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"56478441","text":"number = 1\r\nwhile number <11:\r\n    print(number)\r\n    number += 1\r\n\r\nprint(' ')\r\nbalance = 1000\r\nrate = 1.02\r\nyears = 0\r\nwhile balance <5000:\r\n    balance *= rate\r\n    years += 1\r\nprint(\"It takes \" + str(years) + \" years to reach $5,000 if you have a balance of $1,000 and an annual rate of 2%.\")\r\n\r\nprint(' ')\r\nfor i in [1,1,2,3,4,5,6,7,8,9,10]:\r\n    print(i)\r\n\r\nprint(' ')\r\nfor name in [\"Jane\", \"John\", \"Matt\", \"George\"]:\r\n    print(name)\r\n\r\nprint(' ')\r\nfor i in range (1,11):\r\n    print(i)","sub_path":"Code/Python/Python Files/loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"186932179","text":"load(\"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl\", \"tool_path\")\n\ndef _avr_toolchain_impl(ctx):\n    bin_ = ctx.attr.bin\n    lib_avr = ctx.attr.lib_avr\n    lib_gcc_avr = ctx.attr.lib_gcc_avr\n    # bin_ = ctx.file._bin.path\n    # lib_avr = ctx.file._lib_avr.path\n    # lib_gcc_avr = ctx.file._lib_gcc_avr.path\n    tool_paths = [\n        tool_path(\n            name = \"gcc\",\n            path = bin_+\"/avr-gcc\",\n            # path = \"../\"+bin_+\"/avr-gcc\",\n        ),\n        tool_path(\n            name = \"ld\",\n            path = bin_+\"/avr-ld\",\n            # path = \"../\"+bin_+\"/avr-ld\",\n        ),\n        tool_path(\n            name = \"ar\",\n            path = bin_+\"/avr-ar\",\n            # path = \"../\"+bin_+\"/avr-ar\",\n        ),\n        tool_path(\n            name = \"as\",\n            path = bin_+\"/avr-as\",\n            # path = \"../\"+bin_+\"/avr-as\",\n        ),\n        tool_path(\n            name = \"cpp\",\n            path = bin_+\"/avr-cpp\",\n            # path = \"../\"+bin_+\"/avr-cpp\",\n        ),\n        tool_path(\n            name = \"gcov\",\n            path = bin_+\"/avr-gcov\",\n            # path = \"../\"+bin_+\"/avr-gcov\",\n        ),\n        tool_path(\n            name = \"nm\",\n            path = bin_+\"/avr-gcc-nm\",\n            # path = \"../\"+bin_+\"/avr-gcc-nm\",\n        ),\n        tool_path(\n            name = \"objcopy\",\n            path = bin_+\"/avr-objcopy\",\n            # path = \"../\"+bin_+\"/avr-objcopy\",\n        ),\n        tool_path(\n            name = \"objdump\",\n            path = bin_+\"/avr-objdump\",\n            # path = \"../\"+bin_+\"/avr-objdump\",\n        ),\n        tool_path(\n            name = \"size\",\n            path = bin_+\"/avr-size\",\n            # path = \"../\"+bin_+\"/avr-size\",\n        ),\n        
tool_path(\n name = \"strip\",\n path = bin_+\"/avr-strip\",\n # path = \"../\"+bin_+\"/avr-strip\",\n ),\n ]\n return cc_common.create_cc_toolchain_config_info(\n ctx = ctx,\n toolchain_identifier = \"avr-toolchain\",\n host_system_name = \"x86_64-pc-linux-gnu\",\n target_system_name = \"avr\",\n target_cpu = \"avr\",\n target_libc = \"avr\",\n compiler = \"avr-gcc\",\n abi_version = \"avr\",\n abi_libc_version = \"avr\",\n tool_paths = tool_paths,\n cxx_builtin_include_directories = [\n lib_gcc_avr+\"/include\",\n lib_gcc_avr+\"/include-fixed\",\n lib_avr+\"/include\",\n ],\n builtin_sysroot = \"\",\n )\n\ncc_toolchain_config = rule(\n implementation = _avr_toolchain_impl,\n attrs = {\n \"bin\" : attr.string(default=\"/usr/bin\"),\n \"lib_avr\" : attr.string(default=\"/usr/lib/avr\"),\n \"lib_gcc_avr\" : attr.string(default=\"/usr/lib/gcc/avr/5.4.0\"),\n # \"_bin\": attr.label(\n # allow_single_file=True,\n # cfg=\"host\",\n # default=Label(\"@avr_tools//:bin_folder\")\n # ),\n # \"_lib_avr\": attr.label(\n # allow_single_file=True,\n # cfg=\"host\",\n # default=Label(\"@avr_tools//:lib_avr_folder\")\n # ),\n # \"_lib_gcc_avr\": attr.label(\n # allow_single_file=True,\n # cfg=\"host\",\n # default=Label(\"@avr_tools//:lib_gcc_avr_folder\")\n # ),\n },\n provides = [CcToolchainConfigInfo],\n)\n","sub_path":"crosscompilation/toolchain/cc_toolchain_config.bzl","file_name":"cc_toolchain_config.bzl","file_ext":"bzl","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"50951527","text":"import math\nfrom typing import List\n\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom .ObjectDetectionListener import DetectedObject\nfrom .ObjectDetectionListener import IObjectDetectionListener\nfrom .PoseUpdater import Pose\nfrom .PositionListener import IPositionListener\nfrom .Robot import Robot\nfrom .Robot import WHEELS_RAD\nfrom .RobotMonitor import RobotMonitor\n\n\nclass DynamicPlot(IPositionListener, IObjectDetectionListener):\n def __init__(self):\n self.pos_x = []\n self.pos_y = []\n self.detected = []\n\n self.lastX = 0\n self.lastY = 0\n self.lastAngle = 0\n\n def newPosition(self, pose: Pose):\n self.pos_x.append(pose.x)\n self.pos_y.append(pose.y)\n self.lastX, self.lastY = pose.x, pose.y\n self.lastAngle = pose.orientation\n\n def objectDetected(self, detectedObjs: List[DetectedObject]):\n for d in detectedObjs:\n self.detected.append(d)\n\n def abs_pos_x(self, d):\n return self.lastX + ((WHEELS_RAD + d.dist) * math.cos(self.lastAngle + d.angle))\n\n def abs_pos_y(self, d):\n return self.lastY + ((WHEELS_RAD + d.dist) * math.sin(self.lastAngle + d.angle))\n\n\ndef updateRobotAndObjects(num, d: DynamicPlot, robotGraph, linesList):\n robotGraph.set_xdata(d.pos_x)\n robotGraph.set_ydata(d.pos_y)\n\n for detected in d.detected:\n hl = linesList[detected.sensorIndex]\n hl.set_xdata(np.append(hl.get_xdata(), d.abs_pos_x(detected)))\n hl.set_ydata(np.append(hl.get_ydata(), d.abs_pos_y(detected)))\n\n d.detected.clear()\n\n return robotGraph, (*linesList),\n\n\ndef plotRobotAndObjects(monitor: RobotMonitor, intervalMs=500):\n dp = DynamicPlot()\n monitor.subscribeChangePosition(dp)\n monitor.subscribeToFrontObjectDetection(dp)\n\n fig1 = plt.figure()\n linesList = [None] * 8\n\n robotLine, = plt.plot([], [], 'ro', label='robot')\n robotLine.set_markersize(1)\n\n for i in range(0, 8):\n l, = plt.plot([], [], 'o', label='sensor' + (str(i + 1)))\n l.set_markersize(1)\n 
linesList[i] = l\n        l.set_color('C' + str(i))\n\n    plt.xlim(-10, 10)\n    plt.ylim(-10, 10)\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.title('Robot and Objects detected by Sensors')\n    leg = plt.legend(bbox_to_anchor=(1.1, 1.05))\n\n    for l in leg.get_lines():\n        l.set_marker('.')\n        l.set_markersize(5)\n\n    line_ani = animation.FuncAnimation(fig1, updateRobotAndObjects, None, fargs=(dp, robotLine, linesList),\n                                       interval=intervalMs, blit=True, repeat=False)\n\n    # To save the animation, use the command: line_ani.save('lines.mp4')\n    plt.show()\n\n\ndef update_line(num, robot: Robot, robotLine, gtLine):\n    robotLine.set_xdata(np.append(robotLine.get_xdata(), robot.pose.x))\n    robotLine.set_ydata(np.append(robotLine.get_ydata(), robot.pose.y))\n\n    gtLine.set_xdata(np.append(gtLine.get_xdata(), robot.gtPose.x))\n    gtLine.set_ydata(np.append(gtLine.get_ydata(), robot.gtPose.y))\n\n    return robotLine, gtLine,\n\n\ndef plotRobot(robot: Robot, invert, intervalMs=500):\n    fig1 = plt.figure()\n\n    robotLine, = plt.plot([], [], 'ro', label='Estimated')\n    robotLine.set_markersize(1)\n    gtLine, = plt.plot([], [], 'bo', label='Ground Truth')\n    gtLine.set_markersize(1)\n\n    plt.xlim(-10, 10)\n    plt.ylim(-10, 10)\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.title('Estimated Pose vs Ground Truth')\n    if invert:\n        plt.gca().invert_xaxis()\n        plt.gca().invert_yaxis()\n\n    leg = plt.legend(bbox_to_anchor=(1.1, 1.05))\n    for l in leg.get_lines():\n        l.set_marker('.')\n        l.set_markersize(5)\n\n    line_ani = animation.FuncAnimation(fig1, update_line, None, fargs=(robot, robotLine, gtLine),\n                                       interval=intervalMs, blit=True, repeat=False)\n\n    # To save the animation, use the command: line_ani.save('lines.mp4')\n    plt.show()\n","sub_path":"andabb/plotrobot.py","file_name":"plotrobot.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"418485679","text":"import datetime\r\n\r\nclass tugas:\r\n    \"\"\"ASSIGNMENT CLASS\"\"\"\r\n    def __init__(self, nama, desc='', dl= None):\r\n        self.nama = nama\r\n        self.desc = desc\r\n        if dl == None:\r\n            self.deadline = dl\r\n        elif dl[4] == dl[7] == '/' and dl[13] == ':':\r\n            self.deadline = dl #'yyyy/mm/dd hh:mm'\r\n        else:\r\n            print('Date not recognized; the deadline is treated as absent.')\r\n            self.deadline = None\r\n        self.score = {}\r\n\r\n    def getSisaWaktu(self):\r\n        curt = datetime.datetime.now()\r\n        # year\r\n        a = int(self.deadline[:4]) - curt.year\r\n        if a > 0:\r\n            print(a, 'Years', end=' ')\r\n        # month\r\n        a = int(self.deadline[5:7]) - curt.month\r\n        if a > 0:\r\n            print(a, 'Months', end=' ')\r\n        # day\r\n        a = int(self.deadline[8:10]) - curt.day\r\n        if a > 0:\r\n            print(a, 'Days', end=' ')\r\n        # hour\r\n        a = int(self.deadline[11:13]) - curt.hour\r\n        if a > 0:\r\n            print(a, 'Hours', end=' ')\r\n        # minute\r\n        a = int(self.deadline[14:]) - curt.minute\r\n        if a > 0:\r\n            print(a, 'Minutes', end=' ')\r\n        print()\r\n\r\n    def printDataTugas(self):\r\n        print(\"\"\"\n        Assignment name : %s\n        Description : %s\n        Deadline : %s\n        \"\"\"%(self.nama,self.desc,self.deadline))\r\n\r\n    def setSubmitStatus(self, murid):\r\n        self.score[murid.getID()] = 0\r\n\r\n    def getSubmitStatus(self, murid):\r\n        return murid.getID() in self.score.keys()\r\n\r\n    def setNilai(self, murid, nilai):\r\n        if not self.getSubmitStatus(murid):\r\n            print (murid.getName(), \"has not submitted the assignment!\")\r\n        self.score[murid.getID()] = nilai\r\n\r\n    def getNilai(self, murid):\r\n        try:\r\n            return self.score[murid.getID()]\r\n        except:\r\n            print (murid.getName(), \"has not submitted the assignment!\")
tugas!\")","sub_path":"src/tugas.py","file_name":"tugas.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"78447914","text":"import pygame\nimport random\nimport sys\nfrom pygame.locals import *\n\npygame.init()\nclock = pygame.time.Clock()\nsizeoknox = 600\nsizeoknoy = 600\nfps = 10\nxs = [300, 300, 300, 300, 300]\nys = [300, 280, 260, 240, 220]\n\n\ndef collision(x1, x2, y1, y2, w1, w2, h1, h2):\n if x1 + w1 > x2 and x1 < x2 + w2 and y1 + h1 > y2 and y1 < y2 + h2:\n return True\n else:\n return False\n\n\ndef die(screen, score):\n global clock\n die = True\n while die:\n for direct in pygame.event.get():\n if direct.type == QUIT:\n sys.exit(0)\n elif direct.type == KEYDOWN:\n if direct.key == pygame.K_m:\n menu_loop()\n elif direct.key == pygame.K_SPACE:\n game_loop(screen)\n\n screen.fill((0, 255, 255))\n pygame.display.set_caption('Snake.Died_loop')\n\n f = pygame.font.SysFont('New Times Roman', 30)\n\n one = f.render('Конец игры. Ваш счёт : ' + str(score), True, (0, 0, 0))\n two = f.render('Для начала новой игры - нажмите Пробел', True, (0, 0, 0))\n three = f.render('Для выхода в меню - нажмине M', True, (0, 0, 0))\n screen.blit(one, (15, 300))\n screen.blit(two, (15, 320))\n screen.blit(three, (15, 340))\n\n pygame.display.update()\n clock.tick(10)\n\n\ndef menu_loop():\n global clock, screen\n menu_running = True\n\n while menu_running:\n for direct in pygame.event.get():\n if direct.type == QUIT:\n sys.exit(0)\n elif direct.type == KEYDOWN:\n if direct.key == pygame.K_1:\n game_loop(screen)\n elif direct.key == pygame.K_0:\n pygame.quit()\n sys.exit()\n\n screen = pygame.display.set_mode((sizeoknox, sizeoknoy))\n screen.fill((0, 255, 255))\n pygame.display.set_caption('Snake.Menu_loop')\n\n f = pygame.font.SysFont('New Times Roman', 30)\n\n one = f.render('Для начала новой игры нажмите 1', True, (0, 0, 0))\n two = f.render('Для выхода из игры - нажмите 0', True, (0, 0, 0))\n three = f.render('Version : demo v0.1', True, (0, 0, 0))\n\n screen.blit(one, (60, 300))\n screen.blit(two, (60, 320))\n screen.blit(three, (60, 340))\n\n pygame.display.update()\n clock.tick(10)\n\n\ndef pause_loop(screen):\n global clock\n pause_running = True\n\n while pause_running:\n for direct in pygame.event.get():\n if direct.type == QUIT:\n sys.exit(0)\n elif direct.type == KEYDOWN:\n if direct.key == pygame.K_ESCAPE:\n pause_running = False\n elif direct.key == pygame.K_m:\n pause_running = False\n menu_loop()\n\n # screen = pygame.display.set_mode((sizeoknox, sizeoknoy))\n screen.fill((0, 255, 255))\n pygame.display.set_caption('Snake.Pause_loop')\n\n f = pygame.font.SysFont('New Times Roman', 30)\n\n one = f.render('Pause loop', True, (0, 0, 0))\n two = f.render('Для продолжения - нажмите ESC', True, (0, 0, 0))\n three = f.render('Для выхода в меню - нажмите M', True, (0, 0, 0))\n four = f.render('Version : demo v0.1', True, (0, 0, 0))\n\n screen.blit(one, (60, 300))\n screen.blit(two, (60, 320))\n screen.blit(three, (60, 340))\n screen.blit(four, (60, 3600))\n\n\n pygame.display.update()\n clock.tick(1)\n\n\nappleimage = pygame.Surface((20, 20))\nappleimage.fill((0, 255, 0))\n\nimg = pygame.Surface((20, 20))\nimg=pygame.image.load(\"saturn_family1.jpg\").convert()\n\n\n\n\ndef game_loop(screen):\n global sizeoknox, sizeoknoy, clock, fps, dirs, score\n # Следующие переменные для обнуления игры и начать заного\n score = 0\n xs = [300, 300, 300, 300, 300]\n ys = [300, 280, 260, 240, 220]\n fps = 10\n 
    dirs = 0\r\n    #\r\n\r\n    applepos = (random.randint(30, 600 - 10), random.randint(30, 600 - 10))\r\n    screen = pygame.display.set_mode((sizeoknox, sizeoknoy))\r\n\r\n    pygame.display.set_caption('Snake')\r\n\r\n\r\n\r\n    while True:\r\n        clock.tick(fps)\r\n        screen.fill((255, 255, 255))\r\n\r\n        for direct in pygame.event.get():\r\n            if direct.type == QUIT:\r\n                sys.exit(0)\r\n            elif direct.type == KEYDOWN:\r\n                if direct.key == K_UP and dirs != 0:\r\n                    dirs = 2\r\n                elif direct.key == K_DOWN and dirs != 2:\r\n                    dirs = 0\r\n                elif direct.key == K_LEFT and dirs != 1:\r\n                    dirs = 3\r\n                elif direct.key == K_RIGHT and dirs != 3:\r\n                    dirs = 1\r\n                elif direct.key == pygame.K_ESCAPE:\r\n                    pause_loop(screen)\r\n\r\n        i = len(xs) - 1\r\n        while i >= 2:\r\n            if collision(xs[0], xs[i], ys[0], ys[i], 20, 20, 20, 20):\r\n                die(screen, score)\r\n            i -= 1\r\n        if collision(xs[0], applepos[0], ys[0], applepos[1], 20, 10, 20, 10):\r\n            score += 1\r\n            fps += 0.5\r\n            xs.append(700)\r\n            ys.append(700)\r\n            applepos = (random.randint(0, sizeoknox), random.randint(0, sizeoknoy))\r\n        if xs[0] < 0 or xs[0] > sizeoknox or ys[0] < 0 or ys[0] > sizeoknoy:\r\n            die(screen, score)\r\n        i = len(xs) - 1\r\n        while i >= 1:\r\n            xs[i] = xs[i - 1]\r\n            ys[i] = ys[i - 1]\r\n            i -= 1\r\n        if dirs == 0:\r\n            ys[0] += 20\r\n        elif dirs == 1:\r\n            xs[0] += 20\r\n        elif dirs == 2:\r\n            ys[0] -= 20\r\n        elif dirs == 3:\r\n            xs[0] -= 20\r\n        for i in range(0, len(xs)):\r\n            screen.blit(img, (xs[i], ys[i]))\r\n        screen.blit(appleimage, applepos)\r\n        f = pygame.font.SysFont('Arial', 20)\r\n        t = f.render('Your score: ' + str(score), True, (0, 0, 0))\r\n        screen.blit(t, (10, 10))\r\n        pygame.display.update()\r\n\r\n\r\ndef __main__():\r\n    menu_loop()\r\n\r\n\r\nif __name__ == '__main__':\r\n    __main__()\r\n","sub_path":"snake_3ball/snake_test.py","file_name":"snake_test.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"416671477","text":"# -*- coding: utf-8 -*-\nclass Matriz(object):\n    def __init__(self, N):\n        self.dim = N\n        del N\n        self.matriz = []\n        for i in range(1, self.dim+1):\n            linha = []\n            for j in range(1, self.dim+1):\n                linha.append(int(input()))\n            del j\n            self.matriz.append(linha)\n        del i\n    \n    def transposta(self):\n        self.transposta = []\n        for i in range(0, self.dim):\n            linha = []\n            for j in range(0, self.dim):\n                linha.append(self.matriz[j][i])\n            \n            del j\n            \n            self.transposta.append(linha)\n        del i\n    \n    def pqM(self):\n        Matriz.transposta(self)\n        \n        somas_l, somas_c, somas = [], [], []\n        \n        for i in self.matriz:\n            somas_l.append(sum(i))\n            somas.append(sum(i))\n        del i\n        \n        for i in self.transposta:\n            somas_c.append(sum(i))\n            somas.append(sum(i))\n        del i\n        \n        for i in somas:\n            cont = 0\n            for j in somas:\n                if i == j:\n                    cont += 1\n            \n            del j\n            if cont == (2*self.dim - 2):\n                val_ref = i\n                del i\n                break\n        \n        for i in range(0, len(somas_l)):\n            if somas_l[i] != val_ref:\n                ind_i = i\n                del i\n                break\n        \n        for i in range(0, len(somas_c)):\n            if somas_c[i] != val_ref:\n                ind_j = i\n                del i\n                break\n        \n        colocou_no_lugar = self.matriz[ind_i][ind_j]\n        \n        original = val_ref - (sum(self.matriz[ind_i]) - colocou_no_lugar)\n        \n        print(original)\n        print(colocou_no_lugar)\n    \n    def mostraMatriz(self):\n        for i in self.matriz:\n            print(i)\n        del i\n        \n        print(\"\\n\")\n        \n        Matriz.transposta(self)\n        for i in self.transposta:\n            print(i)\n        del i\n    \n#################################### - MAIN PROGRAM - ###########################################\nN = int(input())\nA = 
Matriz(N)\nA.mostraMatriz()\nA.pqM()\n","sub_path":"moodledata/vpl_data/455/usersdata/292/106066/submittedfiles/programa.py","file_name":"programa.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"596435299","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# reminisce.py\n\nimport sys\nimport os\nfrom PyQt4.QtGui import *\nfrom PyQt4 import QtGui, QtCore\nfrom asyncio.locks import Event\n\n\nclass MainWindow(QtGui.QMainWindow):\n    def __init__(self, parent = None):\n        super(MainWindow, self).__init__(parent)\n        \n        self.filename = \"\"\n        \n        QtGui.qApp.installEventFilter(self)\n        QtCore.QTimer.singleShot(0, self.menuBar().hide)\n\n        self.initUI()\n    \n    \n    # UI \n    def initUI(self):\n        \n        self.showFullScreen()\n        \n        self.setWindowTitle(\"Reminisce\")\n        \n        self.text = QtGui.QTextEdit(self)\n        self.setCentralWidget(self.text)\n\n        self.initMenuBar()\n    \n    \n    # EventFilter: Autohide Menubar \n    def eventFilter(self, source, event):\n        \n        #Show MenuBar\n        if QtGui.qApp.activePopupWidget() is None:\n            if event.type() == QtCore.QEvent.MouseMove:\n                if self.menuBar().isHidden():\n                    rect = self.geometry() \n                    # Set Reactive Zone\n                    rect.setHeight(30)\n                    if rect.contains(event.globalPos()):\n                        self.menuBar().show()\n                else:\n                    rect = QtCore.QRect(\n                        self.menuBar().mapToGlobal(QtCore.QPoint(0,0)),\n                        self.menuBar().size())\n                    if not rect.contains(event.globalPos()):\n                        self.menuBar().hide()\n            elif event.type() == QtCore.QEvent.Leave and source is self:\n                self.menuBar().hide()\n        return QtGui.QMainWindow.eventFilter(self, source, event)\n    \n    # Spawn\n    def new(self):\n        \n        spawn = MainWindow(self)\n        spawn.show()\n    \n    \n    # New File\n    def newFile(self):\n        self.text.clear()\n    \n    \n    # Open file\n    def openFile(self):\n        filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File', os.getenv('HOME'))\n        f = open(filename, 'r')\n        filedata = f.read()\n        self.text.setText(filedata)\n        f.close()\n    \n    \n    # Save \n    def save(self):\n        \n        # Only open dialog if no file name currently exists \n        if not self.filename:\n            self.filename = QtGui.QFileDialog.getSaveFileName(self, 'Save File')\n        \n        # Append extension if necessary\n        if not self.filename.endswith(\".txt\"):\n            self.filename +=\".txt\" \n        \n        # Store in html format (CHANGE LATER)\n        with open(self.filename, \"wt\") as file:\n            file.write(self.text.toHtml())\n\n    # MenuBar \n    def initMenuBar(self):\n        \n        menubar = self.menuBar()\n        \n        # Set the menubar background color\n        menubar.setStyleSheet(\"\"\"\n            QMenuBar\n            {\n                background-color: blue; \n            }\n            \"\"\")\n        \n        \n        # Add buttons\n        file = menubar.addMenu(\"File\")\n        edit = menubar.addMenu(\"Edit\")\n        view = menubar.addMenu(\"View\") \n        \n        # Open File\n        openFile = QtGui.QAction(\"&Open File\", self)\n        openFile.setShortcut(\"Ctrl+O\")\n        openFile.setStatusTip('Open File')\n        openFile.triggered.connect(self.openFile) \n        \n        # Close\n        closeAction = QtGui.QAction('Close', self)\n        closeAction.setShortcut('Ctrl+Q')\n        closeAction.setStatusTip('Close')\n        closeAction.triggered.connect(self.close)\n        \n        # New File\n        newAction = QtGui.QAction('New', self)\n        newAction.setShortcut('Ctrl+N')\n        newAction.setStatusTip('New')\n        newAction.triggered.connect(self.newFile)\n        \n        # Add Buttons to File \n        file.addAction(newAction)\n        file.addAction(openFile)\n        file.addAction(closeAction)\n\n    \ndef main():\n    \n    app = QtGui.QApplication(sys.argv)\n    \n    main = MainWindow()\n    main.show()\n    \n    sys.exit(app.exec_())\n    \n\nif __name__ == \"__main__\":\n    main()
\n\n","sub_path":"reminisce/reminisce/reminisce.py","file_name":"reminisce.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"483401243","text":"# file_path = 'D:\\source\\\\test\\\\python\\\\learn/module.py'\n\n# f = open(file_path,'r',encoding='utf-8')\n# s = f.read()\n# print(s)\n\n# try:\n#     f= open(file_path,'r',encoding='utf-8')\n#     print(f.read())\n# finally:\n#     if f:\n#         f.close()\n\n# with open(file_path,'r',encoding='utf-8') as f:\n#     print(f.read())\n\n# with open(file_path,encoding='utf-8',errors='ignore') as f:\n#     for s in f.readlines():\n#         print(s.strip())\n\n\n# from io import StringIO,BytesIO\n# f = StringIO()\n# f.write('hello')\n# print(f.getvalue())\n\n# f = StringIO('hello')\n# f.write('word')\n# print(f.getvalue())\n\n# b = BytesIO()\n# b.write('中文'.encode('utf-8'))\n# print(b.getvalue())\n\n# b1= BytesIO(b'\\xe4\\xb8\\xad\\xe6\\x96\\x87')\n# print(b1.read())\n\nimport os\n\n# print(os.name,os.path,os.environ,os.pathsep,os.chdir,os.system,os.getcwd,sep='\\n\\n')\n# print(os.environ)\n# print(os.environ.get('path'))\n# print(os.environ.get('nnn','default')) ## returns None by default if the key does not exist\n\n# print(os.path.abspath('.'))\n# print(os.path.join('/','test.py'))\n\n# dirs = [x for x in os.listdir('.') if os.path.isdir(x)]\n# print('dirs',dirs)\n\n# files = [x for x in os.listdir('.') if os.path.isfile(x)]\n# print('files',files)\n\n\n# def get_all_dirs(path,data=[]):\n#     dir_list = [x for x in os.listdir(path) if os.path.isdir(x)]\n#     if not dir_list:\n#         return data\n#     data = data + dir_list\n#     for d in dir_list:\n#         get_all_dirs(d,data)\n#     return data\n\n# ddd = get_all_dirs('.')\n# print(ddd)\n\n\n\n# for root,dirs,files in os.walk('./'):\n#     for file in files:\n#         print('dirs',dirs)\n#         print(os.path.join(root,file))\n\ndef get_abspath(s):\n    path = '.'
 # path can be changed to search a different directory; here it is the current directory\n    for root, dir_names, file_names in os.walk(path):\n        for file_name in file_names:\n            if s in file_name:\n                print(os.path.abspath(os.path.join(root, file_name)))\nget_abspath('test')\n","sub_path":"learn/IO.py","file_name":"IO.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"499377857","text":"def remove_str(str, n):\n    new_str = \"\"\n    # print(\"The original string is: \" + test_str)\n    for i in range(0, len(str)):\n        if (i != n):\n            new_str = new_str + str[i]\n    return new_str\n\n\n# replace:\ndef remove_str1(str, n):\n    new_str = str.replace(str[n], \"\", 1)\n    return new_str\n\n\n# slicing\ndef remove_str2(str, n):\n    return str[0:n] + str[n + 1:]\n\n\ntest_str = \"Runoob\"\n# remove_str(test_str,2)\nprint(remove_str(test_str, 2))\nprint(remove_str1(test_str, 2))\nprint(remove_str2(test_str, 2))\n","sub_path":"实例/050 移除字符串中的指定位置字符.py","file_name":"050 移除字符串中的指定位置字符.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"633927627","text":"from typing import List\nclass Solution:\n    def minPathSum(self, grid: List[List[int]]) -> int:\n        if len(grid)==0:\n            return 0\n        n = len(grid)\n        m = len(grid[0])\n        prev = 0\n        for i in range(n):\n            grid[i][0] += prev\n            prev = grid[i][0]\n        prev = 0\n        for j in range(m):\n            grid[0][j] += prev\n            prev = grid[0][j]\n        for i in range(1,n):\n            for j in range(1,m):\n                grid[i][j] += min(grid[i-1][j],grid[i][j-1])\n        return grid[-1][-1]","sub_path":"leetcode/dp/minimum-path-sum.py","file_name":"minimum-path-sum.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"283201023","text":"# import the required modules\r\nimport pygame\r\nfrom pygame.locals import *\r\nimport time,random\r\n\r\nclass HeroPlane:\r\n\t'''Player plane (the hero)'''\r\n\tdef __init__(self, screen_temp):\r\n\t\tself.x = 200\r\n\t\tself.y = 400\r\n\t\tself.screen = screen_temp\r\n\t\tself.image = pygame.image.load(\"./images/me.png\")\r\n\t\tself.bullet_list = [] # list holding the player's bullets\r\n\r\n\tdef display(self):\r\n\t\t'''Draw the player plane'''\r\n\t\t# draw the bullets\r\n\t\tfor b in self.bullet_list:\r\n\t\t\tb.display()\r\n\t\t\tif b.move():\r\n\t\t\t\tself.bullet_list.remove(b)\r\n\t\tself.screen.blit(self.image,(self.x,self.y))\r\n\t\t\r\n\t\r\n\tdef move_left(self):\r\n\t\t'''Move the plane left'''\r\n\t\tself.x -= 5\r\n\t\tif self.x <= 0:\r\n\t\t\tself.x=0\r\n\r\n\tdef move_right(self):\r\n\t\t'''Move the plane right'''\r\n\t\tself.x += 5\r\n\t\tif self.x>=406:\r\n\t\t\tself.x=406\r\n\tdef fire(self):\r\n\t\tself.bullet_list.append(Bullet(self.screen,self.x,self.y))\r\n\t\tprint(len(self.bullet_list))\r\n\r\nclass Bullet:\r\n\t'''Bullet class'''\r\n\tdef __init__(self,screen_temp,x,y):\r\n\t\tself.x = x+53\r\n\t\tself.y = y\r\n\t\tself.screen = screen_temp\r\n\t\tself.image = pygame.image.load(\"./images/pd.png\")\r\n\r\n\tdef display(self):\r\n\t\t'''Draw the bullet'''\r\n\t\tself.screen.blit(self.image,(self.x,self.y))\r\n\t\r\n\tdef move(self):\r\n\t\tself.y -= 10\r\n\t\tif self.y <=-20:\r\n\t\t\treturn True\r\n\r\nclass EnemyPlane:\r\n\t'''Enemy plane class'''\r\n\tdef __init__(self,screen_temp):\r\n\t\tself.x = random.choice(range(408))\r\n\t\tself.y = -75\r\n\t\tself.screen = screen_temp\r\n\t\tself.image = pygame.image.load(\"./images/e\"+str(random.choice(range(3)))+\".png\")\r\n\r\n\tdef display(self):\r\n\t\t'''Draw the enemy plane'''\r\n\t\tself.screen.blit(self.image,(self.x,self.y))\r\n\t\r\n\tdef move(self,hero):\r\n\t\tself.y += 4
\r\n\t\t# the enemy plane has left the screen\r\n\t\tif self.y>568:\r\n\t\t\treturn True\r\n\r\n\t\t# iterate over all bullets and run collision detection\r\n\t\tfor bo in hero.bullet_list:\r\n\t\t\t# approximate hit box (the exact bounds were lost from the source)\r\n\t\t\tif bo.x>self.x+12 and bo.x<self.x+58 and bo.y>self.y+20 and bo.y<self.y+56:\r\n\t\t\t\thero.bullet_list.remove(bo)\r\n\t\t\t\treturn True\r\n\r\n\r\ndef key_control(hero_temp):\r\n\t'''Poll the keyboard and steer the hero plane'''\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == QUIT:\r\n\t\t\texit()\r\n\tpressed_keys = pygame.key.get_pressed()\r\n\tif pressed_keys[K_LEFT]:\r\n\t\thero_temp.move_left()\r\n\tif pressed_keys[K_RIGHT]:\r\n\t\thero_temp.move_right()\r\n\tif pressed_keys[K_SPACE]:\r\n\t\thero_temp.fire()\r\n\r\n\r\ndef main():\r\n\t# create the window, the hero plane and the enemy list\r\n\tscreen = pygame.display.set_mode((480, 568), 0, 32)\r\n\tbackground = pygame.image.load(\"./images/bg.png\").convert()  # background image (path assumed)\r\n\thero = HeroPlane(screen)\r\n\tenemylist = []\r\n\tm = -968\r\n\twhile True:\r\n\t\t# scroll the background\r\n\t\tscreen.blit(background, (0, m))\r\n\t\tm += 4\r\n\t\tif m>=-200:\r\n\t\t\tm = -968\r\n\r\n\t\t# draw the player plane\r\n\t\thero.display()\r\n\t\t\r\n\t\t# handle keyboard input\r\n\t\tkey_control(hero)\r\n\r\n\t\t# randomly spawn enemy planes\r\n\t\tif random.choice(range(50))==10:\r\n\t\t\tenemylist.append(EnemyPlane(screen))\r\n\t\t# iterate over enemies, drawing and moving them\r\n\t\tfor em in enemylist:\r\n\t\t\tem.display()\r\n\t\t\tif em.move(hero):\r\n\t\t\t\tenemylist.remove(em)\r\n\r\n\t\t# refresh the display\r\n\t\tpygame.display.update()\r\n\r\n\t\t# frame delay\r\n\t\ttime.sleep(0.04)\r\n\r\n\r\n# call main() when run as the main program\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"2nd_week/plane_demo/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"376550622","text":"par = {}\nranking = {}\n\ndef init(l):\n    for i in l:\n        par[i] = i\n        ranking[i] = 0\n\ndef find(x):\n    if x==par[x]:\n        return x\n    par[x] = find(par[x])\n    return par[x]\n\ndef same(x,y):\n    if find(x)==find(y):\n        return True\n    return False\n\ndef unite(x,y):\n    x = find(x)\n    y = find(y)\n    if(ranking[x]<ranking[y]):\n        par[x] = y\n    else:\n        par[y] = x\n        if(ranking[x]==ranking[y]):\n            ranking[x] += 1\n# License: MIT (LICENSE.md)\n# Copyright (c) 2021 Vitor Oriel\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n## https://github.com/NESCAU-UFLA/FuzzingTool\n\nfrom .parsers.CLIParser import CLIParser\nfrom .core.Fuzzer import Fuzzer\nfrom .core.Payloader import Payloader\nfrom .conn.Request import Request\nfrom .conn.Response import Response\nfrom .conn.RequestException import RequestException\nfrom .IO.OutputHandler import outputHandler as oh\nfrom .IO.FileHandler import fileHandler as fh\n\nimport time\n\nAPP_VERSION = {\n    'MAJOR_VERSION': 3,\n    \"MINOR_VERSION\": 8,\n    \"PATCH\": 1\n}\n\ndef version():\n    global APP_VERSION\n    version = (str(APP_VERSION['MAJOR_VERSION'])+\".\"+\n               str(APP_VERSION['MINOR_VERSION'])+\".\"+\n               str(APP_VERSION['PATCH']))\n    return version\n\ndef banner():\n    banner = (\"\\033[36m ____ _____ _\"+'\\n'+\n              \"\\033[36m | __|_ _ ___ ___ _ ___ ___ |_ _|_ ___| | \\033[0mVersion \"+version()+'\\n'+\n              \"\\033[36m | __| | |- _|- _|'| | . | | | . | . | |\"+'\\n'+\n              \"\\033[36m |_| |___|___|___|_|_|_|_ | |_|___|___|_|\"+'\\n'+\n              \"\\033[36m |___|\\033[0m\\n\\n\"\n              \" [!] 
Disclaimer: We're not responsible for the misuse of this tool.\\n\"\n \" This project was created for educational purposes\\n\"\n \" and should not be used in environments without legal authorization.\\n\")\n return banner\n\nclass ApplicationManager:\n \"\"\"Class that handle with the entire application\n\n Attributes:\n fuzzer: The fuzzer object\n requester: The request object\n startedTime: The time when start the fuzzing test\n \"\"\"\n def __init__(self):\n \"\"\"Class constructor\"\"\"\n self.__fuzzer = None\n self.__requester = None\n self.__startedTime = 0\n\n def main(self, argv: list):\n \"\"\"The main function\n\n @type argv: list\n @param argv: The arguments given in the execution\n \"\"\"\n if len(argv) < 2:\n oh.print(banner())\n oh.errorBox(\"Invalid format! Use -h on 2nd parameter to show the help menu.\")\n if argv[1] == '-h' or argv[1] == '--help':\n oh.showHelpMenu()\n if argv[1] == '-v' or argv[1] == '--version':\n exit(\"FuzzingTool v\"+version())\n oh.print(banner())\n self.init(argv)\n self.prepare()\n self.start()\n\n def init(self, argv: list):\n \"\"\"The initialization function\n\n @type argv: list\n @param argv: The arguments given in the execution\n \"\"\"\n cliParser = CLIParser(argv)\n url, method, requestData, httpHeader = cliParser.getDefaultRequest()\n cliParser.getWordlistFile()\n wordlist, dictSizeof = fh.getWordlistContentAndLength()\n self.__fuzzer = Fuzzer(\n Request(url, method, requestData, httpHeader),\n Payloader(wordlist),\n dictSizeof\n )\n del wordlist\n self.__requester = self.__fuzzer.getRequester()\n oh.infoBox(f\"Set target: {self.__requester.getUrl()}\")\n oh.infoBox(f\"Set request method: {method}\")\n if requestData:\n oh.infoBox(f\"Set request data: {str(requestData)}\")\n cliParser.checkCookie(self.__requester)\n cliParser.checkProxy(self.__requester)\n cliParser.checkProxies(self.__requester)\n cliParser.checkTimeout(self.__requester)\n cliParser.checkUnfollowRedirects(self.__requester)\n cliParser.checkDelay(self.__fuzzer)\n cliParser.checkVerboseMode(self.__fuzzer)\n cliParser.checkNumThreads(self.__fuzzer)\n cliParser.checkPrefixAndSuffix(self.__fuzzer.getPayloader())\n cliParser.checkCase(self.__fuzzer.getPayloader())\n cliParser.checkReporter(self.__requester)\n self.__checkScanners()\n cliParser.checkScanner(self.__fuzzer)\n cliParser.checkMatcher(self.__fuzzer.getScanner())\n\n def prepare(self):\n \"\"\"Prepares the application\"\"\"\n try:\n oh.setPrintContentMode(self.__fuzzer.getScanner(), self.__fuzzer.isVerboseMode())\n self.__checkConnectionAndRedirections()\n self.__checkProxies()\n if not self.__requester.isUrlFuzzing() and not self.__fuzzer.getScanner().comparatorIsSet():\n self.__checkDataComparator()\n self.__checkIgnoreErrors()\n except KeyboardInterrupt:\n exit('')\n\n def start(self):\n \"\"\"Starts the application\"\"\"\n oh.infoBox(f\"Starting test on '{self.__requester.getUrl()}' ...\")\n self.__startedTime = time.time()\n try:\n self.__fuzzer.start()\n except KeyboardInterrupt:\n self.__fuzzer.stop()\n oh.abortBox(\"Test aborted\")\n self.__showFooter()\n else:\n if not self.__fuzzer.isVerboseMode():\n oh.print(\"\")\n self.__showFooter()\n oh.infoBox(\"Test completed\")\n\n def __checkScanners(self):\n \"\"\"Check what's the scanners that will be used on Fuzzer\"\"\"\n if self.__requester.isUrlFuzzing():\n if self.__requester.isSubdomainFuzzing():\n from .core.scanners.default.SubdomainScanner import SubdomainScanner\n scanner = SubdomainScanner()\n else:\n from .core.scanners.default.PathScanner import 
PathScanner\n                scanner = PathScanner()\n        else:\n            from .core.scanners.default.DataScanner import DataScanner\n            scanner = DataScanner()\n        self.__fuzzer.setScanner(scanner)\n\n    def __checkConnectionAndRedirections(self):\n        \"\"\"Test the connection and redirection to target\"\"\"\n        # If we are not fuzzing URL paths,\n        # test the redirections before starting the fuzzing\n        if self.__requester.isUrlFuzzing():\n            oh.infoBox(\"Test mode set to URL Fuzzing\")\n            oh.infoBox(\"Testing connection ...\")\n            try:\n                self.__requester.testConnection()\n            except RequestException as e:\n                if not oh.askYesNo('warning', f\"Connection to {str(e)} failed. Continue anyway?\"):\n                    exit()\n            else:\n                oh.infoBox(\"Connection status: OK\")\n        else:\n            oh.infoBox(\"Testing connection ...\")\n            try:\n                self.__requester.testConnection()\n            except RequestException as e:\n                oh.errorBox(f\"Failed to connect to {str(e)}\")\n            oh.infoBox(\"Connection status: OK\")\n            oh.infoBox(\"Testing redirections ...\")\n            try:\n                if self.__requester.hasRedirection():\n                    if not oh.askYesNo('warning', \"You were redirected to another page. Continue?\"):\n                        exit()\n                else:\n                    oh.infoBox(\"No redirections\")\n            except RequestException as e:\n                oh.errorBox(str(e))\n    \n    def __checkProxies(self):\n        \"\"\"Check for connection status using a proxy, if a proxy is given\"\"\"\n        if self.__requester.getProxy():\n            oh.infoBox(\"Testing proxy ...\")\n            try:\n                self.__requester.testConnection(proxy=True)\n                oh.infoBox(f\"Proxy {self.__requester.getProxy()['http']} worked\")\n            except RequestException:\n                oh.warningBox(f\"Proxy {self.__requester.getProxy()['http']} did not work\")\n                self.__requester.setProxy({})\n        elif self.__requester.getProxyList():\n            proxyList = []\n            oh.infoBox(\"Testing proxies ...\")\n            for proxy in self.__requester.getProxyList():\n                self.__requester.setProxy(proxy)\n                try:\n                    self.__requester.testConnection(proxy=True)\n                    proxyList.append(proxy)\n                    oh.infoBox(f\"Proxy {proxy['http']} worked\")\n                except RequestException:\n                    oh.warningBox(f\"Proxy {proxy['http']} did not work\")\n                    self.__requester.setProxy({})\n            self.__requester.setProxyList(proxyList)\n\n    def __checkIgnoreErrors(self):\n        \"\"\"Check if the user wants to ignore the errors during the tests\"\"\"\n        if self.__requester.isUrlFuzzing():\n            self.__fuzzer.setIgnoreErrors(True)\n            fh.openLog()\n        else:\n            if oh.askYesNo('info', \"Do you want to ignore errors during the tests, and save them into a log file?\"):\n                self.__fuzzer.setIgnoreErrors(True)\n                fh.openLog()\n\n    def __checkDataComparator(self):\n        \"\"\"Check if the user wants to insert custom data comparator to validate the responses\"\"\"\n        comparator = {\n            'Length': None,\n            'Time': None,\n        }\n        payload = ' '\n        oh.infoBox(f\"Making first request with '{payload}' as payload ...\")\n        try:\n            response = self.__requester.request(payload)\n        except RequestException as e:\n            oh.errorBox(f\"{str(e)}\")\n        firstResult = self.__fuzzer.getScanner().getResult(\n            response=response\n        )\n        oh.printContent(firstResult, False)\n        defaultLength = int(firstResult['Length'])+300\n        if oh.askYesNo('info', \"Do you want to exclude responses based on custom length?\"):\n            length = oh.askData(f\"Insert the length (default {defaultLength})\")\n            if not length:\n                length = defaultLength\n            comparator['Length'] = length\n        defaultTime = firstResult['Time Taken']+5.0\n        if oh.askYesNo('info', \"Do you want to exclude responses based on custom time?\"):\n            time = oh.askData(f\"Insert the time (in seconds, default {defaultTime} seconds)\")\n            if not time:\n                time = defaultTime\n            comparator['Time'] = time\n
        self.__fuzzer.getScanner().setComparator(comparator)\n\n    def __showFooter(self):\n        \"\"\"Show the footer content of the software after the fuzzing has finished\"\"\"\n        if self.__startedTime:\n            oh.infoBox(f\"Time taken: {float('%.2f'%(time.time() - self.__startedTime))} seconds\")\n        output = self.__fuzzer.getOutput()\n        if output:\n            oh.infoBox(f\"Found {len(output)} possible payload(s)\")\n            if self.__fuzzer.isVerboseMode():\n                for content in output:\n                    oh.printContent(content, True)\n            fh.writeReport(output)\n        else:\n            oh.infoBox(\"No vulnerable entries were found\")","sub_path":"src/modules/ApplicationManager.py","file_name":"ApplicationManager.py","file_ext":"py","file_size_in_byte":11602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"173906474","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\nimport wfdb\nfrom utils import qrs_detect, comp_cosEn, save_dict\nimport metrics\n\ndef load_data(sample_path):\n    sig, fields = wfdb.rdsamp(sample_path)\n    label = fields['comments']\n\n    return sig, label\n\ndef challenge_entry(sample_path):\n    \"\"\"\n    Load and run model on test set.\n    \"\"\"\n    # Load data\n    test_sample, label = load_data(sample_path)\n    print(label)\n    \n    # Load pretrained model\n    model_load = tf.keras.models.load_model(os.path.join(os.getcwd(), 'trained_model'), custom_objects={'mean_iou': metrics.mean_iou,\n                                                                                                        'dice_coefficient': metrics.dice_coefficient})\n#    model_load.summary()\n    \n    # Predict test sample\n    label_predict = model_load.predict(tf.convert_to_tensor(np.expand_dims(test_sample, axis=0))).squeeze().round()\n    label_predict[np.arange(0, len(label_predict)), np.argmax(label_predict, axis=1)] = 1\n    label_predict[label_predict<1] = 0 \n    \n    # Get the endpoints from predicted label\n    end_points = []\n    diff_persistentAF = np.diff(np.pad(label_predict[:, 1], (1, 1), 'constant', constant_values=(0, 0)))\n    ind_persistentAF_start = np.where(diff_persistentAF==1)[0]\n    ind_persistentAF_end = np.where(diff_persistentAF==-1)[0]\n    for start, end in zip(ind_persistentAF_start, ind_persistentAF_end):\n        end_points.append([int(start), int(end-1)])\n    \n    diff_paroAF = np.diff(np.pad(label_predict[:, 2], (1, 1), 'constant', constant_values=(0, 0)))\n    ind_paroAF_start = np.where(diff_paroAF==1)[0]\n    ind_paroAF_end = np.where(diff_paroAF==-1)[0]\n    for start, end in zip(ind_paroAF_start, ind_paroAF_end):\n        end_points.append([int(start), int(end-1)])\n    \n    pred_dict = {'predict_endpoints': end_points}\n    \n    return pred_dict\n\n\nif __name__ == '__main__':\n    DATA_PATH = sys.argv[1]\n    RESULT_PATH = sys.argv[2]\n    if not os.path.exists(RESULT_PATH):\n        os.makedirs(RESULT_PATH)\n        \n    test_set = open(os.path.join(DATA_PATH, 'RECORDS'), 'r').read().splitlines()\n    for i, sample in enumerate(test_set):\n        print(sample)\n        sample_path = os.path.join(DATA_PATH, sample)\n        pred_dict = challenge_entry(sample_path)\n        print(pred_dict)\n\n        save_dict(os.path.join(RESULT_PATH, sample+'.json'), pred_dict)\n\n","sub_path":"entry_2021.py","file_name":"entry_2021.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"644950591","text":"#\n# Copyright 2021 QuantRocket LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import time\nfrom .exchange_calendar_cmes import CMESExchangeCalendar\n\n\nclass CBOTExchangeCalendar(CMESExchangeCalendar):\n \"\"\"\n Exchange calendar for the Chicago Board of Trade (CBOT).\n CBOT is owned and operated by CME Group and follows the CME\n holiday schedule.\n\n Open Time: 7:00 PM, America/Chicago\n Close Time: 1:20 PM, America/Chicago\n\n Trading break: 7:45 - 8:30 AM\n \"\"\"\n\n name = \"CBOT\"\n open_times = ((None, time(19, 1)),)\n close_times = ((None, time(13, 20)),)\n\n break_start_times = (\n (None, time(7, 45)),\n )\n break_end_times = (\n (None, time(8, 31)),\n )\n","sub_path":"trading_calendars/exchange_calendar_cbot.py","file_name":"exchange_calendar_cbot.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"235506978","text":"import sys\nimport os\nimport errno\nimport time\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# global configuration\nimport vprimer.glv as glv\nimport vprimer.utils as utl\n\nclass AlleleSelect(object):\n\n def __init__(self, min_indel_len, max_indel_len):\n\n self.lines = list()\n self.var_types = list()\n\n self.top_smpl_list = list()\n self.gr_list = list()\n\n self.chrom = ''\n self.pos = 0\n self.alt_vc = 0\n self.vseq_ano = list()\n self.vseq_ano_str = ''\n self.len_ano = list()\n\n self.vseq_gno_str = \"\"\n\n # indel size threshold\n self.min_indel_len = min_indel_len\n self.max_indel_len = max_indel_len\n\n # ano_Group_alleleIdx\n self.g0x0 = -1\n self.g0x1 = -1\n self.g1x0 = -1\n self.g1x1 = -1\n\n # some list\n self.ano__gno_aix = list() # from record\n\n self.rgt__gno_aix = list()\n self.gti__gno_aix = list()\n self.ali__gno_aix = list()\n self.len__gno_aix = list()\n\n self.altype_gno = list()\n self.alint_gno = list()\n\n self.segr_ptn = glv.segr_ptn_NOP\n self.diff_allele_set = list()\n self.diff_allele_cnt = 0\n\n\n def select_diff_allele(self, record, top_smpl_list, gr_list):\n\n self.top_smpl_list = top_smpl_list # ['Akenohoshi', 'Akitakomachi']\n self.gr_list = gr_list # ['gAkenohoshi', 'gAkitakomachi']\n\n # get basic record info & ano__gno_aix\n self._get_record_info(record)\n\n # get basic sample's allele combination info\n self._get_allele_info()\n\n # get allele combination for marker among four alleles in two sample\n self._get_segregation_pattern()\n\n # for print\n for no, diff_allele in enumerate(self.diff_allele_set, 1):\n\n var_type, line = self._construct_var_line(no, diff_allele)\n self.lines.append(line)\n self.var_types.append(var_type)\n\n\n def _construct_var_line(self, no, diff_allele):\n\n line_list = list()\n\n # g0\n g0_ano = diff_allele[0]\n g0_len = self.len_ano[g0_ano]\n g0_name = self.gr_list[0]\n\n # g1\n g1_ano = diff_allele[1]\n g1_len = self.len_ano[g1_ano]\n g1_name = self.gr_list[1]\n\n # gts_segr_lens\n gts_str, lens_str = self._get_gts_lens_str()\n gts_segr_lens = \"{},{},{}\".format(\n gts_str, self.segr_ptn, lens_str)\n\n # var_type, longest_gno, longest_len, diff_len\n var_type, longest_gno, longest_len, diff_len = \\\n self._get_variant_type(g0_ano, g1_ano)\n\n targ_grp = \"{},{}\".format(g0_name, g1_name)\n targ_ano = \"{},{}\".format(g0_ano, g1_ano)\n set_n = 
\"{}/{}\".format(no, self.diff_allele_cnt)\n len_g0g1_dif_long = \"{},{},{},{}\".format(\n g0_len, g1_len, diff_len, longest_gno)\n\n vseq_gno_str = \"{},{}\".format(\n self.vseq_ano[g0_ano],\n self.vseq_ano[g1_ano])\n\n # ---------------------------------------\n # Synchronize with outlist.py\n line_list += [self.chrom]\n line_list += [self.pos]\n line_list += [targ_grp]\n line_list += [targ_ano]\n line_list += [vseq_gno_str]\n line_list += [gts_segr_lens]\n line_list += [var_type]\n # ---------------------\n line_list += [set_n]\n line_list += [len_g0g1_dif_long]\n line_list += [self.vseq_ano_str.upper()]\n\n return var_type, '\\t'.join(map(str, line_list))\n\n\n def _get_gts_lens_str(self):\n\n # gt_segr_len genotypes and segregation type and allele len\n gtsegr_gtlist = list()\n gtsegr_lenlist = list()\n\n for gno in range(2):\n # for gt_segr_len\n gtsegr_gtlist.append(\"\".join(map(str, self.gti__gno_aix[gno])))\n gtsegr_lenlist.append(\".\".join(map(str, self.len__gno_aix[gno])))\n\n gts_str = \"/\".join(map(str, gtsegr_gtlist))\n lens_str = \"/\".join(map(str, gtsegr_lenlist))\n\n return gts_str, lens_str\n\n\n def _get_variant_type(self, g0_ano, g1_ano):\n\n # glv.SNP\n # glv.MNV\n # glv.MIND\n # glv.INDEL\n # glv.OutOfRange\n\n var_type = glv.OutOfRange \n\n g0_len = self.len_ano[g0_ano]\n g1_len = self.len_ano[g1_ano]\n\n longest_gno = glv.SAME_LENGTH\n longest_len = g0_len\n\n if g0_len < g1_len:\n longest_gno = 1\n longest_len = g1_len\n elif g0_len > g1_len:\n longest_gno = 0\n longest_len = g0_len\n\n diff_len = abs(g0_len - g1_len)\n\n if diff_len == 0 and g0_len == 1:\n var_type = glv.SNP\n\n elif diff_len == 0 and longest_len > 1:\n self.var_type = glv.MNV\n\n elif diff_len < self.min_indel_len:\n var_type = glv.MIND\n\n elif self.min_indel_len <= diff_len and \\\n diff_len <= self.max_indel_len:\n var_type = glv.INDEL\n\n else:\n var_type = glv.OutOfRange\n\n return var_type, longest_gno, longest_len, diff_len\n \n\n def _get_segregation_pattern(self):\n\n # -- already removed\n # segr_ptn_NOT_EXIST_ALLELE = 'not_exist_allele'\n # segr_ptn_SAME_HOMO = 'same_homo'\n # segr_ptn_SAME_HETERO = 'same_hetero'\n\n # -- check now\n # segr_ptn_HOMO_HOMO = 'hoho'\n # segr_ptn_HOMO_HETERO_SHARE = 'hohe_s'\n # segr_ptn_HOMO_HETERO_NOT_SHARE = 'hohe_n'\n # segr_ptn_HETERO_HETERO_SHARE = 'hehe_s'\n # segr_ptn_HETERO_HETERO_NOT_SHARE = 'hehe_n'\n\n # self.g0x0, self.g0x1, self.g1x0, self.g1x1\n #log.debug(\"g0=[{}, {}], g1=[{}, {}]\".format(\n # self.g0x0, self.g0x1, self.g1x0, self.g1x1))\n\n # set_n\n # 1.homo vs homo\n # hoho 1 00/11 0,1\n # 2.homo vs hetero\n # hohe_s 1 00/01 0,1\n # hohe_n 2 00/12 0,1 0,2\n # 3.hetero vs hetero\n # hehe_s 3 01/02 0,2 1,0 1,2\n # hehe_n 4 01/23 0,2 0,3 1,2 1,3\n\n # 1.homo vs homo\n if utl.is_homo_homo(self.g0x0, self.g0x1, self.g1x0, self.g1x1):\n # AA,BB\n # hoho 1 00/11 0,1\n self.segr_ptn = glv.segr_ptn_HOMO_HOMO\n # [ AA0, BB0]\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n\n # 2.homo vs hetero\n elif utl.is_homo_hetero(self.g0x0, self.g0x1, self.g1x0, self.g1x1):\n\n if utl.is_share(self.g0x0, self.g0x1, self.g1x0, self.g1x1):\n # AA,AB\n # hohe_s 1 00/01 0,1\n self.segr_ptn = glv.segr_ptn_HOMO_HETERO_SHARE\n\n if self.altype_gno[0] == glv.AL_HETERO:\n # AB,AA(BB)\n if self.g0x0 != self.g1x0:\n # AB,BB [ AB0, BB0]\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n else:\n # AB,AA [ AB1, AA0]\n self.diff_allele_set.append([self.g0x1, self.g1x0])\n\n else:\n # AA(BB),AB\n if self.g0x0 != self.g1x0:\n # BB,AB [ BB0, AB0]\n 
self.diff_allele_set.append([self.g0x0, self.g1x0])\n else:\n # AA,AB [ AA0, AB1]\n self.diff_allele_set.append([self.g0x0, self.g1x1])\n\n else:\n # AA,BC\n # hohe_n 2 00/12 0,1 0,2\n self.segr_ptn = glv.segr_ptn_HOMO_HETERO_NOT_SHARE\n\n if self.altype_gno[0] == glv.AL_HETERO:\n # BC,AA -> [B,A] [C,A]\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n self.diff_allele_set.append([self.g0x1, self.g1x0])\n\n else:\n # AA,BC -> [A,B] [A,C]\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n self.diff_allele_set.append([self.g0x0, self.g1x1])\n\n # 3.hetero vs hetero\n else:\n\n if utl.is_share(self.g0x0, self.g0x1, self.g1x0, self.g1x1):\n # AB,AC\n # hehe_s 3 01/02 0,2 1,0 1,2\n self.segr_ptn = glv.segr_ptn_HETERO_HETERO_SHARE\n\n\n # 01,02\n if self.g0x0 == self.g1x0:\n # 0/2, 1/0, 1/2\n #self.diff_allele_set.append([self.g0x0, self.g1x0])\n self.diff_allele_set.append([self.g0x0, self.g1x1])\n self.diff_allele_set.append([self.g0x1, self.g1x0])\n self.diff_allele_set.append([self.g0x1, self.g1x1])\n\n # 01,20\n elif self.g0x0 == self.g1x1:\n # 0/2, 1,2, 1,0\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n #self.diff_allele_set.append([self.g0x0, self.g1x1])\n self.diff_allele_set.append([self.g0x1, self.g1x0])\n self.diff_allele_set.append([self.g0x1, self.g1x1])\n\n # 10,02\n elif self.g0x1 == self.g1x0:\n # 1,0, 1,2, 0,2\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n self.diff_allele_set.append([self.g0x0, self.g1x1])\n #self.diff_allele_set.append([self.g0x1, self.g1x0])\n self.diff_allele_set.append([self.g0x1, self.g1x1])\n\n # 10,20\n elif self.g0x1 == self.g1x1:\n # 1,2, 1,0, 0,2\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n self.diff_allele_set.append([self.g0x0, self.g1x1])\n self.diff_allele_set.append([self.g0x1, self.g1x0])\n #self.diff_allele_set.append([self.g0x1, self.g1x1])\n\n else:\n # AB,CD\n # hehe_n 4 01/23 0,2 0,3 1,2 1,3\n self.segr_ptn = glv.segr_ptn_HETERO_HETERO_NOT_SHARE\n # all combination 4 type\n # A. C.\n self.diff_allele_set.append([self.g0x0, self.g1x0])\n # A. 
.D\n self.diff_allele_set.append([self.g0x0, self.g1x1])\n # .B C.\n self.diff_allele_set.append([self.g0x1, self.g1x0])\n # .B .D\n self.diff_allele_set.append([self.g0x1, self.g1x1])\n\n self.diff_allele_cnt = len(self.diff_allele_set)\n\n\n def _get_allele_info(self):\n\n # gno 0..1 aix 0..1\n for gno in range(2):\n\n rgt_aix = list() # raw_gt\n gti_aix = list() # None->-1\n ali_aix = list() # +1\n len_aix = list()\n\n for aix in range(2):\n raw_gt = self.ano__gno_aix[gno][aix]\n if raw_gt is None: # ./.\n rgt_aix.append(',')\n ali = -1 + 1\n lena = 0\n gti_aix.append(-1)\n else:\n ali = raw_gt + 1\n rgt_aix.append(raw_gt)\n gti_aix.append(raw_gt)\n lena = self.len_ano[raw_gt]\n\n ali_aix.append(ali)\n len_aix.append(lena)\n\n # row genotype [gno][aix]\n self.rgt__gno_aix.append(rgt_aix)\n\n # genotype integer [gno][aix] (None == 0)\n self.gti__gno_aix.append(gti_aix)\n\n # allele integer [gno][aix]\n self.ali__gno_aix.append(ali_aix)\n\n # allele len [gno][aix]\n self.len__gno_aix.append(len_aix)\n\n # self.altype_gno\n if self.ano__gno_aix[gno][0] == self.ano__gno_aix[gno][1]:\n self.altype_gno.append(glv.AL_HOMO)\n else:\n self.altype_gno.append(glv.AL_HETERO)\n\n # allele int\n # (self.ali__gno_aix[gno][0] * 10) + self.ali__gno_aix[gno][1])\n self.alint_gno.append(\n self._get_allele_int(\n self.ali__gno_aix[gno][0],\n self.ali__gno_aix[gno][1]))\n\n\n def _get_allele_int(self, allele_int_10, allele_int_1):\n '''\n '''\n # 1 has already been added to this.\n return allele_int_10 * 10 + allele_int_1\n\n\n def _get_record_info(self, record):\n\n self.chrom = record.CHROM\n self.pos = record.POS\n self.alt_vc = len(record.ALT) # alt variant count\n # get vseq from ano\n self.vseq_ano = [record.REF]\n [self.vseq_ano.append(alt.value) for alt in record.ALT]\n # str\n self.vseq_ano_str = ','.join(map(str, self.vseq_ano))\n # len\n [self.len_ano.append(len(sqa)) for sqa in self.vseq_ano]\n # get ano from gno x aix\n sample0 = self.top_smpl_list[0]\n sample1 = self.top_smpl_list[1]\n\n #log.debug(\"sample0={}\".format(sample0))\n #log.debug(\"sample1={}\".format(sample1))\n\n # get ano from record by sample name\n self.g0x0, self.g0x1, self.g1x0, self.g1x1 = \\\n AlleleSelect.record_call_for_sample(record, sample0, sample1)\n\n # ano__gno_aix\n self.ano__gno_aix = [[self.g0x0, self.g0x1], [self.g1x0, self.g1x1]]\n\n\n @classmethod\n def allele_convert(cls, raw_gt, mode):\n '''\n raw_gt: ./. 0/0 0/1 ....\n '''\n\n ret = \"\"\n\n # separate into 1st and 2nd allele\n al_1st_str, al_2nd_str = raw_gt.split(\"/\")\n\n if al_1st_str == \"None\":\n al_1st_str = \".\"\n al_2nd_str = \".\"\n\n if mode == \"int\":\n if al_1st_str == \".\":\n al_int_10 = 0\n al_int_01 = 0\n else:\n # convert for index int\n al_1st_no = int(al_1st_str)\n al_2nd_no = int(al_2nd_str)\n\n al_int_10 = (al_1st_no + 1) * 10\n al_int_01 = al_2nd_no + 1\n\n ret = al_int_10 + al_int_01\n\n elif mode == \"ab\":\n if al_1st_str == \".\":\n al_1st_ab = al_1st_str\n al_2nd_ab = al_2nd_str\n else:\n abcd = [\"a\", \"b\", \"c\", \"d\"]\n al_1st_no = int(al_1st_str)\n al_2nd_no = int(al_2nd_str)\n al_1st_ab = abcd[al_1st_no]\n al_2nd_ab = abcd[al_2nd_no]\n\n ret = \"{}{}\".format(al_1st_ab, al_2nd_ab)\n\n else: # gt\n ret = \"{}/{}\".format(al_1st_str, al_2nd_str)\n\n return ret\n\n\n# @classmethod\n# def allele_int(cls, raw_gt, mode):\n# ''' convert allele GT to integer\n# ./. 
-> 0, 0/1 -> 12, 1/1 -> 22, 1/3 -> 24\n# '''\n#\n# #mode int, str\n# #print(raw_gt)\n#\n# allele_int_10 = 0 \n# allele_int_1 = 0\n#\n# allele_int = 0\n#\n# al_10, al_1 = raw_gt.split(\"/\")\n#\n# # for allele_int\n# if al_10 == \".\" or al_10 == \"None\":\n# al_10 = \".\"\n# else:\n# allele_int_10 = int(al_10) + 1\n#\n# if al_1 == \".\" or al_1 == \"None\":\n# al_1 = \".\"\n# else:\n# allele_int_1 = int(al_1) + 1\n#\n# allele_int = int(allele_int_10) * 10 + int(allele_int_1)\n#\n# ret = allele_int\n# if mode == \"gt\":\n# ret = \"{}/{}\".format(al_10, al_1)\n#\n# return ret\n\n\n @classmethod\n def record_call_for_sample(cls, record, sample0, sample1):\n\n fullname0 = utl.get_fullname(sample0)\n fullname1 = utl.get_fullname(sample1)\n\n #log.debug(\"CHROM={}, POS={}, ALT={}\".format(\n # record.CHROM, record.POS, record.ALT))\n #log.debug(\"sample0={}, gt_alleles {}\".format(\n # sample0,\n # record.call_for_sample[fullname0].gt_alleles[0]))\n #log.debug(\"sample1={}, gt_alleles {}\".format(\n # sample1,\n # record.call_for_sample[fullname1].gt_alleles[0]))\n\n #log.debug(\"plodity={}, is_variant={}, is_phased={}\".format(\n # record.call_for_sample[fullname1].plodity,\n # record.call_for_sample[fullname1].is_variant,\n # record.call_for_sample[fullname1].is_phased))\n #log.debug(\"is_het={}, gt_type={}\".format(\n # record.call_for_sample[fullname1].is_het,\n # record.call_for_sample[fullname1].gt_type))\n # HOM_REF, HOM_ALT, and HET\n\n #log.info(\"sample0={}, fullname0={}\".format(sample0, fullname0))\n #log.info(\"sample1={}, fullname1={}\".format(sample1, fullname1))\n\n # for REF 20200708\n if sample0 == 'ref':\n s0_0 = 0\n s0_1 = 0\n s1_0 = record.call_for_sample[fullname1].gt_alleles[0]\n if s1_0 is None:\n s1_1 = None\n else:\n s1_1 = record.call_for_sample[fullname1].gt_alleles[1]\n\n elif sample1 == 'ref':\n s0_0 = record.call_for_sample[fullname0].gt_alleles[0]\n if s0_0 is None:\n s0_1 = None\n else:\n s0_1 = record.call_for_sample[fullname0].gt_alleles[1]\n s1_0 = 0\n s1_1 = 0\n\n else:\n s0_0 = record.call_for_sample[fullname0].gt_alleles[0]\n if s0_0 is None:\n s0_1 = None\n else:\n s0_1 = record.call_for_sample[fullname0].gt_alleles[1]\n\n s1_0 = record.call_for_sample[fullname1].gt_alleles[0]\n if s1_0 is None:\n s1_1 = None\n else:\n s1_1 = record.call_for_sample[fullname1].gt_alleles[1]\n\n # int\n return s0_0, s0_1, s1_0, s1_1\n\n\n","sub_path":"vprimer/allele_select.py","file_name":"allele_select.py","file_ext":"py","file_size_in_byte":18046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"650182726","text":"import cv2\nimport numpy as np \n\n'''\nThis helps to remove the noise from the faces for enhancement \n'''\n\ndef cut_faces(images, coord):\n\tfaces = []\n\tfor (x,y,w,h) in coord:\n\t\tw_rm = int(0.2*w/2)\n\t\tfaces.append(image[y:y+h, x+w_rm: x-w_rm])\n\treturn faces\n\n# resizing the images to 50X50 photo size\n\ndef resize(images, size = (50,50)):\n\timage_norm = []\n\tfor image in images:\n\t\tif image.shape > size:\n\t\t\timage_norm = cv2.resize(image, size, interpolation = cv2.INTER_AREA )\n\t\telse:\n\t\t\timage_norm = cv2.resize(image, size, interpolation = \n\t\t\t\tcv2.INTER_CUBIC)\n\n# drawinng rectangle on the faces\ndef draw_rectangle(images, face_coord):\n\tfor (x,y,w,h) in face_coord:\n\t\tw_rm = int(0.2*w/2)\n\t\tcv2.rectangle(images, (x+w_rm, y), (x+w-w_rm, y+h),\n\t\t\t(150,150,0), 5)\n\ndef normalize_intensity(images):\n\timages_norm = []\n\tfor image in images:\n\t\tif len(image): # if 
this return 1 then it contiues else False\n\t\t\timage.append(cv2.equalizeHist(image))\n\n\n# helping function that combines the above functions\n\ndef normalize_faces(images, face_coord):\n\tfaces = cut_faces(images, face_coord)\n\tfaces = normalize_intensity(faces)\n\tfaces = resize(faces)\n\treturn faces\n\n\n\n\n","sub_path":"normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"90954214","text":"\"\"\"\nConvert epydoc markup into renderable content.\n\"\"\"\n\nimport astor\n\nfrom importlib import import_module\nfrom urllib.parse import quote\nimport ast\nimport builtins\nimport itertools\nimport os\nimport sys\nfrom typing import Mapping\n\nfrom pydoctor import model\nfrom pydoctor.epydoc.markup import ParseError\nfrom twisted.web.template import Tag, tags\nfrom pydoctor.epydoc.markup import DocstringLinker, ParsedDocstring\nimport pydoctor.epydoc.markup.plaintext\n\n\ndef _find_stdlib_dir():\n \"\"\"Find the standard library location for the currently running\n Python interpreter.\n \"\"\"\n\n # When running in a virtualenv, when some (but not all) modules\n # may be symlinked. We want the actual installation location of\n # the standard library, not the location of the virtualenv.\n os_mod_path = os.__file__\n if os_mod_path.endswith('.pyc') or os_mod_path.endswith('.pyo'):\n os_mod_path = os_mod_path[:-1]\n return os.path.dirname(os.path.realpath(os_mod_path))\n\nSTDLIB_DIR = _find_stdlib_dir()\nSTDLIB_URL = 'https://docs.python.org/3/library/'\n\n\ndef link(o):\n return quote(o.fullName()+'.html')\n\n\ndef get_parser(obj):\n formatname = obj.system.options.docformat\n try:\n mod = import_module('pydoctor.epydoc.markup.' 
+ formatname)\n except ImportError as e:\n msg = 'Error trying to import %r parser:\\n\\n %s: %s\\n\\nUsing plain text formatting only.'%(\n formatname, e.__class__.__name__, e)\n obj.system.msg('epydoc2stan', msg, thresh=-1, once=True)\n mod = pydoctor.epydoc.markup.plaintext\n return mod.parse_docstring\n\n\ndef get_docstring(obj):\n for source in obj.docsources():\n doc = source.docstring\n if doc:\n return doc, source\n if doc is not None:\n # Treat empty docstring as undocumented.\n return None, source\n return None, None\n\n\ndef stdlib_doc_link_for_name(name):\n parts = name.split('.')\n for i in range(len(parts), 0, -1):\n sub_parts = parts[:i]\n filename = '/'.join(sub_parts)\n sub_name = '.'.join(sub_parts)\n if sub_name == 'os.path' \\\n or os.path.exists(os.path.join(STDLIB_DIR, filename) + '.py') \\\n or os.path.exists(os.path.join(STDLIB_DIR, filename, '__init__.py')) \\\n or os.path.exists(os.path.join(STDLIB_DIR, 'lib-dynload', filename) + '.so') \\\n or sub_name in sys.builtin_module_names:\n return STDLIB_URL + sub_name + '.html#' + name\n part0 = parts[0]\n if part0 in builtins.__dict__ and not part0.startswith('__'):\n bltin = builtins.__dict__[part0]\n if isinstance(bltin, type):\n if issubclass(bltin, BaseException):\n return STDLIB_URL + 'exceptions.html#' + name\n else:\n return STDLIB_URL + 'stdtypes.html#' + name\n elif callable(bltin):\n return STDLIB_URL + 'functions.html#' + name\n else:\n return STDLIB_URL + 'constants.html#' + name\n return None\n\n\nclass _EpydocLinker(DocstringLinker):\n\n def __init__(self, obj):\n self.obj = obj\n\n def _objLink(self, obj):\n if obj.documentation_location is model.DocLocation.PARENT_PAGE:\n p = obj.parent\n if isinstance(p, model.Module) and p.name == '__init__':\n p = p.parent\n return link(p) + '#' + quote(obj.name)\n elif obj.documentation_location is model.DocLocation.OWN_PAGE:\n return link(obj)\n else:\n raise AssertionError(\n f\"Unknown documentation_location: {obj.documentation_location}\")\n\n def look_for_name(self, name, candidates):\n part0 = name.split('.')[0]\n potential_targets = []\n for src in candidates:\n if part0 not in src.contents:\n continue\n target = src.resolveName(name)\n if target is not None and target not in potential_targets:\n potential_targets.append(target)\n if len(potential_targets) == 1:\n return potential_targets[0]\n elif len(potential_targets) > 1:\n self.obj.report(\n \"ambiguous ref to %s, could be %s\" % (\n name,\n ', '.join(ob.fullName() for ob in potential_targets)),\n section='resolve_identifier_xref')\n return None\n\n def look_for_intersphinx(self, name):\n \"\"\"\n Return link for `name` based on intersphinx inventory.\n\n Return None if link is not found.\n \"\"\"\n return self.obj.system.intersphinx.getLink(name)\n\n def resolve_identifier_xref(self, fullID):\n\n # There is a lot of DWIM here. 
Look for a global match first,\n # to reduce the chance of a false positive.\n\n # Check if fullID is the fullName of an object.\n target = self.obj.system.objForFullName(fullID)\n if target is not None:\n return self._objLink(target)\n\n # Check to see if fullID names a builtin or standard library module.\n fullerID = self.obj.expandName(fullID)\n linktext = stdlib_doc_link_for_name(fullerID)\n if linktext is not None:\n return linktext\n\n # Check if the fullID exists in an intersphinx inventory.\n target = self.look_for_intersphinx(fullerID)\n if not target:\n # FIXME: https://github.com/twisted/pydoctor/issues/125\n # expandName is unreliable so in the case fullerID fails, we\n # try our luck with fullID.\n target = self.look_for_intersphinx(fullID)\n if target:\n return target\n\n # Since there was no global match, go look for the name in the\n # context where it was used.\n\n # Check if fullID refers to an object by Python name resolution\n # in our context. Walk up the object tree and see if fullID refers\n # to an object by Python name resolution in each context.\n src = self.obj\n while src is not None:\n target = src.resolveName(fullID)\n if target is not None:\n return self._objLink(target)\n src = src.parent\n\n # Walk up the object tree again and see if fullID refers to an\n # object in an \"uncle\" object. (So if p.m1 has a class C, the\n # docstring for p.m2 can say L{C} to refer to the class in m1).\n # If at any level fullID refers to more than one object, complain.\n src = self.obj\n while src is not None:\n target = self.look_for_name(fullID, src.contents.values())\n if target is not None:\n return self._objLink(target)\n src = src.parent\n\n # Examine every module and package in the system and see if fullID\n # names an object in each one. 
Again, if more than one object is\n # found, complain.\n target = self.look_for_name(fullID, itertools.chain(\n self.obj.system.objectsOfType(model.Module),\n self.obj.system.objectsOfType(model.Package)))\n if target is not None:\n return self._objLink(target)\n\n if fullID == fullerID:\n self.obj.report(\n \"invalid ref to '%s' not resolved\" % (fullID,),\n section='resolve_identifier_xref')\n else:\n self.obj.report(\n \"invalid ref to '%s' resolved as '%s'\" % (fullID, fullerID),\n section='resolve_identifier_xref')\n raise LookupError(fullID)\n\n\nclass FieldDesc:\n def __init__(self):\n self.kind = None\n self.name = None\n self.type = None\n self.body = None\n def format(self):\n if self.body is None:\n body = ''\n else:\n body = self.body\n if self.type is not None:\n body = body, ' (type: ', self.type, ')'\n return body\n def __repr__(self):\n contents = ', '.join(\n f\"{k}={v!r}\"\n for k, v in self.__dict__.items()\n )\n return f\"<{self.__class__.__name__}({contents})>\"\n\n\ndef format_desc_list(singular, descs, plural=None):\n if plural is None:\n plural = singular + 's'\n if not descs:\n return ''\n if len(descs) > 1:\n label = plural\n else:\n label = singular\n r = []\n first = True\n for d in descs:\n if first:\n row = tags.tr(class_=\"fieldStart\")\n row(tags.td(class_=\"fieldName\")(label))\n first = False\n else:\n row = tags.tr()\n row(tags.td())\n if d.name is None:\n row(tags.td(colspan=\"2\")(d.format()))\n else:\n row(tags.td(class_=\"fieldArg\")(d.name), tags.td(d.format()))\n r.append(row)\n return r\n\ndef format_field_list(obj, singular, fields, plural=None):\n if plural is None:\n plural = singular + 's'\n if not fields:\n return ''\n if len(fields) > 1:\n label = plural\n else:\n label = singular\n rows = []\n first = True\n for field in fields:\n if first:\n row = tags.tr(class_=\"fieldStart\")\n row(tags.td(class_=\"fieldName\")(label))\n first=False\n else:\n row = tags.tr()\n row(tags.td())\n row(tags.td(colspan=\"2\")(field.body))\n rows.append(row)\n return rows\n\n\nclass Field:\n \"\"\"Like pydoctor.epydoc.markup.Field, but without the gross accessor\n methods and with a formatted body.\"\"\"\n def __init__(self, field, source):\n self.tag = field.tag()\n self.arg = field.arg()\n self.source = source\n self.lineno = field.lineno\n self.body = field.body().to_stan(_EpydocLinker(source))\n\n def __repr__(self):\n r = repr(self.body)\n if len(r) > 25:\n r = r[:20] + '...' 
+ r[-2:]\n return \"<%s %r %r %d %s>\"%(self.__class__.__name__,\n self.tag, self.arg, self.lineno, r)\n\n def report(self, message: str) -> None:\n self.source.report(message, lineno_offset=self.lineno, section='docstring')\n\n\nclass FieldHandler:\n\n def __init__(self, obj, annotations):\n self.obj = obj\n\n self.types = {}\n self.types.update(annotations)\n\n self.parameter_descs = []\n self.return_desc = None\n self.raise_descs = []\n self.seealsos = []\n self.notes = []\n self.authors = []\n self.sinces = []\n self.unknowns = []\n self.unattached_types = {}\n\n @classmethod\n def from_ast_annotations(cls, obj: model.Documentable, annotations: Mapping[str, ast.expr]) -> \"FieldHandler\":\n linker = _EpydocLinker(obj)\n formatted_annotations = {\n name: AnnotationDocstring(value).to_stan(linker)\n for name, value in annotations.items()\n }\n ret_type = formatted_annotations.pop('return', None)\n handler = cls(obj, formatted_annotations)\n if ret_type is not None:\n return_type = FieldDesc()\n return_type.body = ret_type\n handler.handle_returntype(return_type)\n return handler\n\n def redef(self, field):\n self.obj.system.msg(\n \"epytext\",\n \"on %r: redefinition of @type %s\"%(self.obj.fullName(), field.arg),\n thresh=-1)\n\n def handle_return(self, field):\n if field.arg is not None:\n field.report('Unexpected argument in %s field' % (field.tag,))\n if not self.return_desc:\n self.return_desc = FieldDesc()\n if self.return_desc.body:\n self.obj.system.msg('epydoc2stan', 'XXX')\n self.return_desc.body = field.body\n handle_returns = handle_return\n\n def handle_returntype(self, field):\n if getattr(field, 'arg', None) is not None:\n field.report('Unexpected argument in %s field' % (field.tag,))\n if not self.return_desc:\n self.return_desc = FieldDesc()\n if self.return_desc.type:\n self.obj.system.msg('epydoc2stan', 'XXX')\n self.return_desc.type = field.body\n handle_rtype = handle_returntype\n\n def add_type_info(self, desc_list, field):\n if desc_list and desc_list[-1].name == field.arg:\n if desc_list[-1].type is not None:\n self.redef(field)\n desc_list[-1].type = field.body\n else:\n d = FieldDesc()\n d.kind = field.tag\n d.name = field.arg\n d.type = field.body\n desc_list.append(d)\n\n def _handle_param_name(self, field):\n name = field.arg\n if name is None:\n field.report('Parameter name missing')\n return None\n if name and name.startswith('*'):\n field.report('Parameter name \"%s\" should not include asterixes' % (name,))\n return name.lstrip('*')\n else:\n return name\n\n def add_info(self, desc_list, name, field):\n d = FieldDesc()\n d.kind = field.tag\n d.name = name\n d.body = field.body\n desc_list.append(d)\n\n def handle_type(self, field):\n name = self._handle_param_name(field)\n if name is not None:\n self.types[name] = field.body\n\n def handle_param(self, field):\n name = self._handle_param_name(field)\n if name is not None:\n self.add_info(self.parameter_descs, name, field)\n handle_arg = handle_param\n handle_keyword = handle_param\n\n\n def handled_elsewhere(self, field):\n # Some fields are handled by extract_fields below.\n pass\n\n handle_ivar = handled_elsewhere\n handle_cvar = handled_elsewhere\n handle_var = handled_elsewhere\n\n def handle_raises(self, field):\n name = field.arg\n if name is None:\n field.report('Exception type missing')\n self.add_info(self.raise_descs, name, field)\n handle_raise = handle_raises\n\n def handle_seealso(self, field):\n self.seealsos.append(field)\n handle_see = handle_seealso\n\n def handle_note(self, field):\n 
self.notes.append(field)\n\n def handle_author(self, field):\n self.authors.append(field)\n\n def handle_since(self, field):\n self.sinces.append(field)\n\n def handleUnknownField(self, field):\n field.report('Unknown field \"%s\"' % (field.tag,))\n self.add_info(self.unknowns, field.arg, field)\n\n def handle(self, field):\n m = getattr(self, 'handle_' + field.tag, self.handleUnknownField)\n m(field)\n\n def resolve_types(self):\n for pd in self.parameter_descs:\n if pd.name in self.types:\n pd.type = self.types[pd.name]\n\n def format(self):\n r = []\n\n r.append(format_desc_list('Parameters', self.parameter_descs, 'Parameters'))\n if self.return_desc:\n r.append(tags.tr(class_=\"fieldStart\")(tags.td(class_=\"fieldName\")('Returns'),\n tags.td(colspan=\"2\")(self.return_desc.format())))\n r.append(format_desc_list(\"Raises\", self.raise_descs, \"Raises\"))\n for s, p, l in (('Author', 'Authors', self.authors),\n ('See Also', 'See Also', self.seealsos),\n ('Present Since', 'Present Since', self.sinces),\n ('Note', 'Notes', self.notes)):\n r.append(format_field_list(self.obj, s, l, p))\n unknowns = {}\n for fieldinfo in self.unknowns:\n unknowns.setdefault(fieldinfo.kind, []).append(fieldinfo)\n for kind, fieldlist in unknowns.items():\n label = f\"Unknown Field: {kind}\"\n r.append(format_desc_list(label, fieldlist, label))\n\n if any(r):\n return tags.table(class_='fieldTable')(r)\n else:\n return tags.transparent\n\n\ndef reportErrors(obj, errs):\n if errs and obj.fullName() not in obj.system.docstring_syntax_errors:\n obj.system.docstring_syntax_errors.add(obj.fullName())\n\n for err in errs:\n lineno_offset = 0\n if isinstance(err, str):\n descr = err\n elif isinstance(err, ParseError):\n descr = err.descr()\n lineno_offset = err.linenum() - 1\n else:\n raise TypeError(type(err).__name__)\n\n obj.report(\n 'bad docstring: ' + descr,\n lineno_offset=lineno_offset,\n section='docstring'\n )\n\n\ndef parse_docstring(obj, doc, source):\n \"\"\"Parse a docstring.\n @rtype: L{ParsedDocstring}\n \"\"\"\n\n parser = get_parser(obj)\n errs = []\n try:\n pdoc = parser(doc, errs)\n except Exception as e:\n errs.append(f'{e.__class__.__name__}: {e}')\n pdoc = None\n if pdoc is None:\n pdoc = pydoctor.epydoc.markup.plaintext.parse_docstring(doc, errs)\n if errs:\n reportErrors(source, errs)\n return pdoc\n\n\ndef format_docstring(obj):\n \"\"\"Generate an HTML representation of a docstring\"\"\"\n\n doc, source = get_docstring(obj)\n\n # Use cached or split version if possible.\n pdoc = getattr(obj, 'parsed_docstring', None)\n\n if pdoc is None:\n if doc is None:\n return tags.div(class_='undocumented')(\"Undocumented\")\n pdoc = parse_docstring(obj, doc, source)\n obj.parsed_docstring = pdoc\n elif source is None:\n # A split field is documented by its parent.\n source = obj.parent\n\n try:\n stan = pdoc.to_stan(_EpydocLinker(source))\n except Exception as e:\n errs = [f'{e.__class__.__name__}: {e}']\n if doc is None:\n stan = tags.p(class_=\"undocumented\")('Broken description')\n else:\n pdoc_plain = pydoctor.epydoc.markup.plaintext.parse_docstring(doc, errs)\n stan = pdoc_plain.to_stan(_EpydocLinker(source))\n reportErrors(source, errs)\n\n content = [stan] if stan.tagName else stan.children\n fields = pdoc.fields\n s = tags.div(*content)\n fh = FieldHandler.from_ast_annotations(obj, getattr(source, 'annotations', {}))\n for field in fields:\n fh.handle(Field(field, source))\n fh.resolve_types()\n s(fh.format())\n return s\n\n\ndef format_summary(obj):\n \"\"\"Generate an shortened HTML 
representation of a docstring.\"\"\"\n\n doc, source = get_docstring(obj)\n if doc is None:\n # Attributes can be documented as fields in their parent's docstring.\n if isinstance(obj, model.Attribute):\n pdoc = getattr(obj, 'parsed_docstring', None)\n else:\n pdoc = None\n if pdoc is None:\n return format_undocumented(obj)\n source = obj.parent\n else:\n # Use up to three first non-empty lines of doc string as summary.\n lines = itertools.dropwhile(lambda line: not line.strip(),\n doc.split('\\n'))\n lines = itertools.takewhile(lambda line: line.strip(), lines)\n lines = [ line.strip() for line in lines ]\n if len(lines) > 3:\n return tags.span(class_='undocumented')(\"No summary\")\n pdoc = parse_docstring(obj, ' '.join(lines), source)\n\n try:\n stan = pdoc.to_stan(_EpydocLinker(source))\n except Exception:\n # This problem will likely be reported by the full docstring as well,\n # so don't spam the log.\n return tags.span(class_='undocumented')(\"Broken description\")\n\n content = [stan] if stan.tagName else stan.children\n if content and isinstance(content[0], Tag) and content[0].tagName == 'p':\n content = content[0].children\n return tags.span(*content)\n\n\ndef format_undocumented(obj):\n \"\"\"Generate an HTML representation for an object lacking a docstring.\"\"\"\n subdocstrings = {}\n subcounts = {}\n for subob in obj.contents.values():\n k = subob.kind.lower()\n subcounts[k] = subcounts.get(k, 0) + 1\n if subob.docstring is not None:\n subdocstrings[k] = subdocstrings.get(k, 0) + 1\n if isinstance(obj, model.Package):\n subcounts['module'] -= 1\n if subdocstrings:\n plurals = {'class': 'classes'}\n text = (\n \"No \", obj.kind.lower(), \" docstring; \"\n ', '.join(\n f\"{subdocstrings.get(k, 0)}/{subcounts[k]} \"\n f\"{plurals.get(k, k + 's')}\"\n for k in sorted(subcounts)\n ),\n \" documented\"\n )\n else:\n text = \"Undocumented\"\n return tags.span(class_='undocumented')(text)\n\n\ndef type2stan(obj):\n parsed_type = get_parsed_type(obj)\n if parsed_type is None:\n return None\n else:\n return parsed_type.to_stan(_EpydocLinker(obj))\n\ndef get_parsed_type(obj):\n parsed_type = getattr(obj, 'parsed_type', None)\n if parsed_type is not None:\n return parsed_type\n\n annotation = getattr(obj, 'annotation', None)\n if annotation is not None:\n return AnnotationDocstring(annotation)\n\n return None\n\n\nclass AnnotationDocstring(ParsedDocstring):\n\n def __init__(self, annotation):\n ParsedDocstring.__init__(self, ())\n self.annotation = annotation\n\n def to_stan(self, docstring_linker):\n src = astor.to_source(self.annotation).strip()\n return tags.code(src)\n\n\nfield_name_to_human_name = {\n 'ivar': 'Instance Variable',\n 'cvar': 'Class Variable',\n 'var': 'Variable',\n }\n\n\ndef extract_fields(obj):\n doc, source = get_docstring(obj)\n if doc is None:\n return\n\n pdoc = parse_docstring(obj, doc, source)\n obj.parsed_docstring = pdoc\n\n for field in pdoc.fields:\n tag = field.tag()\n if tag in ['ivar', 'cvar', 'var', 'type']:\n arg = field.arg()\n if arg is None:\n source.report(\"Missing field name in @%s\" % (tag,),\n 'docstring', field.lineno)\n continue\n attrobj = obj.contents.get(arg)\n if attrobj is None:\n attrobj = obj.system.Attribute(obj.system, arg, obj)\n attrobj.kind = None\n attrobj.parentMod = obj.parentMod\n obj.system.addObject(attrobj)\n attrobj.setLineNumber(source.docstring_lineno + field.lineno)\n if tag == 'type':\n attrobj.parsed_type = field.body()\n else:\n attrobj.parsed_docstring = field.body()\n attrobj.kind = 
field_name_to_human_name[tag]\n","sub_path":"pydoctor/epydoc2stan.py","file_name":"epydoc2stan.py","file_ext":"py","file_size_in_byte":22323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"192281544","text":"#! /usr/bin/env python\n# coding:utf-8\n\n\ndef lev_dist(\n xs: str,\n ys: str\n):\n cost = [[0 for _ in range(len(ys) + 1)] for _ in range(len(xs) + 1)]\n com = [[\"nothing\" for _ in range(len(ys) + 1)] for _ in range(len(xs) + 1)]\n index_from = [[(0, 0) for _ in range(len(ys) + 1)]\n for _ in range(len(xs) + 1)]\n\n for i in range(len(xs) + 1):\n cost[i][0] = i\n if i != 0:\n index_from[i][0] = (i-1, 0)\n com[i][0] = \"del\"\n\n for j in range(len(ys) + 1):\n cost[0][j] = j\n if j != 0:\n index_from[0][j] = (0, j-1)\n com[0][j] = \"add\"\n\n for i in range(1, len(xs) + 1):\n for j in range(1, len(ys) + 1):\n if xs[i-1] == ys[j-1]:\n cost[i][j] = cost[i-1][j-1]\n com[i][j] = \"pass\"\n index_from[i][j] = (i-1, j-1)\n else:\n lst = [\n cost[i-1][j-1] + 1,\n cost[i-1][j] + 1,\n cost[i][j-1] + 1,\n ]\n min_val = min(lst)\n min_index = lst.index(min_val)\n cost[i][j] = min_val\n if min_index == 0:\n com[i][j] = \"subs\"\n index_from[i][j] = (i-1, j-1)\n elif min_index == 1:\n com[i][j] = \"del\"\n index_from[i][j] = (i-1, j)\n elif min_index == 2:\n com[i][j] = \"add\"\n index_from[i][j] = (i, j-1)\n\n words = []\n frm, to = len(xs), len(ys)\n while not (frm == 0 and to == 0):\n tp = []\n if com[frm][to] == \"subs\":\n tp = (xs[frm-1], ys[to-1])\n elif com[frm][to] == \"del\":\n tp = (xs[frm-1], \"\")\n elif com[frm][to] == \"add\":\n tp = (\"\", ys[to-1])\n if tp:\n words.append(tp)\n frm, to = index_from[frm][to]\n\n return cost[-1][-1], list(reversed(words))\n\n\nif __name__ == '__main__':\n cost, words = lev_dist(\n \"hello\",\n \"world\"\n )\n print(cost)\n print(words)\n","sub_path":"lev.py","file_name":"lev.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"315544251","text":"import os\nfrom datetime import datetime\n\ndef build(parent):\n\tfilePath = os.path.join(parent.configPath, 'sserial.hal')\n\tif os.path.exists(filePath):\n\t\tos.remove(filePath)\n\tif parent.ssCardCB.currentData():\n\t\tparent.machinePTE.appendPlainText(f'Building {filePath}')\n\t\tcontents = []\n\t\tcontents = ['# This file was created with the 7i96 Wizard on ']\n\t\tcontents.append(datetime.now().strftime('%b %d %Y %H:%M:%S') + '\\n')\n\t\tcontents.append('# If you make changes to this file DO NOT use the Configuration Tool\\n\\n')\n\t\tcontents.append(f'# Configuration file for the {parent.ssCardCB.currentText()} Smart Serial Card\\n\\n')\n\n\t\tif parent.ssCardCB.currentText() == '7i64':\n\t\t\tfor i in range(24):\n\t\t\t\tif getattr(parent, 'ss7i64in_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i64in_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i64in_{i} hm2_7i96.0.7i64.0.0.input-{i:02} <= {inPin}\\n')\n\t\t\tfor i in range(24):\n\t\t\t\tif getattr(parent, 'ss7i64out_' + str(i)).text() != 'Select':\n\t\t\t\t\toutPin = getattr(parent, 'ss7i64out_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i64out_{i} hm2_7i96.0.7i64.0.0.output-{i:02} => {outPin}\\n')\n\n\t\telif parent.ssCardCB.currentText() == '7i69':\n\t\t\tfor i in range(24):\n\t\t\t\tif getattr(parent, 'ss7i69in_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i69in_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net 
ss7i69in_{i} hm2_7i96.0.7i69.0.0.input-{i:02} <= {inPin}\\n')\n\t\t\tfor i in range(24):\n\t\t\t\tif getattr(parent, 'ss7i69out_' + str(i)).text() != 'Select':\n\t\t\t\t\toutPin = getattr(parent, 'ss7i69out_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i69out_{i} hm2_7i96.0.7i69.0.0.output-{i:02} => {outPin}\\n')\n\n\t\telif parent.ssCardCB.currentText() == '7i70':\n\t\t\tfor i in range(48):\n\t\t\t\tif getattr(parent, 'ss7i70in_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i70in_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i70in_{i} hm2_7i96.0.7i70.0.0.input-{i:02} <= {inPin}\\n')\n\n\t\telif parent.ssCardCB.currentText() == '7i71':\n\t\t\tfor i in range(48):\n\t\t\t\tif getattr(parent, 'ss7i71out_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i71out_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i71out_{i} hm2_7i96.0.7i71.0.0.output-{i:02} <= {inPin}\\n')\n\n\t\telif parent.ssCardCB.currentText() == '7i72':\n\t\t\tfor i in range(48):\n\t\t\t\tif getattr(parent, 'ss7i72out_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i72out_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i72out_{i} hm2_7i96.0.7i72.0.0.output-{i:02} <= {inPin}\\n')\n\n\t\telif parent.ssCardCB.currentText() == '7i73':\n\t\t\tfor i in range(16):\n\t\t\t\tif getattr(parent, 'ss7i73key_' + str(i)).text() != 'Select':\n\t\t\t\t\tkeyPin = getattr(parent, 'ss7i73key_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i73key_{i} hm2_7i96.0.7i73.0.0.input-{i:02} <= {keyPin}\\n')\n\t\t\tfor i in range(12):\n\t\t\t\tif getattr(parent, 'ss7i73lcd_' + str(i)).text() != 'Select':\n\t\t\t\t\tlcdPin = getattr(parent, 'ss7i73lcd_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i73lcd_{i} hm2_7i96.0.7i73.0.0.output-{i:02} => {lcdPin}\\n')\n\t\t\tfor i in range(16):\n\t\t\t\tif getattr(parent, 'ss7i73in_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i73in_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i73in_{i} hm2_7i96.0.7i73.0.0.input-{i:02} <= {inPin}\\n')\n\t\t\tfor i in range(2):\n\t\t\t\tif getattr(parent, 'ss7i73out_' + str(i)).text() != 'Select':\n\t\t\t\t\toutPin = getattr(parent, 'ss7i73out_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i73out_{i} hm2_7i96.0.7i84.0.0.output-{i:02} => {outPin}\\n')\n\n\n\t\telif parent.ssCardCB.currentText() == '7i84':\n\t\t\tfor i in range(32):\n\t\t\t\tif getattr(parent, 'ss7i84in_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i84in_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i84in_{i} hm2_7i96.0.7i84.0.0.input-{i:02} <= {inPin}\\n')\n\t\t\tfor i in range(16):\n\t\t\t\tif getattr(parent, 'ss7i84out_' + str(i)).text() != 'Select':\n\t\t\t\t\toutPin = getattr(parent, 'ss7i84out_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i84out_{i} hm2_7i96.0.7i84.0.0.output-{i:02} => {outPin}\\n')\n\n\t\telif parent.ssCardCB.currentText() == '7i87':\n\t\t\tfor i in range(32):\n\t\t\t\tif getattr(parent, 'ss7i87in_' + str(i)).text() != 'Select':\n\t\t\t\t\tinPin = getattr(parent, 'ss7i87in_' + str(i)).text()\n\t\t\t\t\tcontents.append(f'net ss7i87in_{i} hm2_7i96.0.7i87.0.0.input-{i:02} <= {inPin}\\n')\n\n\t\ttry:\n\t\t\twith open(filePath, 'w') as f:\n\t\t\t\tf.writelines(contents)\n\t\texcept OSError:\n\t\t\tparent.machinePTE.appendPlainText(f'OS error\\n 
{traceback.print_exc()}')\n","sub_path":"7i76e/src/lib7i76e/buildss.py","file_name":"buildss.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"462124828","text":"\nfrom utils.df_handle import *\nimport pendulum\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.providers.tableau.operators.tableau_refresh_workbook import TableauRefreshWorkbookOperator\n# from airflow.providers.postgres.hooks.postgres import PostgresHook\n# from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook\n\n\nlocal_tz = pendulum.timezone(\"Asia/Bangkok\")\n\nname='BaoCaoSales'\nprefix='Sales'\ncsv_path = '/home/biserver/data_lake/'+prefix+name+'/'\n\ndag_params = {\n 'owner': 'airflow',\n \"depends_on_past\": False,\n 'start_date': datetime(2021, 10, 1, tzinfo=local_tz),\n 'email_on_failure': True,\n 'email_on_retry': False,\n 'email':['duyvq@merapgroup.com', 'vanquangduy10@gmail.com'],\n 'do_xcom_push': False,\n 'execution_timeout':timedelta(seconds=300)\n # 'retries': 3,\n # 'retry_delay': timedelta(minutes=10),\n}\n\ndag = DAG(prefix+name,\n catchup=False,\n default_args=dag_params,\n schedule_interval= '*/15 8-17,23-23 * * *',\n tags=[prefix+name, 'Daily', '15_mins']\n)\n\ndef etl_to_postgres():\n # day_ago = 2\n datenow = datetime.now().strftime(\"%Y%m%d\")\n # datenow_day_ago = ( datetime.now()-timedelta(day_ago) ).strftime(\"%Y%m%d\")\n # param_1 = f\"'{datenow_day_ago}'\"\n # param_2 = f\"'20210901'\"\n param_3 = f\"'{datenow}'\"\n # param_4 = f\"'20211109'\"\n\n query = f\"EXEC [pr_OM_RawdataSellOutPayroll_BI_v1] @Fromdate={param_3}, @Todate={param_3}\"\n\n FINAL = get_ms_df(sql=query)\n\n FINAL.columns = cleancols(FINAL)\n\n FINAL.NgayGiaoHang.fillna(datetime(1900, 1, 1), inplace=True)\n\n FINAL['phanloaispcl'] = FINAL['MaSanPham'].map(\n df_to_dict(get_ps_df(\"select masanpham, phanloai from d_nhom_sp where nhomsp='SPCL'\"))\n ).fillna('Khác')\n\n FINAL['nhomsp'] = FINAL['MaSanPham'].map(\n df_to_dict(get_ps_df(\"select masanpham, nhomsp from d_nhom_sp where nhomsp IN ('SPCL', 'SP MOI') \"))\n ).fillna('Khác')\n\n FINAL['khuvucviettat'] = FINAL['TenKhuVuc'].map(\n df_to_dict(get_ps_df(\"select * from d_mkv_viet_tat\"))\n )\n\n FINAL['chinhanh'] = FINAL['MaCongTyCN'].map(\n df_to_dict(get_ps_df(\"select * from d_chi_nhanh\"))\n )\n\n FINAL['newhco'] = (FINAL['MaKenhPhu']+FINAL['MaPhanLoaiHCO']).map(\n df_to_dict(get_ps_df(\"SELECT concat(makenhphu, maphanloaihco) as concat, new_mahco FROM d_pl_hco\"))\n )\n\n FINAL['phanam'] = FINAL['MaSanPham'].map(\n df_to_dict(get_ps_df(\"select masanpham, nhomsp from d_nhom_sp where nhomsp='PHA NAM'\"))).fillna('Merap')\n\n FINAL['thang'] = FINAL['NgayChungTu'] + pd.offsets.Day() - pd.offsets.MonthBegin()\n\n FINAL['inserted_at'] = datetime.now()\n\n pk = ['macongtycn', 'ngaychungtu', 'sodondathang', 'masanpham', 'solo', 'lineref', 'soluong']\n\n execute_values_upsert(FINAL, 'f_sales', pk=pk)\n\n\n\ndummy_start = DummyOperator(task_id=\"dummy_start\", dag=dag)\n\npy_etl_to_postgres = PythonOperator(task_id=\"etl_to_postgres\", python_callable=etl_to_postgres, dag=dag)\n\n\n# hello_task4 = ToCSVMsSqlOperator(task_id='sample-task-4', mssql_conn_id=\"1_dms_conn_id\", sql=sql, database=\"PhaNam_eSales_PRO\", path=path, dag=dag)\n\ntab_refresh = TableauRefreshWorkbookOperator(task_id='tab_refresh', workbook_name='Báo Cáo Sales Trong Tháng v1.2', 
dag=dag)\n\n\ndummy_start >> py_etl_to_postgres >> tab_refresh\n# >> tab_refresh\n","sub_path":"dags/BaoCaoSales.py","file_name":"BaoCaoSales.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"120026212","text":"S=input()\ntotal=0\nfor bit in range(1<=10.0.0\"],\n entry_points={\n 'console_scripts': [\n 'complex-dist=complexdist:main',\n 'complex-dist2=complexdist:main',\n ],\n },\n )\n\n","sub_path":"Sklearn_scipy_numpy/source/wheel/test/complex-dist/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"513272608","text":"from flask import Flask,request\nimport random\nfrom os import getenv\nimport random \nimport datetime \nimport calendar\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = getenv(\"DATABASE_URI\")\n\n@app.route('/get_fortune//', methods=['POST'])\ndef get_fortune(day, number):\n if number == 10:\n return f\"On {day} you loose you're house and fiance, Sorry about that!\"\n elif number <= 13 :\n return f\"On {day} you will win £300 from the lottery, that's it sorry!\"\n elif number <= 16:\n return f\"On {day} you will recieve unlimited food, Love dat!\"\n elif number <= 19:\n return f\"On {day} you will becomone a millionaire, lucky you!!\"\n else :\n return f\"On {day} you will find nothing but peace & happiness\"\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000, debug=True)","sub_path":"service-4/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"280565338","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom datetime import timedelta\nfrom django.db import models\nfrom django.utils.html import format_html\n\nfrom artnuser.models import ArtnUser\nfrom artwork.models import Artwork\nfrom frame.models import Frame\nfrom pod.models import MakePod\nfrom stock.models import UnitStock\nfrom coupon.models import UnitCoupon\nfrom secret.models import PurchaseItem\n\n\nclass BaseOrderInfo(models.Model):\n\n class Meta:\n abstract = True\n\n # billing information (주문하시는 분)\n billing_name = models.CharField(verbose_name='주문자명', max_length=100)\n billing_phone = models.CharField(verbose_name='휴대전화', max_length=20)\n billing_email = models.EmailField(verbose_name='이메일', max_length=255)\n # shipping information (배송 받으시는 분)\n shipping_name = models.CharField(\n verbose_name='수령인명', max_length=100)\n shipping_phone_1 = models.CharField(\n verbose_name='일반전화', max_length=20, null=True, blank=True)\n shipping_phone_2 = models.CharField(\n verbose_name='휴대폰', max_length=20)\n shipping_zip = models.CharField(\n verbose_name='우편번호', max_length=10)\n shipping_address_1 = models.CharField(\n verbose_name='기본주소', max_length=255)\n shipping_address_2 = models.CharField(\n verbose_name='상세주소', max_length=255, null=True, blank=True)\n requirement = models.TextField(verbose_name='배송메모', null=True, blank=True)\n\n\nclass SellingMall(models.Model):\n ARTN = 1\n ALIGN = 2\n ARTN = 3\n SUPPORT = 4\n FRAME = 5\n\n SALES_CHANNEL = (\n (ARTN, '아트앤'),\n (ALIGN, '온라인제휴'),\n (ARTN, '오프라인'),\n (SUPPORT, '협찬'),\n (FRAME, '액자공방'),\n )\n channel = models.IntegerField(\n verbose_name='채널명', choices=SALES_CHANNEL, default=1)\n name = models.CharField(verbose_name='판매샵', max_length=50)\n\n def __unicode__(self):\n channel_name = ''\n 
if self.channel == 1:\n channel_name = u'그림닷컴'\n elif self.channel == 2:\n channel_name = u'온라인제휴'\n elif self.channel == 3:\n channel_name = u'오프라인'\n elif self.channel == 4:\n channel_name = u'협찬'\n else:\n channel_name = u'액자공방'\n return channel_name + ' - ' + self.name\n\n def channel_name(self):\n if self.channel == 1:\n return u'그림닷컴'\n elif self.channel == 2:\n return u'온라인제휴'\n elif self.channel == 3:\n return u'오프라인'\n elif self.channel == 4:\n return u'협찬'\n else:\n return u'액자공방'\n\n class Meta:\n db_table = 'selling_mall'\n verbose_name = '판매샵'\n verbose_name_plural = '판매샵 관리'\n\n\nclass Order(BaseOrderInfo):\n # each individual status(각 개별 상태)\n SUBMITTED = 1\n PAYING = 2\n PAID = 3\n CANCELLED = 4\n\n ORDER_STATUSES = (\n (SUBMITTED, '주문진행'),\n (PAYING, '주문완료(결제이전)'),\n (PAID, '주문완료(결제완료)'),\n (CANCELLED, '결제취소'),\n )\n\n PARCEL = 1\n QUICK = 2\n\n DELIVERY_METHOD = (\n (PARCEL, '택배'),\n (QUICK, '퀵 서비스'),\n )\n\n CREDIT = 1\n ACCOUNT = 2\n VIRTUAL = 3\n POINT = 4\n LATER = 5\n\n PAYMENT_METHOD = (\n (CREDIT, '신용카드'),\n (ACCOUNT, '계좌 이체'),\n (VIRTUAL, '가상계좌'),\n (POINT, '적립금결제'),\n (LATER, '제휴사 후불'),\n )\n\n # order info (주문 정보)\n use_from = models.CharField(verbose_name='브라우저', max_length=8)\n pay_from = models.CharField(verbose_name='주문형태', max_length=4)\n\n order_line = models.ForeignKey(SellingMall, verbose_name='판매처', default=1)\n order_number = models.CharField(\n verbose_name='주문번호', max_length=12, null=True, blank=True)\n date = models.DateTimeField(verbose_name='주문일', auto_now_add=True)\n order_confirm = models.BooleanField(verbose_name='확정(출하)', default=False)\n updated_at = models.DateTimeField(\n verbose_name='결제일', null=True, blank=True)\n status = models.IntegerField(verbose_name='주문(상태)',\n choices=ORDER_STATUSES, default=SUBMITTED)\n add_points = models.IntegerField(verbose_name='적립금추가', default=0)\n used_coupon = models.IntegerField(verbose_name='쿠폰할인', default=0)\n used_points = models.IntegerField(verbose_name='적립금할인', default=0)\n used_coupon_id = models.IntegerField(verbose_name='사용한쿠폰 id', default=0)\n m_money = models.IntegerField(verbose_name='관리자활인', default=0)\n c_money = models.IntegerField(verbose_name='취소금액', default=0)\n total = models.IntegerField(verbose_name='상품합계', default=0)\n shipping_cost = models.IntegerField(verbose_name='배송료', default=0)\n amount = models.IntegerField(verbose_name='결제가격', default=0)\n payment_method = models.IntegerField(verbose_name='결제방법',\n choices=PAYMENT_METHOD, null=True, blank=True)\n shipping_method = models.IntegerField(verbose_name='배송방법',\n choices=DELIVERY_METHOD, null=True, blank=True)\n user = models.ForeignKey(\n ArtnUser, verbose_name='회원', null=True, blank=True)\n\n def __unicode__(self):\n return self.order_number\n\n class Meta:\n db_table = 'orders'\n ordering = ['-date']\n verbose_name = '주문'\n verbose_name_plural = '주문 관리'\n\n def save(self, *args, **kwargs):\n super(Order, self).save(*args, **kwargs)\n if not self.order_number or self.order_number == '':\n self.order_number = 'GC' + str(10000000 + self.id)\n self.save(update_fields=['order_number'])\n\n def ship_method_info(self):\n if self.shipping_method == 1:\n return u'택배'\n else:\n return u'퀵'\n\n def ship_method_status(self):\n if self.status == 1:\n return '주문진행'\n elif self.status == 2:\n return '주문완료(결제이전)'\n elif self.status == 3:\n return '주문완료(결제완료)'\n else:\n return '결제취소'\n\n def latest_process_order(self):\n late_process = []\n order_items = self.orderitem_set.all()\n for oi in order_items:\n late_process.append(oi.status)\n return 
min(late_process)\n\n def pay_method_info(self):\n payment = '선택전'\n if self.payment_method:\n if self.payment_method == 1:\n payment = '신용카드'\n elif self.payment_method == 2:\n payment = '계좌이체'\n elif self.payment_method == 3:\n payment = '가상계좌'\n elif self.payment_method == 4:\n payment = '적립금결제'\n else:\n payment = '제휴사 후불'\n return payment\n\n def pay_method_type(self):\n payment = '선택전'\n if self.payment_method:\n if self.payment_method == 5:\n payment = '제휴사 후불'\n else:\n payment = 'PG결제'\n return payment\n\n def about_coupon(self):\n unit_coupon = None\n if not self.used_coupon_id == 0:\n unit_coupon = UnitCoupon.objects.get(id=self.used_coupon_id)\n return unit_coupon\n\n def excel_requirement(self):\n if self.requirement:\n ship_requirement = self.requirement.split(' ')\n result = \" \".join(ship_requirement)\n return result\n else:\n return u''\n\n def shipping_and_payment(self):\n shipping = '선택전'\n payment = '선택전'\n if self.shipping_method:\n if self.shipping_method == 1:\n shipping = '택배'\n if self.shipping_method == 2:\n shipping = '퀵'\n if self.payment_method:\n if self.payment_method == 1:\n payment = '신용카드'\n if self.payment_method == 2:\n payment = '계좌이체'\n if self.payment_method == 3:\n payment = '가상계좌'\n if self.payment_method == 4:\n payment = '적립금결제'\n if self.payment_method == 5:\n payment = '제휴사 후불'\n\n return format_html('
%s - %s
' % (shipping, payment))\n\n def etc_info(self):\n return format_html('
%s - %s
' % (self.use_from, self.pay_from))\n\n def order_status_detail(self):\n if self.order_confirm:\n is_detail_status = '/static/admin/img/icon-yes.gif'\n else:\n if self.status == 2:\n is_detail_status = '/static/admin/img/icon-ready.gif'\n elif self.status == 4:\n is_detail_status = '/static/admin/img/icon-cancel.gif'\n else:\n is_detail_status = '/static/admin/img/icon-no.gif'\n\n return format_html(u'\"yes_no_sign\"' % is_detail_status)\n\n def is_reorder(self):\n result = False\n if self.user:\n if '그림닷컴' in str(self.billing_name):\n pass\n else:\n orders = Order.objects.filter(user=self.user, status=3)\n for order in orders:\n if self.date > order.date:\n result = True\n return result\n\n\n shipping_and_payment.short_description = '배송/결제 방법'\n shipping_and_payment.allow_tags = True\n etc_info.short_description = '기타정보'\n etc_info.allow_tags = True\n order_status_detail.short_description = '확���(출하)'\n order_status_detail.allow_tags = True\n order_status_detail.admin_order_field = 'order_confirm'\n\n\nclass ManagerMemo(models.Model):\n order = models.ForeignKey(Order, verbose_name='해당주문')\n memo = models.CharField(verbose_name='관리자메모', max_length=200)\n created_at = models.DateTimeField(verbose_name='작성일', auto_now_add=True)\n user = user = models.ForeignKey(\n ArtnUser, verbose_name='회원', null=True, blank=True)\n\n def __unicode__(self):\n return self.order.order_number\n\n class Meta:\n db_table = 'manager_memo'\n ordering = ['-created_at']\n verbose_name = '관리자메모'\n verbose_name_plural = '관리자메모 관리'\n\n\nclass OrderItem(models.Model):\n PAID = 1\n PREPARE = 2\n F_WAIT = 3\n FRAME = 4\n S_WAIT = 5\n SHIPPED = 6\n CANCELLED = 7\n ORDER_STATUS = (\n (PAID, '결제완료'),\n (PREPARE, '제품준비중'),\n (F_WAIT, '액자제작대기'),\n (FRAME, '액자제작중'),\n (S_WAIT, '발송대기'),\n (SHIPPED, '발송완료'),\n (CANCELLED, '주문취소'),\n )\n order = models.ForeignKey(Order, verbose_name='주문번호')\n status = models.IntegerField(\n verbose_name='배송상태', choices=ORDER_STATUS, default=1)\n o_artwork = models.CharField(verbose_name='제품최종코드', max_length=50)\n frame = models.ForeignKey(\n Frame, verbose_name='선택액자', null=True, blank=True)\n t_width = models.FloatField(verbose_name='가로(전체)', default=0)\n t_height = models.FloatField(verbose_name='세로(전체)', default=0)\n mat_info = models.CharField(verbose_name='매트정보', max_length=200)\n g_price = models.IntegerField(verbose_name='그림가격', default=0)\n f_price = models.IntegerField(verbose_name='액자가격', default=0)\n m_price = models.IntegerField(verbose_name='매트가격', default=0)\n t_price = models.IntegerField(verbose_name='전체가격', default=0)\n quantity = models.IntegerField(verbose_name='수량', default=1)\n st_one = models.DateTimeField(verbose_name='제품준비중', null=True, blank=True)\n st_two = models.DateTimeField(verbose_name='액자제작대기', null=True, blank=True)\n st_three = models.DateTimeField(\n verbose_name='액자제작중', null=True, blank=True)\n st_four = models.DateTimeField(verbose_name='발송대기', null=True, blank=True)\n st_five = models.DateTimeField(verbose_name='발송완료', null=True, blank=True)\n st_six = models.DateTimeField(verbose_name='주문취소', null=True, blank=True)\n sms_one = models.DateTimeField(verbose_name='수입안내', null=True, blank=True)\n sms_two = models.DateTimeField(verbose_name='입고알림', null=True, blank=True)\n sms_three = models.DateTimeField(verbose_name='배송지연안내', null=True, blank=True)\n memo = models.TextField(verbose_name='제작메모', null=True, blank=True)\n review_ox = models.BooleanField(verbose_name='리뷰여부', default=False)\n created_at = models.DateTimeField(verbose_name='생성일', 
auto_now_add=True)\n\n def __unicode__(self):\n return self.order.order_number + '(' + self.o_artwork + ')'\n\n class Meta:\n db_table = 'order_items'\n verbose_name = '주문상품'\n verbose_name_plural = '주문상품 관리'\n\n def main_artwork(self):\n artwork_code = self.o_artwork.split('_')[0]\n artwork = Artwork.objects.get(artwork_code=artwork_code)\n return artwork\n\n def artwork_print_type(self):\n if self.main_artwork().division == 2:\n a_o = MakePod.objects.get(pod_code=self.o_artwork)\n return a_o.print_selected\n else:\n a_o = UnitStock.objects.get(option_code=self.o_artwork)\n return a_o.print_selected\n\n def about_mat(self):\n return self.mat_info.split('-')\n\n def option_artwork(self):\n if '-' in self.o_artwork:\n if len(Artwork.objects.filter(artwork_code=self.o_artwork.split('_')[0])) == 0:\n option = None\n return option\n else:\n real_artwork = Artwork.objects.get(\n artwork_code=self.o_artwork.split('_')[0])\n if real_artwork.option_info == '':\n option = None\n return option\n else:\n if real_artwork.option_info[0] == 'A':\n if len(MakePod.objects.filter(pod_code=real_artwork.option_info)) == 0:\n option = None\n return option\n else:\n option = MakePod.objects.get(\n pod_code=real_artwork.option_info)\n return option\n else:\n if len(UnitStock.objects.filter(option_code=real_artwork.option_info)) == 0:\n option = None\n return option\n else:\n option = UnitStock.objects.get(\n option_code=real_artwork.option_info)\n return option\n\n else:\n if self.o_artwork[0] == 'A':\n if len(MakePod.objects.filter(pod_code=self.o_artwork)) == 0:\n option = None\n return option\n else:\n option = MakePod.objects.get(pod_code=self.o_artwork)\n return option\n else:\n if len(UnitStock.objects.filter(option_code=self.o_artwork)) == 0:\n option = None\n return option\n else:\n option = UnitStock.objects.get(option_code=self.o_artwork)\n return option\n\n def order_item_status(self):\n if self.status == 1:\n return '결제완료'\n elif self.status == 2:\n return '제품준비중'\n elif self.status == 3:\n return '액자제작대기'\n elif self.status == 4:\n return '액자제작중'\n elif self.status == 5:\n return '발송대기'\n elif self.status == 6:\n return '출고완료'\n else:\n return '주문취소'\n\n def delivery_info(self):\n if self.order.shipping_method == 1:\n if len(self.delivery_set.all()) == 0:\n return u'송장입력 전'\n else:\n com_and_invoice = self.delivery_set.all()[0]\n return '[' + com_and_invoice.agent + ']' + ' ' + com_and_invoice.invoice\n\n def delivery_invoice(self):\n if len(self.delivery_set.all()) == 0:\n return ''\n else:\n this_delivery = self.delivery_set.all()[0].invoice\n return str(this_delivery.split('-')[0]) + str(this_delivery.split('-')[1]) + str(this_delivery.split('-')[2])\n\n def delivery_invoice_sms(self):\n invo = ''\n if len(self.delivery_set.all()) == 0:\n pass\n else:\n invo = self.delivery_set.all()[0].invoice\n return invo\n\n def shipping_due_date(self):\n artwork_code = self.o_artwork.split('_')[0]\n a = Artwork.objects.get(artwork_code=artwork_code)\n if a.division == 5:\n artwork_code = self.o_artwork.split('-')[0]\n a = Artwork.objects.get(artwork_code=artwork_code)\n\n due_date = self.order.updated_at + \\\n timedelta(days=a.related_com.max_period)\n\n if a.division == 1 or a.division == 3 or a.division == 4:\n stock = a.artwork_option.unitstock_set.all()[0].now_stock\n if stock < 0:\n pass\n else:\n due_date = self.order.updated_at + timedelta(days=5)\n\n elif a.division == 2:\n due_date = due_date = self.order.updated_at + timedelta(days=5)\n\n return due_date\n\n def storage_num(self):\n artwork_code 
= self.o_artwork.split('_')[0]\n a = Artwork.objects.get(artwork_code=artwork_code)\n storage = ''\n if a.division == 2:\n storage = '출력그림'\n else:\n if a.division == 5:\n if a.option_info[0] == 'B':\n unit = UnitStock.objects.get(option_code=a.option_info)\n if unit.cabinet:\n storage = str(unit.cabinet.num)\n else:\n storage = '보관함지정 필'\n else:\n storage = '출력그림'\n else:\n unit = UnitStock.objects.get(option_code=self.o_artwork)\n if unit.cabinet:\n storage = str(unit.cabinet.num)\n else:\n storage = '보관함지정 필'\n\n return storage\n\n def proforma_status(self):\n code = self.o_artwork.split('_')[0]\n po_num = 0\n t_value = ''\n pi = PurchaseItem.objects.filter(\n artwork_code=code, po__stocked_ox=False).order_by('created_at')\n for pp in pi:\n po_num += pp.order_quantity\n\n if len(pi) == 0:\n t_value = '발주 전'\n else:\n t_value = str(pi[0].po.storage_due_date)\n\n result = [po_num, t_value]\n return result\n\n def excel_frame_title(self):\n if self.main_artwork().division == 5:\n return self.main_artwork().frame.title\n else:\n if self.f_price == 0:\n return u'액자없음'\n else:\n return self.frame.title\n\n def excel_g_size(self):\n return str(int(self.option_artwork().width_cm * 10)) + 'X' + str(int(self.option_artwork().height_cm * 10))\n\n def excel_m_size(self):\n if self.main_artwork().division == 5:\n if int(self.main_artwork().mat_info.split('-')[0]) == 0:\n return u''\n else:\n add_mat_size = int(\n self.main_artwork().mat_info.split('-')[0]) * 20 - 10\n return str(int(self.option_artwork().width_cm * 10 + add_mat_size)) + 'X' + str(int(self.option_artwork().height_cm * 10 + add_mat_size))\n else:\n if int(self.mat_info.split('-')[0]) == 0:\n return u''\n else:\n add_mat_size = int(self.mat_info.split('-')[0]) * 20 - 10\n return str(int(self.option_artwork().width_cm * 10 + add_mat_size)) + 'X' + str(int(self.option_artwork().height_cm * 10 + add_mat_size))\n\n def mat_one_color(self):\n if self.main_artwork().division == 5:\n mat = self.main_artwork().mat_info.split('-')\n if mat[1] == '0':\n return u''\n else:\n if mat[2] == '0':\n return u'#FFFBF8'\n else:\n return mat[2]\n else:\n mat = self.mat_info.split('-')\n if mat[1] == '0':\n return u''\n else:\n if mat[2] == '0':\n return u'#FFFBF8'\n else:\n return mat[2]\n\n def mat_two_color(self):\n if self.main_artwork().division == 5:\n mat = self.main_artwork().mat_info.split('-')\n if mat[1] == '0' or mat[1] == '1':\n return u''\n else:\n if mat[3] == '0':\n return u'#FFFBF8'\n else:\n return mat[3]\n else:\n mat = self.mat_info.split('-')\n if mat[1] == '0' or mat[1] == '1':\n return u''\n else:\n if mat[3] == '0':\n return u'#FFFBF8'\n else:\n return mat[3]\n\n def mat_three_color(self):\n if self.main_artwork().division == 5:\n mat = self.main_artwork().mat_info.split('-')\n if mat[1] == '0' or mat[1] == '1' or mat[1] == '2':\n return u''\n else:\n if mat[4] == '0':\n return u'#FFFBF8'\n else:\n return mat[4]\n else:\n mat = self.mat_info.split('-')\n if mat[1] == '0' or mat[1] == '1' or mat[1] == '2':\n return u''\n else:\n if mat[4] == '0':\n return u'#FFFBF8'\n else:\n return mat[4]\n\n def excel_memo(self):\n if self.memo:\n make_order = self.memo.split(' ')\n result = \" \".join(make_order)\n return result\n else:\n return u''\n\n def excel_box_type(self):\n w = int(self.t_width * 10)\n h = int(self.t_height * 10)\n if not self.f_price == 0:\n if self.frame.frame_division.f_type_code == 'LS':\n w = int(self.t_width * 10 + 50)\n h = int(self.t_height * 10 + 50)\n if self.main_artwork().division == 5:\n if 
self.main_artwork().frame.frame_division.f_type_code == 'LS':\n w = int(self.t_width * 10 + 50)\n h = int(self.t_height * 10 + 50)\n\n if self.main_artwork().division == 3 or self.main_artwork().division == 4:\n if self.main_artwork().frame_sort.f_type_code == 'LS':\n w = int(self.t_width * 10 + 50)\n h = int(self.t_height * 10 + 50)\n\n if self.main_artwork().division == 1 or self.main_artwork().division == 2:\n if self.f_price == 0:\n return u'2'\n else:\n if w >= h:\n if w <= 480:\n return u'2'\n elif w > 480 and w <= 680:\n return u'4'\n elif w > 480 and w <= 1000:\n if h <= 700:\n return u'4'\n else:\n return u'5'\n else:\n return u'5'\n else:\n if h <= 480:\n return u'2'\n elif h > 480 and h <= 680:\n return u'4'\n elif h > 480 and h <= 1000:\n if w <= 700:\n return u'4'\n else:\n return u'5'\n else:\n return u'5'\n else:\n if w >= h:\n if w <= 480:\n return u'2'\n elif w > 480 and w <= 680:\n return u'4'\n elif w > 480 and w <= 1000:\n if h <= 700:\n return u'4'\n else:\n return u'5'\n else:\n return u'5'\n else:\n if h <= 480:\n return u'2'\n elif h > 480 and h <= 680:\n return u'4'\n elif h > 480 and h <= 1000:\n if w <= 700:\n return u'4'\n else:\n return u'5'\n else:\n return u'5'\n\n def a_image(self):\n code = self.o_artwork.split('_')[0]\n a = Artwork.objects.get(artwork_code=code)\n return format_html('\"product_images\"' % (a.image.url, a.image.url))\n\n def f_image(self):\n if self.frame:\n if self.frame.image_pack:\n return format_html('\"product_images\"' % (self.frame.image_pack.thumb_path, self.frame.image_pack.thumb_path))\n else:\n return format_html(u'
이미지없음')\n        else:\n            return format_html(u'이미지없음')\n\n    def who_ordered_it(self):\n        if self.order.user:\n            return format_html(u'%s' % self.order.user.username)\n        else:\n            return format_html(u'비회원')\n\n    def date_registered(self):\n        d = str(Artwork.objects.get(\n            artwork_code=self.o_artwork.split('_')[0]).date)\n        return format_html(u'%s
' % d)\n\n def time_pass(self):\n t = datetime.date.today()\n order_date = self.order.updated_at + timedelta(hours=9)\n d = datetime.date(order_date.year, order_date.month, order_date.day)\n differ = t - d\n return differ.days\n\n def orderitem_type(self):\n oi_type = ''\n if self.main_artwork().division == 1:\n if self.frame:\n oi_type = '액자제작'\n else:\n oi_type = '그림만주문'\n elif self.main_artwork().division == 2:\n if self.frame:\n oi_type = '액자제작'\n else:\n oi_type = '그림만주문'\n elif self.main_artwork().division == 3:\n oi_type = '반품액자'\n elif self.main_artwork().division == 4:\n oi_type = '완제품'\n else:\n oi_type = '액자제작'\n return oi_type\n\n def print_or_import(self):\n re = ''\n indi = self.o_artwork[0]\n if indi == 'B':\n re = u'수입'\n return re\n\n def now_stock_status(self):\n return self.main_artwork().artwork_stock_counter()\n\n def now_order_quantity(self):\n artwork_code = self.o_artwork.split('_')[0]\n pi = PurchaseItem.objects.filter(\n artwork_code=artwork_code, po__stocked_ox=False)\n order_count = 0\n for p in pi:\n order_count += p.order_quantity\n return order_count\n\n def oi_artwork_last_size(self):\n return str(self.t_width) + ' X ' + str(self.t_height)\n\n a_image.short_description = '제품이미지'\n a_image.allow_tags = True\n f_image.short_description = '액자이미지'\n f_image.allow_tags = True\n artwork_print_type.short_description = '인쇄타입'\n artwork_print_type.allow_tags = True\n who_ordered_it.short_description = '회원명'\n who_ordered_it.allow_tags = True\n now_stock_status.short_description = '재고'\n now_stock_status.allow_tags = True\n now_order_quantity.short_description = '발주수량'\n now_order_quantity.allow_tags = True\n date_registered.short_description = '등록일'\n date_registered.allow_tags = True\n oi_artwork_last_size.short_description = '사이즈(cm)'\n oi_artwork_last_size.allow_tags = True\n\n\nclass Delivery(models.Model):\n item = models.ForeignKey(OrderItem, verbose_name='해당주문상품')\n agent = models.CharField(\n verbose_name='배송업체', max_length=10, default='CJ 대한통운')\n invoice = models.CharField(verbose_name='송장번호', max_length=20)\n\n class Meta:\n db_table = 'orderitem_delivery'\n verbose_name = '상품배송'\n verbose_name_plural = '상품배송 관리'\n\n\nclass CancelOrderItem(models.Model):\n B_FRAME = 1\n B_SEND = 2\n S_DONE = 3\n CANCEL_STATUS = (\n (B_FRAME, '액자제작전'),\n (B_SEND, '발송이전'),\n (S_DONE, '발송완료'),\n )\n\n NU = 0\n YES = 1\n NO = 2\n RETURN_ARTWORK = (\n (NU, '-'),\n (YES, 'YES'),\n (NO, 'NO'),\n\n )\n\n oi_id = models.IntegerField(verbose_name='주문상품(ID)', default=0)\n billing_name = models.CharField(verbose_name='주문인', max_length=100)\n status = models.IntegerField(\n verbose_name='취소(상태)', choices=CANCEL_STATUS, default=B_FRAME)\n new_code = models.CharField(\n verbose_name='반품작품코드', max_length=50, null=True, blank=True)\n ox_return = models.IntegerField(\n verbose_name='반품수거여부', choices=RETURN_ARTWORK, default=NU)\n memo = models.TextField(verbose_name='제작메모', null=True, blank=True)\n created_at = models.DateTimeField(verbose_name='생성일', auto_now_add=True)\n\n class Meta:\n db_table = 'cancel_order_item'\n ordering = ['-created_at']\n verbose_name = '취소상품'\n verbose_name_plural = '취소상품 관리'\n\n def __unicode__(self):\n return self.billing_name\n\n def oi_order_number(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n return oi.order.order_number\n\n def oi_updated_at(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n if oi.order.updated_at:\n order_date = oi.order.updated_at\n return order_date\n else:\n return '정보없음'\n\n def oi_cancel_at(self):\n cancel_date = 
self.created_at\n return cancel_date\n\n def oi_artwork_code(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n return oi.main_artwork().artwork_code\n\n def oi_artwork_image(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n return oi.a_image()\n\n def oi_artwork_last_size(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n return str(oi.t_width) + ' X ' + str(oi.t_height) + 'cm'\n\n def oi_frame_name_or_type(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n if oi.main_artwork().division == 1:\n if oi.frame:\n oi_type = oi.frame.title\n else:\n oi_type = '그림만주문'\n elif oi.main_artwork().division == 2:\n if oi.frame:\n oi_type = oi.frame.title\n else:\n oi_type = '그림만주문'\n elif oi.main_artwork().division == 3:\n oi_type = '반품액자'\n elif oi.main_artwork().division == 4:\n oi_type = '완제품'\n else:\n oi_type = oi.main_artwork().frame.title\n return oi_type\n\n def oi_mat_info(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n if oi.frame:\n if oi.mat_info == '0-0-0-0-0':\n oi_mat = '매트정보없음'\n else:\n mi = oi.mat_info.split('-')\n mi_0 = mi[0]\n mi_1 = mi[1]\n mi_2 = mi[2]\n if mi_2 == '0':\n mi_2 = '#FFFBF8'\n mi_3 = mi[3]\n if mi_3 == '0':\n mi_3 = '#FFFBF8'\n mi_4 = mi[4]\n if mi_4 == '0':\n mi_4 = '#FFFBF8'\n\n if mi_1 == '1':\n mi_3 = ''\n mi_4 = ''\n if mi_1 == '2':\n mi_4 = ''\n return format_html(u'
%scm, %s단 %s %s %s
' % (mi_0, mi_1, mi_2, mi_3, mi_4))\n else:\n if oi.main_artwork().division == 5:\n if oi.main_artwork().mat_info == '0-0-0-0-0':\n oi_mat = '매트정보없음'\n else:\n mi = oi.main_artwork().mat_info.split('-')\n mi_0 = mi[0]\n mi_1 = mi[1]\n mi_2 = mi[2]\n if mi_2 == '0':\n mi_2 = '#FFFBF8'\n mi_3 = mi[3]\n if mi_3 == '0':\n mi_3 = '#FFFBF8'\n mi_4 = mi[4]\n if mi_4 == '0':\n mi_4 = '#FFFBF8'\n\n if int(mi_1) == 1:\n mi_3 = ''\n mi_4 = ''\n if int(mi_1) == 2:\n mi_4 = ''\n return format_html(u'
%scm, %s단 %s %s %s
' % (mi_0, mi_1, mi_2, mi_3, mi_4))\n else:\n oi_mat = '매트정보없음'\n return oi_mat\n\n def oi_this_artwork_price(self):\n oi = OrderItem.objects.get(id=self.oi_id)\n return oi.t_price\n\n def oi_is_active(self):\n if self.new_code == '':\n return ''\n else:\n try:\n a = Artwork.objects.get(artwork_code=self.new_code)\n if a.is_active:\n is_active_status = '/static/admin/img/icon-yes.gif'\n else:\n is_active_status = '/static/admin/img/icon-no.gif'\n return format_html(u'\"yes_no_sign\"' % (is_active_status))\n except Artwork.DoesNotExist:\n return '코드불일치'\n\n oi_order_number.short_description = '주문번호'\n oi_order_number.allow_tags = True\n oi_updated_at.short_description = '결제일'\n oi_updated_at.allow_tags = True\n oi_cancel_at.short_description = '취소일'\n oi_cancel_at.allow_tags = True\n\n oi_artwork_code.short_description = '취소작품코드'\n oi_artwork_code.allow_tags = True\n oi_artwork_image.short_description = '제품이미지'\n oi_artwork_image.allow_tags = True\n oi_artwork_last_size.short_description = '사이즈(cm)'\n oi_artwork_last_size.allow_tags = True\n oi_frame_name_or_type.short_description = '액자사양/주문타입'\n oi_frame_name_or_type.allow_tags = True\n oi_mat_info.short_description = '매트사양'\n oi_mat_info.allow_tags = True\n oi_this_artwork_price.short_description = '가격'\n oi_this_artwork_price.allow_tags = True\n oi_is_active.short_description = '전시여부'\n oi_is_active.allow_tags = True\n","sub_path":"checkout/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":36025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"422003897","text":"##\n## Imprima una tabla en formato CSV que contenga por registro,\n## la cantidad de elementos de las columnas 4 y 5\n## (filas en el archivo)\n##\n## E,3,5\n## A,3,4\n## B,4,4\n## ...\n## C,4,3\n## E,2,3\n## E,3,3\n##\n#Punto q11\nfile = open('data.csv','r').readlines()\nfile = [row[0:-1] for row in file]\nfile = [row.split('\\t') for row in file]\n\ndata =[]\ni = 0\n\nfor registro in file:\n data.append([])\n for e in registro:\n a = e.split(',')\n if (len(a) == 1):\n data[i].append(a[0])\n else:\n data[i].append(a)\n i +=1\n\nfor registro2 in data:\n print(registro2[0] + ',' + str(len(registro2[3])) + ',' + str(len(registro2[4])))","sub_path":"q11.py","file_name":"q11.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"529523329","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python3\n\n\nfrom no import no\nfrom Grafo import Grafo\nfrom Controle import Controle\nimport string\n\n#Cria um objeto controle\nc = Controle()\n\ndef Configura_Rede(n, Alcance):\n\n\tnos = []\n\tIDs\t= []\n\n\t#Array com as 26 letras do alfabeto\n\tletras = list(string.ascii_lowercase)\n\n\t#Cria um objeto Grafo\n\tg = Grafo()\n\t\n\t#Cria N objetos nó\n\tfor i in range(n):\n\t\tnos.append(no(letras[i], Alcance, g, c))\n\t\tIDs.append(letras[i])\n\n\t#Preenche o grafo com referencias dos nós\n\tMatrizAdj = g.Gera_MatrizAdj(n, nos, Alcance, IDs)\n\n\t#Preeche o controle com referencias dos nós\n\t#Referencia = c.Cria_Controle(n, nos)\n\n\treturn nos, MatrizAdj\n\ndef EnviaMensagem(nos, Origem, Destino, mensagem):\n\n\tletras = list(string.ascii_lowercase)\n\n\tindice = 0\n\n\tfor i in range(len(letras)):\n\t\tif(Origem == letras[i]):\n\t\t\tindice = i\n\n\tnos[indice].EnviaMensagem(Destino, mensagem)\n\n\t_len = len(c._listaAcesso)\n\t\n\twhile(_len > 0):\n\t\tarquivo = open('Script.txt', 
'a')\n\n\t\tarquivo.writelines(\"\\n\"+'T'+str(i)+\":\\n\")\n\n\t\tarquivo.close()\n\n\t\ti+=1\n\n\t\tc.Libera_Acesso(0)\n\n\t\t_len = len(c._listaAcesso)\n\ndef DescobreVizinhos(nNos, nos):\n\n\tfor i in range(nNos):\n\t\tnos[i].Hello()\n\n\t_len = len(c._listaAcesso)\n\t\n\twhile(_len > 0):\n\t\tarquivo = open('Script.txt', 'a')\n\n\t\tarquivo.writelines(\"\\n\"+'T'+str(i)+\":\\n\")\n\n\t\tarquivo.close()\n\n\t\ti+=1\n\n\t\tc.Libera_Acesso(0)\n\n\t\t_len = len(c._listaAcesso)\n\ndef AtualizaRotas(nNos, nos):\n\n\t########################################################### Atualiza as rotas atuais de cada Nó\n\n\tfor i in range(nNos):\n\t\tarquivo = open('Script.txt', 'a')\n\n\t\tarquivo.writelines(\"\\n\"+\"MAC: \"+str(nos[i]._MAC)+\" \"+str(nos[i]._Camada_Rede.TabelaRotas)+\"\\n\")\n\n\t\tarquivo.close()\n\ndef main():\n\n\t#nNos = 5\n\tnNos = input(\"nNos\t :\t\")\n\t#Alcance = 500\n\tAlcance = input(\"Alcance:\t\")\n\n\t#Matriz de Adjacência\n\tMatrizAdj = []\n\n\t#Cria uma rede com N roteadores de alcance X\n\tnos, MatrizAdj = Configura_Rede(nNos, Alcance)\n\n\t#Reseta o Script da simulação anterior\n\tarquivo = open('Script.txt', 'w')\n\tarquivo.close()\n\n\tAtualizaRotas(nNos, nos)\n\n\tDescobreVizinhos(nNos, nos)\n\n\tAtualizaRotas(nNos, nos)\n\n\twhile(True):\n\n\t\tOrigem \t= raw_input(\"Origem\t :\t\")\n\t\tDestino = raw_input(\"Destino : \t\")\n\t\tMsg\t\t= raw_input(\"Mensagem: \t\")\n\n\t\tEnviaMensagem(nos, Origem, Destino, Msg)\n\n\t\tAtualizaRotas(nNos, nos)\n\nif __name__==\"__main__\":\n\tmain()","sub_path":"Código/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"304669247","text":"from argparse import ArgumentParser\n\nfrom server.run import run_server\nfrom constants import LOG_FILE\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--port', action='append_const', const=int, default=8000,\n help='Specifies the server port number. 
Defaults to %(default)s.')\n parser.add_argument('--debug', action='store_true',\n help='Allows for the hot-swap of server code and prints verbose logging to STDERR.')\n args = parser.parse_args()\n run_server(**vars(args), host='localhost' if args.debug else '0.0.0.0', log_file=None if args.debug else LOG_FILE)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"6968340","text":"if __name__ == \"__main__\":\n # part 1\n with open(\"input/day3.txt\") as fp:\n lines = fp.readlines()\n\n lines = [x.strip() for x in lines]\n\n pos = 0\n skip = 3\n trees = 0\n\n for line in lines:\n if line[pos % len(line)] == \"#\":\n trees += 1\n\n pos += skip\n\n print(f\"part 1: {trees}\")\n\n # part 2\n with open(\"input/day3.txt\") as fp:\n lines = fp.readlines()\n\n lines = [x.strip() for x in lines]\n\n skips = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]\n total = 1\n\n for right, down in skips:\n trees = 0\n pos = 0\n skip_next_line = 0\n\n for line in lines:\n skip_next_line += 1\n\n if down == 2 and skip_next_line % 2 == 0:\n continue\n\n if line[pos % len(line)] == \"#\":\n trees += 1\n\n pos += right\n\n total *= trees\n\n print(f\"part 2: {total}\")\n","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"327815389","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\n\nimport utils.util as util\nimport utils.postclassifier as post_cls\nfrom utils.util import row_pairwise_distances, printx\nfrom utils.data import generate_syn_feature\nfrom utils.util import weights_init\n\n\nclass MLP_CRITIC(nn.Module):\n def __init__(self, opt): \n super(MLP_CRITIC, self).__init__()\n self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh)\n self.fc2 = nn.Linear(opt.ndh, 1)\n self.lrelu = nn.LeakyReLU(0.2, True)\n self.apply(weights_init)\n def forward(self, x, att):\n h = torch.cat((x, att), 1)\n h = self.lrelu(self.fc1(h))\n h = self.fc2(h)\n return h\n\nclass MLP_G(nn.Module):\n def __init__(self, opt):\n super(MLP_G, self).__init__()\n self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)\n self.fc2 = nn.Linear(opt.ngh, opt.resSize)\n self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace= True)\n self.relu = nn.ReLU(inplace=True)\n self.apply(weights_init)\n def forward(self, noise, att, index=0):\n h = torch.cat((noise, att), 1)\n h = self.lrelu(self.fc1(h))\n h = self.fc2(h)\n h = self.relu(h)\n return h\n\nclass MEAN_GRAPH(nn.Module):\n def __init__(self,opt):\n super(MEAN_GRAPH, self).__init__()\n self.opt = opt\n def forward(self, inputs):\n if self.opt.subtrick == 'mean':\n out = inputs.mean(dim = 0)\n elif self.opt.subtrick == 'min':\n out = inputs.min(dim = 0)[0]\n elif self.opt.subtrick == 'max':\n out = inputs.max(dim = 0)[0]\n else:\n assert False\n return out\n\nclass Graph_Cook(nn.Module):\n \n def __init__(self, opt):\n super(Graph_Cook, self).__init__()\n self.opt = opt\n self.model_graph = MEAN_GRAPH(opt)\n\n def _graph_making(self, ts_fea_input):\n ts_fea_input = ts_fea_input.reshape(\n self.opt.graph_size,\n self.opt.class_size*self.opt.sample_size, -1)\n list_graphs = []\n for g_idx in range(ts_fea_input.shape[0]):\n list_graphs.append(\n row_pairwise_distances(ts_fea_input[g_idx],\n sig= self.opt.graph_sigma))\n ts_graphs_vec = torch.cat(list_graphs).reshape(self.opt.graph_size, 
-1)\n return ts_graphs_vec\n\n def forward(self, x):\n ts_graphs_vec = self._graph_making(x)\n ts_graphs = self.model_graph(ts_graphs_vec)\n return ts_graphs\n\nclass LINEAR_LOGSOFTMAX(nn.Module):\n def __init__(self, input_dim, nclass):\n super(LINEAR_LOGSOFTMAX, self).__init__()\n self.fc = nn.Linear(input_dim, nclass)\n self.logic = nn.LogSoftmax(dim=1)\n def forward(self, x):\n o = self.logic(self.fc(x))\n return o\n\n# todo\ndef gen_dg_model(opt):\n # initialize generator and discriminator\n netG = MLP_G(opt)\n netD = MLP_CRITIC(opt)\n netD.to(opt.device)\n netG.to(opt.device)\n return netD, netG\n\ndef calc_gradient_penalty(opt, netD, real_data, fake_data, input_att):\n try:\n alpha = torch.rand(opt.batch_size, 1)\n alpha = alpha.expand(real_data.size())\n if opt.cuda: alpha = alpha.cuda()\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n if opt.cuda:interpolates = interpolates.cuda()\n interpolates.requires_grad = True\n disc_interpolates = netD(interpolates, input_att)\n ones = torch.ones(disc_interpolates.size())\n if opt.cuda: ones = ones.cuda()\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=ones,\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n \n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.lambda1\n return gradient_penalty\n except Exception as e:\n printx('err in calc_gradient_penalty():{}'.format(e))\n raise e\n\ndef save_model(opt, best_g_model, best_cls_model ):\n try:\n netG = best_g_model\n postcls_model = best_cls_model\n opt.model_g_name = '{}/G_{}_pth.tar'.format(opt.outname, opt.uuid_id)\n opt.model_c_name = '{}/C_{}_pth.tar'.format(opt.outname, opt.uuid_id)\n torch.save(netG.state_dict(), opt.model_g_name)\n torch.save(postcls_model.state_dict(), opt.model_c_name)\n\n except Exception as e:\n printx('wrong save model', 'red')\n raise e\n\ndef load_model(opt):\n try:\n if opt.mode == 'test':\n if opt.gzsl:\n netG, postcls_model = MLP_G(opt), LINEAR_LOGSOFTMAX(opt.resSize, opt.nclass_all)\n else:\n netG, postcls_model = MLP_G(opt), LINEAR_LOGSOFTMAX(opt.resSize, opt.nclass_unseen)\n \n netG.load_state_dict(torch.load(opt.model_g_name))\n postcls_model.load_state_dict(torch.load(opt.model_c_name))\n if opt.cuda:\n netG = netG.cuda()\n postcls_model = postcls_model.cuda()\n return netG, postcls_model\n \n elif opt.mode == 'vis':\n netG = MLP_G(opt)\n netG.load_state_dict(torch.load(opt.model_g_name))\n if opt.cuda:\n netG = netG.cuda()\n return netG\n\n except Exception as e:\n printx('wrong load model')\n raise e\n\ndef model_eval(opt, data, train_flag = False, netG=None, postcls_model=None):\n try:\n if not train_flag: assert postcls_model is not None\n netG.eval()\n if opt.gzsl:\n syn_feature, syn_label = generate_syn_feature(opt,\n netG,\n data.unseenclasses,\n data.attribute,\n opt.syn_num)\n train_X = torch.cat((data.train_feature, syn_feature), 0)\n train_Y = torch.cat((data.train_label, syn_label), 0)\n nclass = opt.nclass_all\n cls = post_cls.POST_CLS(train_X, train_Y, data,\n nclass, opt.cuda,\n opt.cls_lr,0.5, 25, opt.syn_num, True,\n train_flag, postcls_model=postcls_model)\n else:\n syn_feature, syn_label = generate_syn_feature(opt, netG,\n data.unseenclasses,\n data.attribute,\n opt.syn_num)\n cls = post_cls.POST_CLS(syn_feature,\n util.map_label(syn_label, data.unseenclasses),\n data,\n data.unseenclasses.size(0), opt.cuda,\n opt.cls_lr, 0.5, 25, opt.syn_num, False,\\\n train_flag, postcls_model=postcls_model)\n netG.train()\n return cls\n except Exception as 
e:\n printx('err in model_eval():{}'.format(e))\n raise e\n \n\nif __name__ == '__main__':\n x = torch.randn((10, 10))\n x = x.reshape(2, 5, 10)\n list_graphs = []\n for g_idx in range(x.shape[0]):\n list_graphs.append(row_pairwise_distances(x[g_idx]))\n x = torch.cat(list_graphs).reshape(x.shape[0], -1)\n","sub_path":"utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"350456080","text":"\"\"\"\nCRAYimage - a toolkit for processing images from a mobile phones' cameras\n exposed to a radiocative source.\n\n Developed primarily as a toolkit for data analysis in CRAYFIS experiment.\n\"\"\"\n\nfrom setuptools import setup, find_packages, Extension\nfrom codecs import open\nimport os.path as osp\nimport numpy as np\n\nfrom Cython.Build import cythonize\n\nhere = osp.abspath(osp.dirname(__file__))\n\nwith open(osp.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = 'crayimage',\n\n version='0.1.0',\n\n description=\"\"\"A toolkit for image analysis for Cosmic RAYs Found In Smartphones.\"\"\",\n\n long_description = long_description,\n\n url='https://github.com/maxim-borisyak/crayimage',\n\n author='CRAYFIS collaboration, Yandex School of Data Analysis and contributors.',\n author_email='mborisyak at yandex-team dot ru',\n\n maintainer = 'Maxim Borisyak',\n maintainer_email = 'mborisyak at yandex-team dot ru',\n\n license='MIT',\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Intended Audience :: Science/Research',\n\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Astronomy',\n\n 'License :: OSI Approved :: MIT License',\n\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n\n keywords='CRAYFIS image toolkit',\n\n packages=find_packages(exclude=['contrib', 'examples', 'docs', 'tests']),\n\n extras_require={\n 'dev': ['check-manifest'],\n 'test': ['nose>=1.3.0'],\n },\n\n install_requires=[\n 'tqdm',\n 'numpy',\n 'scipy',\n 'joblib',\n 'matplotlib',\n 'theano',\n 'lasagne',\n 'cython',\n 'scikit-learn'\n ],\n\n include_package_data=True,\n\n package_data = {\n 'crayimage' : [\n 'index_files/*.json'\n ]\n },\n\n ext_modules = [\n module\n\n for target in [\n 'crayimage/imgutils/*.pyx',\n 'crayimage/hotornot/bayesian/*.pyx',\n 'crayimage/hotornot/em/*.pyx',\n 'crayimage/tracking/generation/*.pyx',\n 'crayimage/simulation/particle/*.pyx',\n 'crayimage/nn/updates/*.pyx'\n ]\n\n for module in cythonize(target)\n ],\n\n include_dirs = [np.get_include()]\n)\n\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"520815573","text":"import random\n#This class to establish what the cards are thier suit, rank, and the value that they hold\n# It also contains a function to get the value of any card for use later in the game\nclass Card:\n def __init__(self,rank, suit):\n self.rank = rank\n self.suit = suit\n self.value = self.getvalue(rank)\n\n def getvalue(self,rank):\n values = {'ace':1,'two':2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'jack': 10, 'queen': 10, 'king': 10}\n return values[rank]\n\n def __str__(self):\n return f\"{self.rank} of 
{self.suit}\"\n#This function allows the player to chose on whether they would like to countinue playing or not\n# If the player says yes, then the functions clears the deack and hands and resets the round\n#If the player says no it gives the value that breaks all the loops\ndef play_again():\n global breaker\n again = input(\"do you want to play again [y] or [n]\").lower()\n if again == \"y\":\n cleardeck()\n game()\n elif again == \"n\":\n print(\"bye\")\n breaker = False\n return breaker\n# This function fills the deck using the card class created before. It makes it so there is 52 cards, with 13 of each suit.\ndef make_deck():\n deck = []\n for suit in ['spades', 'diamonds', 'hearts', 'clubs']:\n for rank in ['ace', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'jack', 'queen', 'king']:\n deck.append(Card(rank,suit))\n return deck\n# this function is for dealing cards to the two hands. It shuffles the deck and takes one card out and plops it into the hand.\n#It does the process twice, and when writing this line you have to specifiy what person it is for.\n#this is so the function knows what cards to show in terminal, if it is player it will show the full hand, but if it is dealer it will only show the second card.\ndef deal(deck,person):\n hand = []\n for i in range(2):\n random.shuffle(deck)\n card = deck.pop()\n hand.append(card)\n if person == \"player\":\n print(card)\n if person == \"dealer\":\n print(hand[1])\n return hand\n#THis function is for calculating the total value of the hands using self.value from the card class\ndef total_value(hand):\n global total\n total= 0\n # The function is split in two for the two diferent hands, for player hand it allows the player to chose the value for the ace,\n #while for the dealer it is automatically 11\n if hand == player_hand:\n for card in hand:\n #In blackjack Ace could equal 11 or 1 this if statement allows the player to chose what value they want\n if card.rank == \"ace\":\n v = 0\n while v not in [1, 11]:\n v = int(input(\"1 or 11?\"))\n card.value = v\n #the card new value is then set equal to card and added to the total adn prints it out\n total += card.value\n print(f\"this is the value of the player's hand:{total}\")\n elif hand == dealer_hand:\n for card in hand:\n if card.rank == \"ace\":\n v= 11\n card.value = v\n total += card.value\n return total\n# the function watches the amount of cards in the deck and if it ever gets to 4 left it will empty and then refill the deck and then it will shuffle the deck\ndef deckshuffle(deck):\n while True:\n if len(deck) == 4:\n for suit in ['spades', 'diamonds', 'hearts', 'clubs']:\n for rank in ['ace', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'jack', 'queen', 'king']:\n deck = []\n deck.append(Card(rank,suit))\n else:\n break\n random.shuffle(deck)\n#the function takes a card out of the deck list and puts it into the hand\n#this function is also split in two due the different printing of the hands\n# printing every card in player hands\n# printing the already printed card and the newly added card\ndef hit(hand,person):\n card = deck.pop()\n hand.append(card)\n if person == \"player\":\n print(\"Player's hand:\")\n for card in hand:\n print(card)\n elif person == \"dealer\":\n print(\"dealer's hand\")\n print (hand[1])\n print (hand[2])\n return hand\n# this function is for the end showing the results of the game\n# It shows what was in the players and dealers hand and shows what the players final total was for the player. 
It then procceds to clear the deck.\ndef results():\n global total\n print(\"This is was your hand:\")\n for card in player_hand:\n print(card)\n print(f\"and it adds up to: {total_value(player_hand)}\")\n print(\"This was the dealer's hand:\")\n for card in dealer_hand:\n print(card)\n cleardeck()\n#Checks the two hands right after the cards are delt if they got black jack it's when the total value equals 21 with and ace and face or ten card\n#Checks both player and dealer hand and if it triggers one it shows the results using the result function and then uses the play again function to restart.\ndef blackjack(dealer_hand,player_hand):\n if total_value(dealer_hand) == 21:\n results()\n print(\"The dealer had blackjack. Good game you lose\")\n play_again()\n elif total_value(player_hand) == 21:\n results()\n print(\"You had black jack congrats you won\")\n play_again()\n#this function checks the two hands for every winning, losing, and draw scenerio\n# for each if/elif statement it prints the results and a message letting the player know what happened\n# this only comes to play at the end of the round\ndef score(dealer_hand, player_hand):\n if total_value(player_hand) == 21:\n results()\n print(\"Congratulations! You got Blackjack!\")\n elif total_value(dealer_hand) == 21:\n results()\n print(\"Sorry, you lose. The dealer got Blackjack.\")\n elif total_value(player_hand) > 21:\n results()\n print(\"Sorry. You busted. You lose.\")\n elif total_value(dealer_hand) > 21:\n results()\n print(\"Dealer busts. You win!\")\n elif total_value(dealer_hand) > total_value(player_hand):\n results()\n print(\"Sorry. Your score is lower than the dealers. You lose\")\n elif total_value(player_hand) > total_value(dealer_hand):\n results()\n print (\"Congratulations. Your score is higher than the dealer. You win\")\n elif total_value(player_hand) == total_value(dealer_hand):\n results()\n print(\"It was a draw, Dealer wins\")\n#Like the black jack function this function checks each hand, however this functions checks to seeif they busted.\n# it comes in after hiting and if triggered will show the results() and a message and then asks you if you want to playagain()\ndef bust():\n global breaker\n if total_value(player_hand) > 21:\n results()\n print(\"Player Busts. You lose\")\n play_again()\n elif total_value(dealer_hand) > 21:\n results()\n print(\"Dealer Busts. You win\")\n play_again()\n else:\n pass\n# this function clears the hands by taking the cards in the hands and appending them to the garbage list\ndef cleardeck():\n garbage = []\n for card in player_hand:\n garbage.append(card)\n player_hand.pop(player_hand.index(card))\n for card in dealer_hand:\n garbage.append(card)\n dealer_hand.pop(dealer_hand.index(card))\n# this the final game function it combines all the other functions into one\ndef game():\n print(\" \") # a little intro line\n print(\"Welcome to blackjack. Your goal is to get your hand to get as close to 21 or to be 21. 
But if you go over you go bust.\")\n print(\" \")# Just incase the the player doesn't know how to play the game offers a review of the rules of black jack\n player_input = input(\"Do you want to review the basics of blackjack, [y]es or [n]o: \").lower()\n print(\" \")\n if player_input == \"y\":\n print(\"Now here are the basics,\")\n print(\" -All face cards equal 10 and an Ace can either equal 1 or 11 it is your choice\")\n print(\" -All number cards equal their number\")\n print(\" -When you are first delt the cards, your hand will show while the dealers hand has only one card showing\")\n print(\" -During the game you can hit, stay, or fold\")\n print(\" -Hitting adds a new deck to your hand, but it can be almost any card from an ace to king, so hit with caution\")\n print(\" -Staying means you dont want any more cards and youa and the dealer show cards and sees who wins\")\n print(\" -Folding means you gave up and the dealer automatically wins\")\n print(\" -To win the game you just have to have a higher value than the dealer or to get 21, but you go over you lose\")\n print(\" -If the game ends in a draw the dealer automatically wins\")\n print(\" \")\n global breaker\n choice = 0\n global deck\n global dealer_hand\n global player_hand\n breaker = True#sets up all the global variabples\n while breaker == True:#first loop to keep the game going\n deck = make_deck()# Here the game sets the deck and the two hands\n deckshuffle(deck)\n print(\"This is your hand:\")\n player_hand= deal(deck, \"player\")\n print(\"This is the dealer's hand:\")\n dealer_hand= deal(deck, \"dealer\")\n blackjack(dealer_hand, player_hand) # checks for black jack\n while breaker == True: #second loop allows the player to hit more than once\n choice = input(\"Do you want to [H]it, [S]tand, or [F]old: \").lower()# asks the player what they want to do hit stay or fold\n if choice == \"h\":#player gets a new card and if the dealer total is less than 17 it aoutmatically hits the dealer hand\n print(\"This is your new hand\")\n player_hand = hit(player_hand,\"player\")\n total_value(player_hand)\n while total_value(dealer_hand) < 17:\n hit(dealer_hand,\"dealer\")\n bust()# checks if plaher or dealer busts\n elif choice == \"s\":# player stays and it checks if the dealer needs to get hit\n while total_value(dealer_hand) < 17:\n hit(dealer_hand,\"dealer\")\n bust()\n score(dealer_hand, player_hand)#checks to see what happened and lets the player know\n play_again()# asks to re start the game\n elif choice == \"f\":# folding ends all the loops and restarts not the round but the game\n print(\"bye\")\n breaker = False# both loops are set to break when breaker = false\n\n\ngame() #the game\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":10806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"567607885","text":"import matplotlib.pyplot as plt\nfrom numpy import *\n\nimport kNN\n\n\ndef file2matrix(filename):\n fr = open(filename)\n arrayOLines = fr.readlines()\n reMatrix = zeros((len(arrayOLines), 3))\n classLabelVector = []\n index = 0\n for line in arrayOLines:\n line = line.strip()\n listFromLine = line.split('\\t')\n reMatrix[index, :] = listFromLine[0:3]\n classLabelVector.append(int(listFromLine[-1]))\n index += 1\n return reMatrix, classLabelVector\n\n\n# 归一化\ndef autoNorm(dataSet):\n minVals = dataSet.min(0)\n maxVals = dataSet.max(0)\n ranges = maxVals - minVals\n m = dataSet.shape[0]\n normalDataSet = 
zeros(shape(dataSet))\n normalDataSet = dataSet - tile(minVals, (m, 1))\n normalDataSet /= tile(ranges, (m, 1))\n return normalDataSet, ranges, minVals\n\n\n# 测试\ndef datingClassTest():\n hoRatio = 0.1\n datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')\n normMat, ranges, minVals = autoNorm(datingDataMat)\n print(normMat)\n m = normMat.shape[0]\n numTestVecs = int(m * hoRatio)\n errorCount = 0.0\n for i in range(numTestVecs):\n classifierResult = kNN.classify0(normMat[i, :], normMat[numTestVecs:m, :],\n datingLabels[numTestVecs:m], 3)\n print(\"the classify call back with : %d ,the real answer is % d \"\n % (classifierResult, datingLabels[i]))\n if (classifierResult != datingLabels[i]):\n errorCount += 1\n print(\"the total error rate is %f \" % (errorCount / float(numTestVecs)))\n\n\n# 正式预测\ndef classifyPerson():\n resultList = ['not at all', 'in small doses', 'in large doses']\n percentTats = float(input('percentage of time spent playing video games?'))\n ffMiles = float(input('frequent filter miles earned per year?'))\n iceCream = float(input('liters of ice cream consumed per year?'))\n datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')\n normMat, ranges, minVals = autoNorm(datingDataMat)\n inArr = array([ffMiles, percentTats, iceCream])\n\n classifierResult = kNN.classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)\n\n print('You will probably like this person :', resultList[classifierResult - 1])\n\n\n# 绘制散点图\ndef showFigure():\n datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')\n datingDataMat = autoNorm(datingDataMat)[0]\n\n # 显示散点图\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # ax = fig.add_subplot(223)\n\n ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2],\n 15 * array(datingLabels), 15 * array(datingLabels))\n plt.show()\n\n\nif __name__ == '__main__':\n # classifyPerson()\n showFigure()\n # datingClassTest()\n","sub_path":"kNN/datematch.py","file_name":"datematch.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"628894542","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport torch\nimport numpy as np\nimport pandas as pd\nimport argparse\n\nimport logging \nfrom utils.logger import get_logger\nfrom tqdm import tqdm\nfrom glob import glob\n\nfrom torch.utils import tensorboard\nimport json, os, sys\nimport time\n\nimport errno\nimport os\n\nimport matplotlib.pyplot as plt\n\n\nsys.path.append('/')\nfrom utils.utils import LevelSetDataset\nfrom utils.metrics import iou_pytorch, pixel_segementation_evaluation \nfrom utils.PytorchEarlyStopping import EarlyStopping\nfrom utils.loss import weighted_binary_cross_entropy, generalised_loss\nfrom models.ConvRNN import CRNN, CESN\nfrom models.Conv3D import CNN3D\n\nif __name__==\"__main__\":\n\n # parse augments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run-name\", type=str, help=\"Name of the run [CNN3D]\")\n parser.add_argument(\"--data-path\", type=str, required=True, help=\"Path to where the data is located\")\n parser.add_argument(\"--save-path\", type=str, required=True, help=\"Path to where runs will be written\")\n parser.add_argument(\"--dataset\", type=str, required=True, help=\"Path to where runs will be written\")\n\n\n parser.add_argument(\"--num-epochs\", type=int, default=500, help=\"Number of training epochs [500]\")\n parser.add_argument(\"--batch-size\", type=int, default=32, help=\"Number of examples to use 
in a batch [32]\")\n parser.add_argument(\"--learning-rate\", type=float, default=0.1, help=\"Learning rate for training [0.1]\")\n\n parser.add_argument(\"--num-frames\", type=int, default=95, help=\"Length of the sequences for each image [95]\")\n parser.add_argument(\"--num-past-step\", type=int, default=1, help=\"Number of steps to use in input [1]\")\n parser.add_argument(\"--num-future-step\", type=int, default=1, help=\"Number of time steps in the future for predictions [1]\")\n parser.add_argument(\"--image-dimension\", type=int, default=32, help=\"Dimensions to resize the images [32]\")\n parser.add_argument(\"--threshold\", type=float, default=0.5, help=\"Pixel cutoff to create mask [0.5]\")\n\n\n parser.add_argument(\"--in-channels\", type=int, default=3, help=\"Input channel for the 1st conv layer [3]\")\n parser.add_argument(\"--hidden\", type=int, default=512, help=\"Number of hidden units in the 1st fully connected layer [512]\")\n parser.add_argument(\"--num-classes\", type=int, default=1024, help=\"Number of pixel classes to be predicted [1024]\")\n parser.add_argument(\"--num_layers\", type=int, default=1, help=\"Number of layers in the recurrent unit [1]\")\n# parser.add_argument(\"--sample-size\", type=int, default=128 , help=\" [128]\")\n# parser.add_argument(\"--sample-duration\", type=int, default=16, help=\" [16]\")\n parser.add_argument(\"--leaking-rate\", type=float, default=0.01, help=\"Leak rate for leaky ESN [0.01]\")\n parser.add_argument(\"--spectral-radius\", type=float, default=0.9, help=\"Scaling of reservoir matrix [0.9]\")\n parser.add_argument(\"--sparsity\", type=float, default=0.2, help=\"Percentage of neurons with zeros in the reservoir matrix [0.2]\")\n \n# parser.add_argument(\"--in-channels\", type=int, default=3, help=\"Input channel for the 1st conv layer [3]\")\n # parser.add_argument(\"--hidden-one\", type=int, default=512, help=\"Number of hidden units in the 1st fully connected layer [512]\")\n # parser.add_argument(\"--hidden-two\", type=int, default=256, help=\"Number of hidden units in the 2ndt fully connected layer [256]\")\n# parser.add_argument(\"--num-classes\", type=int, default=1024, help=\"Number of pixel classes to be predicted [1024]\")\n parser.add_argument(\"--dropout-prob\", type=float, default=0.5, help=\"Dropout probability [0.5]\")\n # parser.add_argument(\"--sample-size\", type=int, default=128 , help=\" [128]\")\n # parser.add_argument(\"--sample-duration\", type=int, default=16, help=\" [16]\")\n \n# parser.add_argument(\"--in-channels\", type=int, default=3, help=\"Input channel for the 1st conv layer [3]\")\n# parser.add_argument(\"--hidden\", type=int, default=512, help=\"Number of hidden units in the 1st fully connected layer [512]\")\n# parser.add_argument(\"--num-classes\", type=int, default=1024, help=\"Number of pixel classes to be predicted [1024]\")\n# parser.add_argument(\"--num-layers\", type=int, default=1, help=\"Number of layers in the recurrent unit [1]\")\n parser.add_argument(\"--sample-size\", type=int, default=128 , help=\" [128]\")\n parser.add_argument(\"--sample-duration\", type=int, default=16, help=\" [16]\")\n # parser.add_argument(\"--rnn-unit\", type=str, default='LSTM', required=True, help=\"Recurrent unit type [LSTM]\")\n parser.add_argument(\"--seed\", type=int, default=0, help=\"Seed for random number generation [0]\")\n\n\n\n args = parser.parse_args()\n run_path = os.path.join( args.save_path, args.run_name)\n\n # logging\n logger = get_logger(run_path)\n\n # set random seed\n 
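# NOTE (editorial, not part of the original script): the two seeding calls
# below make NumPy and CPU-side PyTorch ops repeatable. For fully
# deterministic GPU runs one would typically also set (standard PyTorch
# flags, assumed to be desired here):
#   torch.cuda.manual_seed_all(args.seed)
#   torch.backends.cudnn.deterministic = True
#   torch.backends.cudnn.benchmark = False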
np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n # get the number of classess\n args.num_classes = args.image_dimension*args.image_dimension\n\n # log all parameters\n logger.info(\"Commnad-line arguments\")\n for arg, value in sorted(vars(args).items()):\n logger.info(f\"arguments {args}: {value}\")\n \n # data loader\n logger.info(\"Creating dataset......\")\n ls_dataset = LevelSetDataset(\n input_image_path=os.path.join(args.data_path,\"images\"),\n target_image_path=os.path.join(args.data_path,\"labels\"),\n threshold=args.threshold,\n num_past_steps=args.num_past_step,\n num_future_steps=args.num_future_step,\n image_dimension=args.image_dimension,\n num_frames=args.num_frames ,\n valid_split= 0.1, \n train_split= 0.8,\n training_mode='train'\n )\n\n # device to perform computation (CPU or GPU)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n logger.info(f\"device: {device}\")\n \n # create test ds for final evaluation\n logger.info(f\"Creating test dataset for evaluating best model......\")\n ls_eval_ds = ls_dataset.create_set(batch_size=1, shuffle=True, pin_memory=True, num_workers=4, training_mode='test')\n\n # CNN\n model_cnn = CNN3D(\n in_channels=args.in_channels,\n sample_size=args.sample_size,\n sample_duration=args.sample_duration,\n drop_p=args.dropout_prob, \n hidden1=args.hidden,\n hidden2=args.hidden,\n num_classes=args.num_classes\n ).to(device)\n \n # LSTM\n model_lstm = CRNN(\n in_channels=args.in_channels,\n sample_size=args.sample_size,\n sample_duration=args.sample_duration,\n hidden_size=args.hidden,\n num_layers=args.num_layers,\n rnn_unit='LSTM',\n num_classes=args.num_classes\n ).to(device)\n \n \n # GRU\n model_gru = CRNN(\n in_channels=args.in_channels,\n sample_size=args.sample_size,\n sample_duration=args.sample_duration,\n hidden_size=args.hidden,\n num_layers=args.num_layers,\n rnn_unit='GRU',\n num_classes=args.num_classes\n ).to(device)\n \n # RNN\n model_rnn = CRNN(\n in_channels=args.in_channels,\n sample_size=args.sample_size,\n sample_duration=args.sample_duration,\n hidden_size=args.hidden,\n num_layers=args.num_layers,\n rnn_unit='RNN',\n num_classes=args.num_classes\n ).to(device)\n \n \n # ESN\n model_esn = CESN(\n in_channels=args.in_channels,\n sample_size=args.sample_size,\n sample_duration=args.sample_duration,\n hidden_size=args.hidden,\n num_layers=args.num_layers,\n num_classes=args.num_classes,\n leaking_rate=args.leaking_rate,\n spectral_radius=args.spectral_radius,\n sparsity=args.sparsity\n ).to(device)\n \n models ={\n 'convESN':model_esn,\n 'convLSTM':model_lstm,\n 'convGRU':model_gru,\n 'convRNN':model_rnn,\n 'conv3d':model_cnn\n }\n logger.info(f\"model: {models}\")\n \n for model_key in models:\n logger.info(f\"model name:{model_key}\")\n for ds in [args.dataset]:\n logger.info(f\"model: {ds}\")\n model= models[model_key]\n fp=f'/home-mscluster/tmashinini/MSC/Data/processed_data/{ds}/results/*{ds}-{model_key}*/checkpoints/*.pt'\n logger.info(f\"fp: {fp}\")\n\n model_fp = glob(fp)[0]\n\n\n\n\n\n logger.info(f\"model: {model_fp}\")\n\n logger.info(f\"Loading model for testing.....\")\n model.load_state_dict(torch.load(model_fp))\n best_iou = -np.inf\n\n model.eval()\n for batch_idx, (inputs, labels, names) in enumerate(ls_eval_ds):\n \n # load data and move data to GPU's\n inputs = inputs.to(device, non_blocking=True)\n labels = labels.to(device, non_blocking=True)\n inputs= inputs.squeeze(1)\n\n # forward-propogation\n outputs = model(inputs) \n outputs = (outputs >= args.threshold)*1\n\n 
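# NOTE (editorial sketch): with binary {0,1} masks of shape (N, H, W), an
# IoU in the spirit of utils.metrics.iou_pytorch could be computed as
# follows (assumed semantics -- the project's helper may differ):
#   inter = (outputs * labels).float().sum((1, 2))
#   union = ((outputs + labels) > 0).float().sum((1, 2))
#   iou = (inter + 1e-6) / (union + 1e-6)   # eps guards empty masks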
outputs = outputs.view(-1, args.image_dimension, args.image_dimension)\n labels = labels.view(-1, args.image_dimension, args.image_dimension)\n inputs = inputs.to(device, non_blocking=True)\n # logger.info('shapes', inputs.shape, outputs.shape, labels.shape)\n # logger.info('shapes slice', inputs[0,0,0].shape, outputs[0].shape, labels[0].shape)\n\n # save the prediction and the labels\n # for t in np.arange(inputs.shape[2], 10):\n # label = labels[:, :, t]\n # output = outputs[:, :, t]\n \n\n # compute the metrics\n # print(outputs.shape, labels.shape)\n # outputs = (outputs >= args.threshold)*1\n # f1, precision, recall = pixel_segementation_evaluation(labels.cpu().detach().numpy().reshape(-1),\n # outputs.cpu().detach().numpy().reshape(-1))\n # iou = iou_pytorch(outputs, labels)\n # iou = iou.detach().item()\n\n \n # if iou>best_iou:\n # best_iou = iou\n\n t=81\n cp=model_fp.split('/')[-3]\n logger.info(f\"save cp: {cp}\")\n save_path = f'/home-mscluster/tmashinini/MSC/Data/processed_data/{ds}/results/{cp}/predictions/'\n logger.info(f\"save path: {save_path}\")\n os.makedirs(save_path, exist_ok=True)\n n = names[0].split('_')[0]\n plt.imsave(os.path.join(save_path,f'{n}_input.png'), inputs.detach().cpu().numpy()[0,0,0] )\n plt.imsave(os.path.join(save_path,f'{names[0]}_label_{t}.png'), labels.detach().cpu().numpy()[0] )\n plt.imsave(os.path.join(save_path,f'{names[0]}_output_{t}.png'), outputs.detach().cpu().numpy()[0])\n\n # if batch_idx==5:\n # break\n\n logger.info(f'========= DONE ========')\n","sub_path":"src/get_prediction_images.py","file_name":"get_prediction_images.py","file_ext":"py","file_size_in_byte":10932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"347028447","text":"import pandas as pd\n\nfrom common.config import summary_column_names\n\ndef how_much_missing(df, column):\n missing_count = df[column].isnull().sum()\n return [missing_count, 100*missing_count/len(df[column])]\n\ndef is_constant_column(df, column):\n if df[column].nunique() <= 1:\n return True\n else:\n return False\n\ndef find_type(df, column):\n return df[column].dtype.name\n\n\ndef get_mean(df, column):\n if df[column].dtype.kind in ['b', 'i', 'u', 'f', 'c']:\n return df[column].mean()\n else:\n return None\n\ndef count_uniques(df,column):\n return len(df[column].value_counts().index)\n\n\ndef summarise_df(df):\n\n cols = []\n missings = []\n constants = []\n types = []\n means = []\n uniques = []\n\n for col in df.columns:\n cols.append(col)\n missings.append(how_much_missing(df, col)[0])\n constants.append(str(is_constant_column(df, col)))\n types.append(find_type(df, col))\n means.append(get_mean(df, col))\n uniques.append(count_uniques(df, col))\n\n\n # column names also stored in config, a bit messy but easier for setting up dash tables to have column names in config\n stats_df = pd.DataFrame(\n data={\n 'Column': cols,\n 'Mean': means,\n 'Missing Value Count': missings,\n 'Is constant': constants,\n 'Type': types,\n 'Number of Unique Values': uniques}\n )\n\n # pd.concat([cols,means,missings,constants,types,uniques], axis=1, keys=summary_column_names)\n #stats_df.set_index('Column', inplace=True)\n\n return stats_df\n\n\n# function to recursively remove whitespace in a dictionary\ndef removew(d):\n newd = {}\n\n for k, v in d.items():\n if isinstance(v, dict):\n removew(v)\n else:\n if isinstance(k, str):\n k = k.strip()\n if isinstance(v, str):\n v = v.strip()\n newd[k] = v\n\n return 
newd","sub_path":"app/common/data_quality_summary.py","file_name":"data_quality_summary.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"339213159","text":"from __future__ import (print_function, division)\nimport os\nimport numpy as np\nimport netCDF4 as netcdf\nimport re\nimport maps2d_plot_util as maps2d_plot_util\nimport warnings\nimport logging\nimport datetime\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport itertools\n\nwarnings.filterwarnings('ignore')\nplt.rcParams['font.weight'] = 'bold'\nplt.rcParams['axes.labelsize'] = 15\nplt.rcParams['axes.labelweight'] = 'bold'\nplt.rcParams['xtick.labelsize'] = 15\nplt.rcParams['ytick.labelsize'] = 15\nplt.rcParams['axes.titlesize'] = 15\nplt.rcParams['axes.titleweight'] = 'bold'\nplt.rcParams['axes.formatter.useoffset'] = False\n###import cmocean\n###cmap_diff = cmocean.cm.balance\ncmap_diff = plt.cm.coolwarm\nnoaa_logo_img_array = matplotlib.image.imread(\n os.path.join(os.environ['USHverif_global'], 'plotting_scripts', 'noaa.png')\n)\n\n# Exit early if we don't need to run this\n# aka not running model2obs\ntype_list = os.environ['maps2d_type_list'].split(' ')\nif 'model2obs' not in type_list:\n print(\"model2obs verification no requested...\"\n +\"no need to calculate special variables\")\n exit()\n\n\n# Read in environment variables\nmachine = os.environ['machine']\nDATA = os.environ['DATA']\nRUN = os.environ['RUN']\nmake_met_data_by = os.environ['maps2d_make_met_data_by']\nplot_by = os.environ['maps2d_make_met_data_by']\nSTART_DATE = os.environ['start_date']\nEND_DATE = os.environ['end_date']\nforecast_to_plot_list = os.environ['maps2d_forecast_to_plot_list'].split(' ')\nregrid_to_grid = os.environ['maps2d_regrid_to_grid']\nlatlon_area = os.environ['maps2d_latlon_area'].split(' ')\ntype_list = os.environ['maps2d_type_list'].split(' ')\nuse_monthly_mean = os.environ['maps2d_model2obs_use_monthly_mean']\nuse_ceres = os.environ['maps2d_model2obs_use_ceres']\nhr_beg = os.environ['maps2d_hr_beg']\nhr_end = os.environ['maps2d_hr_end']\nhr_inc = os.environ['maps2d_hr_inc']\nmodel_list = os.environ['model_list'].split(' ')\nmodel_plot_name_list = os.environ['maps2d_model_plot_name_list'].split(' ')\n\n# Set up information\nverif_case_type = 'model2obs'\nvar_group_name = 'cloudsrad'\nif machine == 'WCOSS_C' or machine == 'WCOSS_DELL_P3':\n py_map_pckg = 'cartopy'\nelse:\n py_map_pckg = 'basemap' \nif py_map_pckg == 'cartopy':\n import cartopy.crs as ccrs\n from cartopy.util import add_cyclic_point\n from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nelif py_map_pckg == 'basemap':\n from mpl_toolkits.basemap import Basemap, addcyclic\nllcrnrlat_val = float(latlon_area[0])\nurcrnrlat_val = float(latlon_area[1])\nllcrnrlon_val = float(latlon_area[2])\nurcrnrlon_val = float(latlon_area[3])\nlat_ticks = np.linspace(llcrnrlat_val, urcrnrlat_val, 7, endpoint=True)\nlon_ticks = np.linspace(llcrnrlon_val, urcrnrlon_val, 7, endpoint=True)\nnmodels = int(len(model_list))\n\n# Plot title information\nSTART_DATE_dt = datetime.datetime.strptime(START_DATE, '%Y%m%d')\nEND_DATE_dt = datetime.datetime.strptime(END_DATE, '%Y%m%d')\ndates_title = (make_met_data_by.lower()+' '\n +START_DATE_dt.strftime('%d%b%Y')+'-'\n +END_DATE_dt.strftime('%d%b%Y'))\nmake_met_data_by_hrs = []\nhr = int(hr_beg) * 3600\nwhile hr <= int(hr_end)*3600:\n 
make_met_data_by_hrs.append(str(int(hr/3600)).zfill(2)+'Z')\n hr+=int(hr_inc)\nmake_met_data_by_hrs_title = ', '.join(make_met_data_by_hrs)\nforecase_to_plot_title_list = []\n \n# Get input and output directories\nseries_analysis_file_dir = os.path.join(DATA, RUN, 'metplus_output',\n 'make_met_data_by_'+make_met_data_by,\n 'series_analysis', verif_case_type,\n var_group_name)\nplotting_out_dir_imgs = os.path.join(DATA, RUN, 'metplus_output',\n 'plot_by_'+plot_by,\n verif_case_type, var_group_name,\n 'imgs')\nif not os.path.exists(plotting_out_dir_imgs):\n os.makedirs(plotting_out_dir_imgs)\n\n# Loop of variables lat-lon plots\nvar_info_forcast_to_plot_list = itertools.product(\n ['SWABSORB_atm', 'LWEMIT_atm', 'SWALBDO_sfc'], forecast_to_plot_list\n)\nfor var_info_forcast_to_plot in var_info_forcast_to_plot_list:\n var_name = var_info_forcast_to_plot[0].split('_')[0]\n var_level = var_info_forcast_to_plot[0].split('_')[1]\n forecast_to_plot = var_info_forcast_to_plot[1]\n print(\"Working on lat-lon error plots for \"+var_name+\" \"+var_level\n +\" \"+forecast_to_plot)\n if forecast_to_plot == 'anl':\n forecast_to_plot_title = 'analysis'\n elif forecast_to_plot[0] == 'f':\n forecast_to_plot_title = 'forecast hour '+forecast_to_plot[1:]\n elif forecast_to_plot[0] == 'd':\n forecast_day = int(forecast_to_plot[1:])\n forecast_day_fhr4 = forecast_day * 24\n forecast_day_fhr3 = str(forecast_day_fhr4 - 6).zfill(2)\n forecast_day_fhr2 = str(forecast_day_fhr4 - 12).zfill(2)\n forecast_day_fhr1 = str(forecast_day_fhr4 - 18).zfill(2)\n forecast_day_fhr4 = str(forecast_day_fhr4).zfill(2)\n forecast_to_plot_title = (\n 'forecast hours '+forecast_day_fhr1+', '+forecast_day_fhr2+', '\n +forecast_day_fhr3+', '+forecast_day_fhr4\n )\n if var_name == 'SWABSORB': #shortwave absorption\n var_info_title = (\n 'Atmospheric Absorbed Shortwave (W 'r'$\\mathregular{m^{-2}}$'')'\n )\n levels = np.array([10,30,50,70,90,110,120,130])\n levels_diff = np.array([-60,-40,-30,-20,-10,0,10,20,30,40,60])\n cmap = plt.cm.Wistia\n var_scale = 1\n files_needed_list = [\n forecast_to_plot+'_DSWRF_toa_obsonly.nc',\n forecast_to_plot+'_DSWRF_sfc.nc', \n forecast_to_plot+'_USWRF_toa.nc',\n forecast_to_plot+'_USWRF_sfc.nc',\n ]\n elif var_name == 'LWEMIT': #longwave emitted\n var_info_title = (\n 'Atmospheric Emitted Longwave (W 'r'$\\mathregular{m^{-2}}$'')'\n )\n levels = np.array([100,120,140,160,180,200,220,240])\n levels_diff = np.array([-60,-40,-30,-20,-10,0,10,20,30,40,60])\n cmap = plt.cm.cool\n var_scale = 1\n files_needed_list = [\n forecast_to_plot+'_DLWRF_sfc.nc', \n forecast_to_plot+'_ULWRF_toa.nc',\n forecast_to_plot+'_ULWRF_sfc.nc',\n ]\n elif var_name == 'SWALBDO': #shortwave surface albedo\n var_info_title = 'Shortwave Surface Albedo (fraction)'\n levels = np.array([0.1,0.2,0.4,0.6,0.8,1.0])\n levels_diff = np.array(\n [-0.06,-0.04,-0.03,-0.02,-0.01,0,0.01,0.02,0.03,0.04,0.06]\n )\n cmap = plt.cm.cubehelix_r\n var_scale = 1\n files_needed_list = [\n forecast_to_plot+'_DSWRF_sfc.nc', \n forecast_to_plot+'_USWRF_sfc.nc',\n ]\n for model in model_list:\n index = model_list.index(model)\n model_num = index + 1\n model_plot_name = model_plot_name_list[index]\n # Only dealing with radiation variables\n if use_ceres == 'YES':\n model_obtype = 'ceres'\n else:\n model_obtype = 'rad_srb2'\n # Set up plot\n if model_num == 1:\n nsubplots = nmodels + 1\n if nsubplots > 8:\n print(\"Too many subplots requested. 
Current maximum is 8.\")\n exit(1)\n if nsubplots == 1:\n fig = plt.figure(figsize=(10,12))\n gs = gridspec.GridSpec(1,1)\n elif nsubplots == 2:\n fig = plt.figure(figsize=(10,12))\n gs = gridspec.GridSpec(2,1)\n gs.update(hspace=0.2)\n elif nsubplots > 2 and nsubplots <= 4:\n fig = plt.figure(figsize=(20,12))\n gs = gridspec.GridSpec(2,2)\n gs.update(wspace=0.2, hspace=0.2)\n elif nsubplots > 4 and nsubplots <= 6:\n fig = plt.figure(figsize=(30,12))\n gs = gridspec.GridSpec(2,3)\n gs.update(wspace=0.2, hspace=0.2)\n elif nsubplots > 6 and nsubplots <= 9:\n fig = plt.figure(figsize=(30,18))\n gs = gridspec.GridSpec(3,3)\n gs.update(wspace=0.2, hspace=0.2)\n # Set up observation subplot map and title\n if py_map_pckg == 'cartopy':\n ax_obs = plt.subplot(gs[0],\n projection=ccrs.PlateCarree(\n central_longitude=180\n ))\n if urcrnrlon_val == 360:\n urcrnrlon_val_adjust = 359.9\n else:\n urcrnrlon_val_adjust = urcrnrlon_val\n ax_obs.set_extent(\n [llcrnrlon_val, urcrnrlon_val_adjust,\n llcrnrlat_val, urcrnrlat_val],\n ccrs.PlateCarree()\n )\n ax_obs.set_global()\n ax_obs.coastlines()\n ax_obs.set_xlabel('Longitude')\n ax_obs.set_xticks(lon_ticks, crs=ccrs.PlateCarree())\n ax_obs.set_ylabel('Latitude')\n ax_obs.set_yticks(lat_ticks, crs=ccrs.PlateCarree())\n lon_formatter = LongitudeFormatter(zero_direction_label=True)\n lat_formatter = LatitudeFormatter()\n ax_obs.xaxis.set_major_formatter(lon_formatter)\n ax_obs.yaxis.set_major_formatter(lat_formatter)\n elif py_map_pckg == 'basemap':\n ax_obs = plt.subplot(gs[subplot_num])\n mo = Basemap(projection='cyl', llcrnrlat=llcrnrlat_val,\n urcrnrlat=urcrnrlat_val, llcrnrlon=llcrnrlon_val,\n urcrnrlon=urcrnrlon_val, resolution='c', lon_0=180,\n ax=ax_obs)\n mo.drawcoastlines(linewidth=1.5, color='k', zorder=6)\n mo.drawmapboundary\n ax_obs.set_xlabel('Longitude')\n ax_obs.set_ylabel('Latitude')\n mo.drawmeridians(lon_ticks, labels=[False,False,False,True],\n fontsize=15)\n mo.drawparallels(lat_ticks, labels=[True,False,False,False],\n fontsize=15)\n obtype_subtitle = maps2d_plot_util.get_obs_subplot_title(\n model_obtype, use_monthly_mean\n )\n ax_obs.set_title(obtype_subtitle, loc='left')\n # Set up model subplot map\n subplot_num = model_num\n if py_map_pckg == 'cartopy':\n ax = plt.subplot(gs[subplot_num], \n projection=ccrs.PlateCarree(\n central_longitude=180\n ))\n if urcrnrlon_val == 360:\n urcrnrlon_val_adjust = 359.9\n else:\n urcrnrlon_val_adjust = urcrnrlon_val\n ax.set_extent(\n [llcrnrlon_val, urcrnrlon_val_adjust,\n llcrnrlat_val, urcrnrlat_val],\n ccrs.PlateCarree()\n )\n ax.set_global()\n ax.coastlines()\n ax.set_xlabel('Longitude')\n ax.set_xticks(lon_ticks, crs=ccrs.PlateCarree())\n ax.set_ylabel('Latitude')\n ax.set_yticks(lat_ticks, crs=ccrs.PlateCarree())\n lon_formatter = LongitudeFormatter(zero_direction_label=True)\n lat_formatter = LatitudeFormatter()\n ax.xaxis.set_major_formatter(lon_formatter)\n ax.yaxis.set_major_formatter(lat_formatter)\n elif py_map_pckg == 'basemap':\n ax = plt.subplot(gs[subplot_num])\n m = Basemap(projection='cyl', llcrnrlat=llcrnrlat_val,\n urcrnrlat=urcrnrlat_val, llcrnrlon=llcrnrlon_val,\n urcrnrlon=urcrnrlon_val, resolution='c', lon_0=180,\n ax=ax)\n m.drawcoastlines(linewidth=1.5, color='k', zorder=6)\n m.drawmapboundary\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n m.drawmeridians(lon_ticks, labels=[False,False,False,True],\n fontsize=15)\n m.drawparallels(lat_ticks, labels=[True,False,False,False],\n fontsize=15)\n ax.set_title(model_plot_name+'-'+model_obtype, loc='left')\n # 
Read data\n        all_model_files_exist = True\n        missing_file_list = []\n        for file in files_needed_list:\n            model_series_analysis_netcdf_file = os.path.join(\n                series_analysis_file_dir, model, file\n            )\n            if not os.path.exists(model_series_analysis_netcdf_file):\n                all_model_files_exist = False\n                missing_file_list.append(model_series_analysis_netcdf_file)\n        if not all_model_files_exist:\n            print(\"Missing files for \"+model+\" \"+', '.join(missing_file_list))\n            if nmodels == 1:\n                ax_obs.set_title(str(np.nan), loc='right')\n            ax.set_title(str(np.nan), loc='right') \n        else:\n            if var_name == 'SWABSORB': #shortwave absorption\n                # DSWRF toa\n                DSWRF_toa_obsonly_file = os.path.join(\n                    series_analysis_file_dir, model,\n                    forecast_to_plot+'_DSWRF_toa_obsonly.nc'\n                )\n                DSWRF_toa_obsonly_data = netcdf.Dataset(\n                    DSWRF_toa_obsonly_file\n                )\n                DSWRF_toa_obsonly_data_lat = (\n                    DSWRF_toa_obsonly_data.variables['lat'][:]\n                )\n                DSWRF_toa_obsonly_data_lon = (\n                    DSWRF_toa_obsonly_data.variables['lon'][:]\n                )\n                lat = DSWRF_toa_obsonly_data_lat\n                lon = DSWRF_toa_obsonly_data_lon\n                DSWRF_toa_obsonly_data_variable_names = []\n                for var in DSWRF_toa_obsonly_data.variables:\n                    DSWRF_toa_obsonly_data_variable_names.append(str(var))\n                if 'series_cnt_FBAR' in DSWRF_toa_obsonly_data_variable_names:\n                    DSWRF_toa_obsonly_data_series_cnt_FBAR = (\n                        DSWRF_toa_obsonly_data.variables['series_cnt_FBAR'][:]\n                        * var_scale\n                    )\n                else:\n                    print(\"WARNING: FBAR values for \"+model+\" \"\n                          +\"not in file \"+DSWRF_toa_obsonly_file\n                          +\"...setting to NaN\")\n                    DSWRF_toa_obsonly_data_series_cnt_FBAR = np.full(\n                        (len(DSWRF_toa_obsonly_data_lat),\n                         len(DSWRF_toa_obsonly_data_lon)), np.nan\n                    )\n                if 'series_cnt_OBAR' in DSWRF_toa_obsonly_data_variable_names:\n                    DSWRF_toa_obsonly_data_series_cnt_OBAR = (\n                        DSWRF_toa_obsonly_data.variables['series_cnt_OBAR'][:]\n                        * var_scale\n                    )\n                else:\n                    print(\"WARNING: OBAR values for \"+model+\" \"\n                          +\"not in file \"+DSWRF_toa_obsonly_file\n                          +\"...setting to NaN\")\n                    DSWRF_toa_obsonly_data_series_cnt_OBAR = np.full(\n                        (len(DSWRF_toa_obsonly_data_lat),\n                         len(DSWRF_toa_obsonly_data_lon)), np.nan\n                    )\n                # DSWRF sfc\n                DSWRF_sfc_file = os.path.join(\n                    series_analysis_file_dir, model,\n                    forecast_to_plot+'_DSWRF_sfc.nc'\n                )\n                DSWRF_sfc_data = netcdf.Dataset(\n                    DSWRF_sfc_file\n                )\n                DSWRF_sfc_data_lat = (\n                    DSWRF_sfc_data.variables['lat'][:]\n                )\n                DSWRF_sfc_data_lon = (\n                    DSWRF_sfc_data.variables['lon'][:]\n                )\n                DSWRF_sfc_data_variable_names = []\n                for var in DSWRF_sfc_data.variables:\n                    DSWRF_sfc_data_variable_names.append(str(var))\n                if 'series_cnt_FBAR' in DSWRF_sfc_data_variable_names:\n                    DSWRF_sfc_data_series_cnt_FBAR = (\n                        DSWRF_sfc_data.variables['series_cnt_FBAR'][:]\n                        * var_scale\n                    )\n                else:\n                    print(\"WARNING: FBAR values for \"+model+\" \"\n                          +\"not in file \"+DSWRF_sfc_file\n                          +\"...setting to NaN\")\n                    DSWRF_sfc_data_series_cnt_FBAR = np.full(\n                        (len(DSWRF_sfc_data_lat),\n                         len(DSWRF_sfc_data_lon)), np.nan\n                    )\n                if 'series_cnt_OBAR' in DSWRF_sfc_data_variable_names:\n                    DSWRF_sfc_data_series_cnt_OBAR = (\n                        DSWRF_sfc_data.variables['series_cnt_OBAR'][:]\n                        * var_scale\n                    )\n                else:\n                    print(\"WARNING: OBAR values for \"+model+\" \"\n                          +\"not in file \"+DSWRF_sfc_file\n                          +\"...setting to NaN\")\n                    DSWRF_sfc_data_series_cnt_OBAR = np.full(\n                        (len(DSWRF_sfc_data_lat),\n                         len(DSWRF_sfc_data_lon)), np.nan\n                    )\n                # USWRF toa\n                USWRF_toa_file = os.path.join(\n                    series_analysis_file_dir, model,\n                    forecast_to_plot+'_USWRF_toa.nc'\n                )\n                USWRF_toa_data = netcdf.Dataset(\n                    USWRF_toa_file\n                )\n                USWRF_toa_data_lat = (\n                    
USWRF_toa_data.variables['lat'][:]\n )\n USWRF_toa_data_lon = (\n USWRF_toa_data.variables['lon'][:]\n )\n USWRF_toa_data_variable_names = []\n for var in USWRF_toa_data.variables:\n USWRF_toa_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in USWRF_toa_data_variable_names:\n USWRF_toa_data_series_cnt_FBAR = (\n USWRF_toa_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+USWRF_toa_file\n +\"...setting to NaN\")\n USWRF_toa_data_series_cnt_FBAR = np.full(\n (len(USWRF_toa_data_lat),\n len(USWRF_toa_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in USWRF_toa_data_variable_names:\n USWRF_toa_data_series_cnt_OBAR = (\n USWRF_toa_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+USWRF_toa_file\n +\"...setting to NaN\")\n USWRF_toa_data_series_cnt_OBAR = np.full(\n (len(USWRF_toa_data_lat),\n len(USWRF_toa_data_lon)), np.nan\n )\n # USWRF sfc\n USWRF_sfc_file = os.path.join(\n series_analysis_file_dir, model,\n forecast_to_plot+'_USWRF_sfc.nc'\n )\n USWRF_sfc_data = netcdf.Dataset(\n USWRF_sfc_file\n )\n USWRF_sfc_data_lat = (\n USWRF_sfc_data.variables['lat'][:]\n )\n USWRF_sfc_data_lon = (\n USWRF_sfc_data.variables['lon'][:]\n )\n USWRF_sfc_data_variable_names = []\n for var in USWRF_sfc_data.variables:\n USWRF_sfc_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in USWRF_sfc_data_variable_names:\n USWRF_sfc_data_series_cnt_FBAR = (\n USWRF_sfc_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+USWRF_sfc_file\n +\"...setting to NaN\")\n USWRF_sfc_data_series_cnt_FBAR = np.full(\n (len(USWRF_sfc_data_lat),\n len(USWRF_sfc_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in USWRF_sfc_data_variable_names:\n USWRF_sfc_data_series_cnt_OBAR = (\n USWRF_sfc_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+USWRF_sfc_file\n +\"...setting to NaN\")\n USWRF_sfc_data_series_cnt_OBAR = np.full(\n (len(USWRF_sfc_data_lat),\n len(USWRF_sfc_data_lon)), np.nan\n )\n obs_calc_var = (\n DSWRF_toa_obsonly_data_series_cnt_OBAR\n - DSWRF_sfc_data_series_cnt_OBAR\n - USWRF_toa_data_series_cnt_OBAR\n + USWRF_sfc_data_series_cnt_OBAR\n )\n model_calc_var = (\n DSWRF_toa_obsonly_data_series_cnt_OBAR\n - DSWRF_sfc_data_series_cnt_FBAR\n - USWRF_toa_data_series_cnt_FBAR\n + USWRF_sfc_data_series_cnt_FBAR\n )\n elif var_name == 'LWEMIT': #longwave emitted\n # DLWRF sfc\n DLWRF_sfc_file = os.path.join(\n series_analysis_file_dir, model,\n forecast_to_plot+'_DLWRF_sfc.nc'\n )\n DLWRF_sfc_data = netcdf.Dataset(\n DLWRF_sfc_file\n )\n DLWRF_sfc_data_lat = (\n DLWRF_sfc_data.variables['lat'][:]\n )\n DLWRF_sfc_data_lon = (\n DLWRF_sfc_data.variables['lon'][:]\n )\n lat = DLWRF_sfc_data_lat\n lon = DLWRF_sfc_data_lon\n DLWRF_sfc_data_variable_names = []\n for var in DLWRF_sfc_data.variables:\n DLWRF_sfc_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in DLWRF_sfc_data_variable_names:\n DLWRF_sfc_data_series_cnt_FBAR = (\n DLWRF_sfc_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+DLWRF_sfc_file\n +\"...setting to NaN\")\n DLWRF_sfc_data_series_cnt_FBAR = np.full(\n (len(DLWRF_sfc_data_lat),\n len(DLWRF_sfc_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in DLWRF_sfc_data_variable_names:\n 
DLWRF_sfc_data_series_cnt_OBAR = (\n DLWRF_sfc_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+DLWRF_sfc_file\n +\"...setting to NaN\")\n DLWRF_sfc_data_series_cnt_OBAR = np.full(\n (len(DLWRF_sfc_data_lat),\n len(DLWRF_sfc_data_lon)), np.nan\n )\n # ULWRF toa\n ULWRF_toa_file = os.path.join(\n series_analysis_file_dir, model,\n forecast_to_plot+'_ULWRF_toa.nc'\n )\n ULWRF_toa_data = netcdf.Dataset(\n ULWRF_toa_file\n )\n ULWRF_toa_data_lat = (\n ULWRF_toa_data.variables['lat'][:]\n ) \n ULWRF_toa_data_lon = (\n ULWRF_toa_data.variables['lon'][:]\n )\n ULWRF_toa_data_variable_names = []\n for var in ULWRF_toa_data.variables:\n ULWRF_toa_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in ULWRF_toa_data_variable_names:\n ULWRF_toa_data_series_cnt_FBAR = (\n ULWRF_toa_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+ULWRF_toa_file\n +\"...setting to NaN\")\n ULWRF_toa_data_series_cnt_FBAR = np.full(\n (len(ULWRF_toa_data_lat),\n len(ULWRF_toa_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in ULWRF_toa_data_variable_names:\n ULWRF_toa_data_series_cnt_OBAR = (\n ULWRF_toa_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+ULWRF_toa_file\n +\"...setting to NaN\")\n ULWRF_toa_data_series_cnt_OBAR = np.full(\n (len(ULWRF_toa_data_lat),\n len(ULWRF_toa_data_lon)), np.nan\n )\n # ULWRF sfc\n ULWRF_sfc_file = os.path.join(\n series_analysis_file_dir, model,\n forecast_to_plot+'_ULWRF_sfc.nc'\n )\n ULWRF_sfc_data = netcdf.Dataset(\n ULWRF_sfc_file\n )\n ULWRF_sfc_data_lat = (\n ULWRF_sfc_data.variables['lat'][:]\n )\n ULWRF_sfc_data_lon = (\n ULWRF_sfc_data.variables['lon'][:]\n )\n ULWRF_sfc_data_variable_names = []\n for var in ULWRF_sfc_data.variables:\n ULWRF_sfc_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in ULWRF_sfc_data_variable_names:\n ULWRF_sfc_data_series_cnt_FBAR = (\n ULWRF_sfc_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+ULWRF_sfc_file\n +\"...setting to NaN\")\n ULWRF_sfc_data_series_cnt_FBAR = np.full(\n (len(ULWRF_sfc_data_lat),\n len(ULWRF_sfc_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in ULWRF_sfc_data_variable_names:\n ULWRF_sfc_data_series_cnt_OBAR = (\n ULWRF_sfc_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+ULWRF_sfc_file\n +\"...setting to NaN\")\n ULWRF_sfc_data_series_cnt_OBAR = np.full(\n (len(ULWRF_sfc_data_lat),\n len(ULWRF_sfc_data_lon)), np.nan\n )\n obs_calc_var = (\n DLWRF_sfc_data_series_cnt_OBAR\n + ULWRF_toa_data_series_cnt_OBAR\n - ULWRF_sfc_data_series_cnt_OBAR\n )\n model_calc_var = (\n DLWRF_sfc_data_series_cnt_FBAR\n + ULWRF_toa_data_series_cnt_FBAR\n - ULWRF_sfc_data_series_cnt_FBAR\n )\n elif var_name == 'SWALBDO': #shortwave surface albedo\n # DSWRF sfc\n DSWRF_sfc_file = os.path.join(\n series_analysis_file_dir, model,\n forecast_to_plot+'_DSWRF_sfc.nc'\n )\n DSWRF_sfc_data = netcdf.Dataset(\n DSWRF_sfc_file\n )\n DSWRF_sfc_data_lat = (\n DSWRF_sfc_data.variables['lat'][:]\n )\n DSWRF_sfc_data_lon = (\n DSWRF_sfc_data.variables['lon'][:]\n )\n lat = DSWRF_sfc_data_lat\n lon = DSWRF_sfc_data_lon\n DSWRF_sfc_data_variable_names = []\n for var in DSWRF_sfc_data.variables:\n 
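# The FBAR/OBAR extraction repeated above (open the series_analysis file,
# list its variable names, fall back to an all-NaN grid when a statistic
# is absent) could be factored into one helper. A minimal sketch under the
# assumption that netcdf is the netCDF4 module imported by this script;
# the name read_series_stat is hypothetical and not in the original.
import numpy as np
import netCDF4 as netcdf

def read_series_stat(nc_path, stat_name, scale=1):
    data = netcdf.Dataset(nc_path)
    lat = data.variables['lat'][:]
    lon = data.variables['lon'][:]
    if stat_name in data.variables:
        values = data.variables[stat_name][:] * scale
    else:
        print('WARNING: ' + stat_name + ' not in file ' + nc_path
              + '...setting to NaN')
        values = np.full((len(lat), len(lon)), np.nan)
    return lat, lon, values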
DSWRF_sfc_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in DSWRF_sfc_data_variable_names:\n DSWRF_sfc_data_series_cnt_FBAR = (\n DSWRF_sfc_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+DSWRF_sfc_file\n +\"...setting to NaN\")\n DSWRF_sfc_data_series_cnt_FBAR = np.full(\n (len(DSWRF_sfc_data_lat),\n len(DSWRF_sfc_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in DSWRF_sfc_data_variable_names:\n DSWRF_sfc_data_series_cnt_OBAR = (\n DSWRF_sfc_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+DSWRF_sfc_file\n +\"...setting to NaN\")\n DSWRF_sfc_data_series_cnt_OBAR = np.full(\n (len(DSWRF_sfc_data_lat),\n len(DSWRF_sfc_data_lon)), np.nan\n )\n # USWRF sfc\n USWRF_sfc_file = os.path.join(\n series_analysis_file_dir, model,\n forecast_to_plot+'_USWRF_sfc.nc'\n )\n USWRF_sfc_data = netcdf.Dataset(\n USWRF_sfc_file\n )\n USWRF_sfc_data_lat = (\n USWRF_sfc_data.variables['lat'][:]\n )\n USWRF_sfc_data_lon = (\n USWRF_sfc_data.variables['lon'][:]\n )\n USWRF_sfc_data_variable_names = []\n for var in USWRF_sfc_data.variables:\n USWRF_sfc_data_variable_names.append(str(var))\n if 'series_cnt_FBAR' in USWRF_sfc_data_variable_names:\n USWRF_sfc_data_series_cnt_FBAR = (\n USWRF_sfc_data.variables['series_cnt_FBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: FBAR values for \"+model+\" \"\n +\"not in file \"+USWRF_sfc_file\n +\"...setting to NaN\")\n USWRF_sfc_data_series_cnt_FBAR = np.full(\n (len(USWRF_sfc_data_lat),\n len(USWRF_sfc_data_lon)), np.nan\n )\n if 'series_cnt_OBAR' in USWRF_sfc_data_variable_names:\n USWRF_sfc_data_series_cnt_OBAR = (\n USWRF_sfc_data.variables['series_cnt_OBAR'][:]\n * var_scale\n )\n else:\n print(\"WARNING: OBAR values for \"+model+\" \"\n +\"not in file \"+USWRF_sfc_file\n +\"...setting to NaN\")\n USWRF_sfc_data_series_cnt_OBAR = np.full(\n (len(USWRF_sfc_data_lat),\n len(USWRF_sfc_data_lon)), np.nan\n )\n obs_calc_var = (\n USWRF_sfc_data_series_cnt_OBAR \n / DSWRF_sfc_data_series_cnt_OBAR\n )\n model_calc_var = (\n USWRF_sfc_data_series_cnt_FBAR\n / DSWRF_sfc_data_series_cnt_FBAR\n )\n if np.ma.is_masked(obs_calc_var):\n np.ma.set_fill_value(obs_calc_var, np.nan)\n obs_calc_var = obs_calc_var.filled()\n if np.ma.is_masked(model_calc_var):\n np.ma.set_fill_value(model_calc_var, np.nan)\n model_calc_var = model_calc_var.filled()\n # Plot observations\n if model_num == 1:\n print(\"Plotting \"+model_obtype+\" observations\")\n # Add cyclic point for obs data \n if py_map_pckg == 'cartopy':\n obs_calc_var_cyc, lon_cyc = (\n add_cyclic_point(obs_calc_var,\n coord=lon)\n )\n elif py_map_pckg == 'basemap':\n obs_calc_var_cyc, lon_cyc = addcyclic(\n obs_calc_var, lon\n )\n xo, yo = np.meshgrid(lon_cyc, lat)\n obs_area_avg = maps2d_plot_util.calculate_area_average(\n obs_calc_var, lat, lon,\n llcrnrlat_val, urcrnrlat_val, llcrnrlon_val, urcrnrlon_val\n )\n ax_obs.set_title(round(obs_area_avg, 3), loc='right')\n if np.all(np.isnan(levels)):\n if np.isnan(np.nanmax(obs_calc_var)):\n levels_max = 1\n else:\n levels_max = int(\n np.nanmax(obs_calc_var)\n ) + 1\n if np.isnan(np.nanmin(obs_calc_var)):\n levels_min = -1\n else:\n levels_min = int(\n np.nanmin(obs_calc_var)\n ) - 1\n levels = np.linspace(levels_min, levels_max, 11,\n endpoint=True)\n if np.count_nonzero(\n ~np.isnan(obs_calc_var)) != 0:\n if py_map_pckg == 'cartopy':\n CF1 = ax_obs.contourf(xo, yo,\n obs_calc_var_cyc,\n 
transform=ccrs.PlateCarree(),\n levels=levels, cmap=cmap,\n extend='both')\n # matplotlib/cartopy tries to close contour when\n # using cylic point, so need to plot contours\n # set contour labels, remove contour lines, and then\n # replot contour lines\n C1 = ax_obs.contour(xo, yo,\n obs_calc_var_cyc,\n transform=ccrs.PlateCarree(),\n levels=levels, colors='k',\n linewidths=1.0, extend='both')\n C1labels = ax_obs.clabel(C1, C1.levels,\n fmt='%g', colors='k')\n for c in C1.collections:\n c.set_visible(False)\n C1 = ax_obs.contour(xo, yo,\n obs_calc_var_cyc,\n transform=ccrs.PlateCarree(),\n levels=levels, colors='k',\n linewidths=1.0, extend='both')\n elif py_map_pckg == 'basemap':\n mox, moy = mo(xo, yo)\n CF1 = mo.contourf(mox, moy,\n obs_calc_var_cyc,\n levels=levels, cmap=cmap,\n extend='both')\n C1 = mo.contour(mox, moy,\n obs_calc_var_cyc,\n levels=levels, colors='k',\n linewidths=1.0, extend='both')\n C1labels = ax_obs.clabel(C1, C1.levels,\n fmt='%g', colors='k')\n # Plot model - obs\n print(\"Plotting \"+model+\" - \"+model_obtype)\n # Add cyclic point for model data\n if py_map_pckg == 'cartopy':\n model_calc_var_cyc, lon_cyc = (\n add_cyclic_point(model_calc_var,\n coord=lon)\n )\n elif py_map_pckg == 'basemap':\n model_calc_var_cyc, lon_cyc = addcyclic(\n model_calc_var, lon\n )\n model_obs_diff_calc_var = (\n model_calc_var - obs_calc_var\n )\n model_obs_diff_calc_var_cyc = (\n model_calc_var_cyc - obs_calc_var_cyc\n )\n x, y = np.meshgrid(lon_cyc, lat)\n model_obs_diff_area_avg = maps2d_plot_util.calculate_area_average(\n model_calc_var - obs_calc_var, lat, lon,\n llcrnrlat_val, urcrnrlat_val, llcrnrlon_val, urcrnrlon_val\n )\n ax.set_title(round(model_obs_diff_area_avg, 3), loc='right')\n if np.count_nonzero(\n ~np.isnan(model_calc_var - obs_calc_var)) != 0:\n if py_map_pckg == 'cartopy':\n CF = ax.contourf(x, y,\n model_obs_diff_calc_var_cyc,\n transform=ccrs.PlateCarree(),\n levels=levels_diff, cmap=cmap_diff,\n extend='both')\n elif py_map_pckg == 'basemap':\n mx, my = m(x, y)\n CF = m.contourf(mx, my,\n model_obs_diff_calc_var_cyc,\n levels=levels_diff, cmap=cmap_diff,\n extend='both')\n full_title = (\n var_info_title+' Mean Error\\n'\n +dates_title+' '+make_met_data_by_hrs_title+', '\n +forecast_to_plot_title\n )\n fig.suptitle(full_title, fontsize=18, fontweight='bold')\n noaa_img_axes = fig.add_axes([-0.01, 0.0, 0.01, 0.01])\n noaa_img_axes.axes.get_xaxis().set_visible(False)\n noaa_img_axes.axes.get_yaxis().set_visible(False)\n noaa_img_axes.axis('off')\n fig.figimage(noaa_logo_img_array, 1, 1, zorder=1, alpha=0.5)\n savefig_name = os.path.join(plotting_out_dir_imgs,\n verif_case_type+'_'+var_group_name\n +'_'+var_name+'_'+var_level\n +'_'+forecast_to_plot+'.png')\n print(\"Saving image as \"+savefig_name)\n plt.savefig(savefig_name, bbox_inches='tight')\n link_image_dir = os.path.join(\n DATA, RUN, 'metplus_output', 'images/.'\n )\n print(\"Linking image to \"+link_image_dir)\n os.system('ln -sf '+savefig_name+' '+link_image_dir)\n plt.close()\n","sub_path":"ush/plotting_scripts/plot_maps2d_model2obs_calc_vars_lat_lon_errors.py","file_name":"plot_maps2d_model2obs_calc_vars_lat_lon_errors.py","file_ext":"py","file_size_in_byte":39968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"433574573","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.mixture import GaussianMixture\r\n\r\nimport time\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\nfrom 
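# The contour-label workaround above (draw contours, label them, hide the
# collections, then redraw) exists because matplotlib/cartopy closes
# contours across the added cyclic point. A small standalone illustration
# of add_cyclic_point on synthetic data, assuming cartopy is installed:
import numpy as np
from cartopy.util import add_cyclic_point

lon = np.arange(0, 360, 10.0)
lat = np.arange(-90, 91, 10.0)
field = np.cos(np.radians(lat))[:, None] * np.sin(np.radians(lon))[None, :]
field_cyc, lon_cyc = add_cyclic_point(field, coord=lon)
print(field_cyc.shape, lon_cyc[-1])  # one extra longitude column at 360.0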
sklearn.metrics import adjusted_mutual_info_score,adjusted_rand_score,homogeneity_completeness_v_measure\r\n\r\n\r\nimport LoadData\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn import metrics\r\n\r\n\r\nnp.random.seed(6)\r\n\r\nif __name__ == '__main__':\r\n    X1, y1 = LoadData.load_wine_quality_data()\r\n    labels1 = y1\r\n    features1 = X1\r\n\r\n    X2, y2 = LoadData.load_ozone_data()\r\n    labels2 = y2\r\n    features2 = X2\r\n\r\n    scaler = StandardScaler()\r\n    data_scaled1 = scaler.fit_transform(features1)\r\n    features1 = data_scaled1\r\n\r\n    scaler = StandardScaler()\r\n    data_scaled2 = scaler.fit_transform(features2)\r\n    features2 = data_scaled2\r\n\r\n    #For Wine Data Set 1\r\n    km_dic = {\"n_clusters\": 6, \"init\": \"k-means++\", \"max_iter\": 500}\r\n    EM_dic = {\"n_components\": 6, \"init_params\": \"kmeans\", \"max_iter\": 500}\r\n\r\n    #n_candidates = [i*5 + 1 for i in range(1,20)]\r\n    n_candidates=[2,3,4,5,6,7,8,9,10,15,20,30,40,50]\r\n\r\n    nclusters=[]\r\n    KM_score=[]\r\n    KM_homo=[]\r\n    KM_com=[]\r\n    KM_v=[]\r\n    KM_s=[]\r\n\r\n    EM_score = []\r\n    EM_homo = []\r\n    EM_com = []\r\n    EM_v = []\r\n    EM_s = []\r\n\r\n    KM_time=[]\r\n    EM_time=[]\r\n    AIC=[]\r\n    BIC=[]\r\n    Inertia=[]\r\n    round_start = time.time()\r\n    for n in n_candidates:\r\n        km_dic[\"n_clusters\"] = n\r\n        EM_dic[\"n_components\"] = n\r\n\r\n        row = [n]\r\n        nclusters.append(n)\r\n        #KMeans clustering\r\n        model = KMeans(**km_dic)\r\n        start_time = time.time()\r\n        model.fit(features2)\r\n        preds2 = model.predict(features2)\r\n        #model.fit(X_train)\r\n        #y_test_pred = model.predict(X_test)\r\n        end_time = time.time()\r\n\r\n        # Silhouette score\r\n        #sil = metrics.silhouette_score(X_test, y_test_pred, metric='euclidean')\r\n\r\n        # Variance explained by the cluster\r\n        #var = clf.score(X_test)\r\n        #array_var.append(var)\r\n        #homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(y_test, y_test_pred)\r\n        homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(labels2, preds2)\r\n        sil=metrics.silhouette_score(features2, preds2, metric='euclidean')\r\n        KM_homo.append(homogeneity)\r\n        KM_com.append(completeness)\r\n        KM_v.append(v_measure)\r\n        KM_s.append(sil)\r\n        KM_time.append(end_time-start_time)\r\n        KM_score.append(model.score(features2))\r\n        Inertia.append(model.inertia_)\r\n\r\n\r\n        #row += [-model.score(features), adjusted_mutual_info_score(labels, preds), adjusted_rand_score(labels, preds),\r\n        #homogeneity, completeness, v_measure, end_time - start_time]\r\n\r\n        model = GaussianMixture(**EM_dic)\r\n        start_time = time.time()\r\n        model.fit(features2)\r\n        preds2 = model.predict(features2)\r\n        #model.fit(X_train)\r\n        #y_test_pred = model.predict(X_test)\r\n        end_time = time.time()\r\n        #homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(y_test, y_test_pred)\r\n        #sil = metrics.silhouette_score(X_test, y_test_pred, metric='euclidean')\r\n        homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(labels2, preds2)\r\n        sil = metrics.silhouette_score(features2, preds2, metric='euclidean')\r\n        EM_homo.append(homogeneity)\r\n        EM_com.append(completeness)\r\n        EM_v.append(v_measure)\r\n        EM_s.append(sil)\r\n        AIC.append(model.aic(features2))\r\n        BIC.append(model.bic(features2))\r\n\r\n        EM_time.append(end_time-start_time)\r\n        EM_score.append(model.score(features2))\r\n\r\n    round_end = time.time()\r\n\r\n    print(\"Total Time:\", round_end - round_start)\r\n\r\n    # score\r\n    plt.figure()\r\n    plt.grid(True)\r\n    plt.plot(nclusters, KM_homo, color='blue', label=\"KM 
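# The loop above sweeps cluster counts and records homogeneity,
# completeness, v-measure, silhouette, inertia, and BIC for KMeans and
# GaussianMixture. A compact sketch of the same elbow/BIC comparison on
# synthetic data; the blob parameters here are illustrative only.
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

X, _ = make_blobs(n_samples=300, centers=4, random_state=6)
for k in (2, 3, 4, 5, 6):
    km = KMeans(n_clusters=k, init='k-means++', n_init=10).fit(X)
    gm = GaussianMixture(n_components=k, random_state=6).fit(X)
    # inertia should elbow and BIC should bottom out near the true k=4
    print(k, round(km.inertia_, 1), round(gm.bic(X), 1))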
homogeneity\")\r\n plt.plot(nclusters, KM_com, color='green', label=\"KM completeness\")\r\n plt.plot(nclusters, KM_v, color='red', label=\"KM v-measure\")\r\n plt.plot(nclusters, KM_s, color='orange', label=\"KM silhouette\")\r\n\r\n\r\n plt.legend(loc='best')\r\n plt.title(\"KM performance evaluation on dataset2\")\r\n plt.xlabel(\"Number of clusters\")\r\n plt.ylabel(\"Score\")\r\n plt.savefig(\"KM performance Data2\")\r\n\r\n plt.figure()\r\n plt.grid(True)\r\n plt.plot(nclusters, EM_homo, linestyle='dashed', color='blue', label=\"EM homogeneity\")\r\n plt.plot(nclusters, EM_v, linestyle='dashed', color='red', label=\"EM v-measure\")\r\n plt.plot(nclusters, EM_com, linestyle='dashed', color='green', label=\"EM completeness\")\r\n plt.plot(nclusters, EM_s, linestyle='dashed', color='orange', label=\"EM silhouette\")\r\n plt.legend(loc='best')\r\n plt.title(\"EM performance evaluation on dataset2\")\r\n plt.xlabel(\"Number of clusters\")\r\n plt.ylabel(\"Score\")\r\n plt.savefig(\"EM performance Data2\")\r\n\r\n #Runtime\r\n plt.figure()\r\n plt.plot(nclusters,KM_time, label=\"KM\")\r\n plt.plot(nclusters, EM_time, label=\"EM\", linestyle='dashed')\r\n plt.title(\"KM/EM running time on dataset2\")\r\n plt.legend(loc='best')\r\n plt.xlabel(\"Number of clusters\")\r\n plt.ylabel(\"Running time (s)\")\r\n plt.grid(True)\r\n plt.savefig(\"KMvsEM Time Data2\")\r\n\r\n #KM score\r\n plt.figure()\r\n plt.plot(nclusters, KM_score,marker='o')\r\n plt.title(\"KM score for dataset2\")\r\n plt.grid(True)\r\n plt.xlabel(\"Number of clusters\")\r\n plt.ylabel(\"Sum of squared distance\")\r\n plt.savefig(\"KM Score Data2\")\r\n\r\n # KM inertia\r\n plt.figure()\r\n plt.plot(nclusters, Inertia, marker='o')\r\n plt.title(\"KM Inertia Data2\")\r\n plt.grid(True)\r\n plt.xlabel(\"Number of clusters\")\r\n plt.ylabel(\"Inertia\")\r\n plt.savefig(\"KM Inertia Data2\")\r\n\r\n #EM score\r\n plt.figure()\r\n plt.plot(nclusters, EM_score, marker='o',linestyle='dashed')\r\n plt.title(\"EM score for dataset2\")\r\n plt.grid(True)\r\n plt.xlabel(\"Number of clusters\")\r\n plt.ylabel(\"Likelihood\")\r\n plt.tight_layout(True)\r\n plt.savefig(\"EM Score Data2\")\r\n\r\n #BIC.AIC score\r\n plt.figure()\r\n plt.plot(nclusters,BIC, marker='o',linestyle='dashed')\r\n #plt.plot(nclusters, AIC, label=\"AIC\")\r\n plt.xlabel(\"Number of clusters\")\r\n plt.title(\"EM BIC score for dataset2\")\r\n plt.grid(True)\r\n plt.ylabel(\"BIC score\")\r\n plt.savefig(\"EM BIC Data2\")","sub_path":"Assignment3/Code/KMEM_Data2.py","file_name":"KMEM_Data2.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"311623106","text":"\"\"\"Calculates a Checksum from CSV\"\"\"\n\nimport csv\n\ndef get_values(row):\n \"\"\"Gets the largest and smallest values from a list of numbers\"\"\"\n smallest = row[0]\n largest = row[0]\n for curr_num in row:\n if int(curr_num) < int(smallest):\n smallest = curr_num\n if int(curr_num) > int(largest):\n largest = curr_num\n return smallest, largest\n\ndef main():\n \"\"\"Calculates a Checksum from CSV\"\"\"\n with open(\"../puzzle_input.csv\") as file:\n csv_reader = csv.reader(file, delimiter='\\t')\n num_list = []\n for row in csv_reader:\n smallest, largest = get_values(row)\n difference = int(largest) - int(smallest)\n num_list.append(int(difference))\n checksum = sum(num_list)\n print(\"Checksum: %s\" % checksum)\n\nif __name__ == '__main__':\n main()\n 
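# get_values in the checksum script above scans each row by hand; Python's
# builtins give the same smallest/largest pair in one line. A minimal
# equivalent sketch (returning ints rather than the original strings):
def get_values(row):
    nums = [int(x) for x in row]
    return min(nums), max(nums)

assert get_values(['5', '1', '9', '5']) == (1, 9)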
","sub_path":"day2/python/solve_puzzle_1.py","file_name":"solve_puzzle_1.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"131521796","text":"\"\"\"\n# Exercise: Assignment-2\n# Write a Python function, sumofdigits, that takes in one number\n#and returns the sum of digits of given number.\n# This function takes in one number and returns one number.\n\"\"\"\ndef sumofdigits(var_n):\n '''\n n is positive Integer\n returns: a positive integer, the sum of digits of n.\n '''\n # Your code here\n while var_n >= 0:\n if var_n == 0:\n return 0\n return (var_n % 10) + sumofdigits(var_n // 10)\ndef main():\n \"\"\"\n This function is to call the sumofdigits function\n \"\"\"\n var_a = input()\n print(sumofdigits(int(var_a)))\nif __name__ == \"__main__\":\n main()\n","sub_path":"cspp1-assignments/m8/p2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"462511249","text":"import requests\nfrom decimal import Decimal\nfrom telegram import InlineQueryResultArticle, ParseMode,InputTextMessageContent\nfrom uuid import uuid4\n\nclass Coin:\n\n\tdef __init__(self, query, rank, noCommas):\n\n\t\tself.data = requests.get('https://api.coinmarketcap.com/v1/ticker/?limit=10000',\n\t\t headers={'Cache-Control': 'no-cache'}).json()\n\n\t\tfor x in range (0, len(self.data)):\n\n\t\t\tif query.upper() == self.data[x]['symbol'] or \\\n\t\t\t\tquery.lower() == self.data[x]['id'] or \\\n\t\t\t\tquery.lower() == (self.data[x]['name']).lower() or \\\n\t\t\t\tquery == self.data[x]['rank']:\n\n\t\t\t\tself.symbol = self.data[x]['symbol']\n\t\t\t\tself.id = self.data[x]['id']\n\t\t\t\tself.rank = str(int(x) + 1)\n\t\t\t\tself.name = self.data[x]['name']\n\t\t\t\tself.supply = str(\"{:,}\".format(Decimal(float(self.data[x]['available_supply']))))\n\t\t\t\tself.percentChange = str(self.data[x]['percent_change_24h'])\n\t\t\t\tself.marketCap = str(\"{:,}\".format(Decimal(\\\n\t\t\t\t\tfloat(self.data[x]['market_cap_usd']))))\n\n\t\t\t\tif noCommas == True or float(self.data[x]['price_usd']) < 1.00:\n\t\t\t\t\tself.price_USD = str(self.data[x]['price_usd'])\n\n\t\t\t\telse:\n\t\t\t\t\tprice = Decimal((self.data[x]['price_usd'])).\\\n\t\t\t\t\tquantize(Decimal('1.00'), rounding = 'ROUND_HALF_DOWN')\n\t\t\t\t\tself.price_USD = str(\"{:,}\".format(price))\n\n\t\t\t\tself.summary = (\"***\" + self.name + \"***\" + \" (\" + self.symbol + \")\" + '\\n \\n' + \\\n\t\t\t\t\t'***Rank***: #' + str(self.rank) + \" out of \" + str(len(self.data)) + \"\\n\" + \\\n\t\t\t\t\t'***Price***: $' + str(self.price_USD) + '\\n' + '***Market Capitalization***: $' \\\n\t\t\t\t\t+ str(self.marketCap) + '\\n' + '***Circulating Supply***: ' + self.supply + \" \" + \\\n\t\t\t\t\tself.symbol + '\\n' + '***24 Hour Percent Change***: ' + \\\n\t\t\t\t\tself.percentChange + \"% \\n\")\n\ndef get_coin_info(query):\n\n\tcoin = Coin(query, None, False)\n\n\tresults = [\n\n\t\t# Summary\n\t\tInlineQueryResultArticle(\n \tid=uuid4(),\n \ttitle=(coin.name + \" (\" + coin.symbol + \")\"),\n \tdescription=\"#\" + coin.rank + \" out of \" + str(len(coin.data)),\n \tthumb_url='https://files.coinmarketcap.com/static/img/coins/128x128/' \\\n \t+ coin.id + '.png',\n \tinput_message_content=InputTextMessageContent(coin.summary, \\\n \t\tParseMode.MARKDOWN)),\n\n\t\t# USD Price\n\t\tInlineQueryResultArticle(\n \t\tid=uuid4(),\n 
\t\ttitle=(\"Price\"),\n \t\tdescription=\"$\" + coin.price_USD,\n \t\tthumb_url=\"https://imgur.com/7RCGCoc.png\",\n \t\tinput_message_content=InputTextMessageContent(\"1 \" + coin.symbol + \" = $\" \\\n \t\t\t+ coin.price_USD)),\n\n\t\t# Market Capitalization (USD)\n \tInlineQueryResultArticle(\n \tid=uuid4(),\n \ttitle=(\"Market Capitalization\"),\n \tdescription=\"$\" + coin.marketCap,\n \tthumb_url=\"https://i.imgur.com/UMczLVP.png\",\n \tinput_message_content=InputTextMessageContent(\"Market Capitalization of \" \\\n \t\t+ coin.name + \" (\" + coin.symbol + \")\" + \": $\" + coin.marketCap)),\n\n \t# Circulating Supply \n \tInlineQueryResultArticle(\n \tid=uuid4(),\n \ttitle=(\"Circulating Supply\"),\n \tdescription=coin.supply + \" \" + coin.symbol,\n \tthumb_url=(\"https://i.imgur.com/vXAN23U.png\"),\n \tinput_message_content=InputTextMessageContent(\"Circulating Supply of \" \\\n \t\t+ coin.name + \" (\" + coin.symbol + \")\" + \": \" + coin.supply + \" \" \\\n \t\t+ coin.symbol)),\n\n \t# 24 Hour Percent Change\n \tInlineQueryResultArticle(\n \tid=uuid4(),\n \ttitle=(\"Percent Change (24 hours)\"),\n \tdescription=coin.percentChange + \"%\",\n \tthumb_url=(\"https://imgur.com/iAoXFQc.png\"),\n \tinput_message_content=InputTextMessageContent(\"24 Hour Change in \" \\\n \t\t+ coin.name + \" (\" + coin.symbol + \")\" + \" Price: \" + coin.percentChange \\\n \t\t+ \"%\"))\n\t]\n\n\treturn results","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"64581852","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n\nimport xml.etree.cElementTree as ET\nimport re\nimport codecs\nimport json\n\n\nCREATED = [ \"version\", \"changeset\", \"timestamp\", \"user\", \"uid\"]\nPOS=[\"lat\",\"lon\"]\nexpected_street_types = [\"Street\", \"Avenue\", \"Boulevard\", \"Drive\", \"Court\", \"Place\", \"Square\", \"Lane\", \"Road\", \n \"Trail\", \"Parkway\", \"Commons\"]\n\n#street mappings\nstreet_mapping = { \"St\": \"Street\",\n \"st\": \"Street\",\n \"St.\": \"Street\",\n \"st.\": \"Street\",\n \"Ave\":\"Avenue\",\n \"ave\":\"Avenue\",\n \"Ave.\":\"Avenue\",\n \"ave.\":\"Avenue\",\n \"Rd.\":\"Road\",\n \"Rd\" :\"Road\",\n \"rd\" :\"Road\"\n }\n\n\n#regular expression for tags\nlower = re.compile(r'^[a-z]*$')\nlower_colon = re.compile(r'^addr:[a-z]*$')\n\n#regular expression for streets\nstreet_type_re = re.compile(r'\\b\\S+\\.?$', re.IGNORECASE)\n\n#regular expression for phone numbers\nlandline_number=re.compile('^(\\+91)?(-|\\s)?(080|80)?(-|\\s)?\\d{8}$')\nmobile_number=re.compile('(\\+91)?(-|\\s)?\\d{10}')\n\n\n\n#auditing and updating the street types\ndef audit_street_type(street_name):\n street_type=street_name.rsplit(' ',1)\n try:\n if street_type[1] not in expected_street_types:\n if street_type[1] in street_mapping:\n street_type[1]=street_mapping[street_type[1]]\n street_name=street_type[0]+' '+street_type[1]\n except:\n pass\n\n return street_name\n\n\n#updating the state\ndef audit_state(state):\n return \"Karnataka\"\n\n#updating the city\ndef audit_city(city):\n return \"Bengaluru\"\n\n\n#updating the phone numbers\ndef audit_phone_number(phonenumber):\n l=landline_number.match(phonenumber) #landline numbers\n m=mobile_number.match(phonenumber) #mobile numbers\n if m:\n return \"+91 \"+phonenumber[-10:-8]+\" \"+phonenumber[-8:-4]+\" \"+phonenumber[-4:]\n \n elif l:\n return \"+91 80 \"+ phonenumber[-8:]\n\n else:\n phonenumber=''.join(e for e in phonenumber if 
e.isalnum())\n return \"+91 \"+phonenumber[-10:-8]+\" \"+phonenumber[-8:-4]+\" \"+phonenumber[-4:]\n \n\ndef shape_element(element):\n node={}\n node['created']={}\n p=1\n k=1\n nd=1\n \n if element.tag==\"way\" or element.tag==\"node\":\n \n #firstlevel atrributes\n node['type']=element.tag\n for attributes in element.attrib:\n if attributes in CREATED:\n node['created'][attributes]=element.attrib[attributes]\n \n \n elif attributes in POS:\n if p==1:\n node['pos']=[]\n \n #adding latitude and longitude to node position \n node['pos'].append(float(element.attrib['lat']))\n node['pos'].append(float(element.attrib['lon']))\n p+=1\n \n elif attributes == \"id\":\n node['_id']=element.attrib['id']\n \n else: \n node[attributes]=element.attrib[attributes]\n \n \n #for second level tags tag and nd \n for secondlevel in element.iter():\n if secondlevel.tag==\"tag\":\n m=lower_colon.match(secondlevel.attrib['k'])\n if m:\n if k==1:\n node['address']={}\n k+=1\n \n address_type=m.group().split(':')[1] \n if address_type==\"street\":\n node['address'][address_type]=audit_street_type(secondlevel.attrib['v'])\n \n elif address_type==\"state\":\n node['address'][address_type]=audit_state(secondlevel.attrib['v'])\n \n elif address_type==\"city\":\n node['address'][address_type]=audit_city(secondlevel.attrib['v'])\n \n else:\n node['address'][address_type]=secondlevel.attrib['v']\n \n \n elif lower.match(secondlevel.attrib['k']):\n if secondlevel.attrib['k']==\"phone\":\n node[secondlevel.attrib['k']]=audit_phone_number(secondlevel.attrib['v'])\n else: \n node[secondlevel.attrib['k']]=secondlevel.attrib['v']\n \n elif secondlevel.tag==\"nd\":\n if nd==1:\n node['node_refs']=[]\n nd+=1\n node['node_refs'].append(secondlevel.attrib['ref'])\n \n return node \n \n else:\n return None\n \n#opening the input file and then writing updated data to json file \ndef process_map(file_in):\n file_out=\"{0}.json\".format(file_in)\n with codecs.open(file_out,\"w\") as fo:\n context = ET.iterparse(file_in, events=('start', 'end'))\n _, root = next(context)\n for event,element in context:\n if event==\"end\":\n root.clear()\n elif event==\"start\":\n el=shape_element(element)\n if el:\n fo.write(json.dumps(el)+\"\\n\")\n \n\nprocess_map(\"bengaluru_india.osm\")\n\n","sub_path":"p3 Data Wrangling.py","file_name":"p3 Data Wrangling.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"243545732","text":"from __future__ import absolute_import, division, print_function\n\nfrom operator import add\nfrom itertools import chain\n\ndef inc(x):\n return x + 1\n\ndef ishashable(x):\n \"\"\" Is x hashable?\n\n Example\n -------\n\n >>> ishashable(1)\n True\n >>> ishashable([1])\n False\n \"\"\"\n try:\n hash(x)\n return True\n except TypeError:\n return False\n\n\ndef istask(x):\n \"\"\" Is x a runnable task?\n\n A task is a tuple with a callable first argument\n\n Example\n -------\n\n >>> inc = lambda x: x + 1\n >>> istask((inc, 1))\n True\n >>> istask(1)\n False\n \"\"\"\n return isinstance(x, tuple) and x and callable(x[0])\n\n\ndef get(d, key, get=None, concrete=True, **kwargs):\n \"\"\" Get value from Dask\n\n Example\n -------\n\n >>> inc = lambda x: x + 1\n >>> d = {'x': 1, 'y': (inc, 'x')}\n\n >>> get(d, 'x')\n 1\n >>> get(d, 'y')\n 2\n\n See Also\n --------\n set\n \"\"\"\n get = get or _get\n if isinstance(key, list):\n v = (get(d, k, get=get, concrete=concrete) for k in key)\n if concrete:\n v = list(v)\n elif 
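# audit_phone_number above normalizes Bengaluru numbers to
# '+91 80 XXXXXXXX' (landline) or '+91 XX XXXX XXXX' (mobile). A minimal
# self-contained check of the two regexes as defined in that script; the
# expected outputs in the comments assume its formatting branches:
import re

landline_number = re.compile(r'^(\+91)?(-|\s)?(080|80)?(-|\s)?\d{8}$')
mobile_number = re.compile(r'(\+91)?(-|\s)?\d{10}')
assert landline_number.match('08012345678')    # -> '+91 80 12345678'
assert mobile_number.match('+91-9876543210')   # -> '+91 98 7654 3210'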
ishashable(key) and key in d:\n v = d[key]\n elif istask(key):\n v = key\n else:\n return key\n\n if istask(v):\n func, args = v[0], v[1:]\n args2 = [get(d, arg, get=get, concrete=False) for arg in args]\n return func(*[get(d, arg, get=get) for arg in args2])\n else:\n return v\n\n_get = get\n\n\ndef _deps(dsk, arg):\n \"\"\" Get dependencies from keys or tasks\n\n Helper function for get_dependencies.\n\n >>> dsk = {'x': 1, 'y': 2}\n\n >>> _deps(dsk, 'x')\n ['x']\n >>> _deps(dsk, (add, 'x', 1))\n ['x']\n\n >>> _deps(dsk, (add, 'x', (inc, 'y'))) # doctest: +SKIP\n ['x', 'y']\n \"\"\"\n if istask(arg):\n result = []\n for a in arg[1:]:\n result.extend(_deps(dsk, a))\n return result\n try:\n if arg not in dsk:\n return []\n except TypeError: # not hashable\n return []\n return [arg]\n\n\ndef get_dependencies(dsk, task, as_list=False):\n \"\"\" Get the immediate tasks on which this task depends\n\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 'a': (add, (inc, 'x'), 1)}\n\n >>> get_dependencies(dsk, 'x')\n set([])\n\n >>> get_dependencies(dsk, 'y')\n set(['x'])\n\n >>> get_dependencies(dsk, 'z') # doctest: +SKIP\n set(['x', 'y'])\n\n >>> get_dependencies(dsk, 'w') # Only direct dependencies\n set(['z'])\n\n >>> get_dependencies(dsk, 'a') # Ignore non-keys\n set(['x'])\n \"\"\"\n args = [dsk[task]]\n result = []\n while args:\n arg = args.pop()\n if istask(arg):\n args.extend(arg[1:])\n elif isinstance(arg, list):\n args.extend(arg)\n else:\n try:\n result.append(arg)\n except TypeError:\n pass\n if not result:\n return [] if as_list else set()\n rv = []\n for x in result:\n rv.extend(_deps(dsk, x))\n return rv if as_list else set(rv)\n\n\ndef flatten(seq):\n \"\"\"\n\n >>> list(flatten([1]))\n [1]\n\n >>> list(flatten([[1, 2], [1, 2]]))\n [1, 2, 1, 2]\n\n >>> list(flatten([[[1], [2]], [[1], [2]]]))\n [1, 2, 1, 2]\n\n >>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples\n [(1, 2), (1, 2)]\n\n >>> list(flatten((1, 2, [3, 4]))) # support heterogeneous\n [1, 2, 3, 4]\n \"\"\"\n for item in seq:\n if isinstance(item, list):\n for item2 in flatten(item):\n yield item2\n else:\n yield item\n\n\ndef reverse_dict(d):\n \"\"\"\n\n >>> a, b, c = 'abc'\n >>> d = {a: [b, c], b: [c]}\n >>> reverse_dict(d) # doctest: +SKIP\n {'a': set([]), 'b': set(['a']}, 'c': set(['a', 'b'])}\n \"\"\"\n terms = list(d.keys()) + list(chain.from_iterable(d.values()))\n result = dict((t, set()) for t in terms)\n for k, vals in d.items():\n for val in vals:\n result[val].add(k)\n return result\n\n\ndef cull(dsk, keys):\n \"\"\" Return new dask with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n ``keys`` may be a single key or list of keys.\n\n Example\n -------\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}\n >>> cull(d, 'out') # doctest: +SKIP\n {'x': 1, 'out': (add, 'x', 10)}\n \"\"\"\n if not isinstance(keys, list):\n keys = [keys]\n nxt = set(keys)\n seen = nxt\n while nxt:\n cur = nxt\n nxt = set()\n for item in cur:\n for dep in get_dependencies(dsk, item):\n if dep not in seen:\n nxt.add(dep)\n seen.update(nxt)\n return dict((k, v) for k, v in dsk.items() if k in seen)\n\n\ndef subs(task, key, val):\n \"\"\" Perform a substitution on a task\n\n Example\n -------\n\n >>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP\n (inc, 1)\n \"\"\"\n if not istask(task):\n if task == key:\n return val\n elif isinstance(task, list):\n return [subs(x, key, val) for x in task]\n else:\n return task\n newargs = []\n 
for arg in task[1:]:\n if istask(arg):\n arg = subs(arg, key, val)\n elif isinstance(arg, list):\n arg = [subs(x, key, val) for x in arg]\n elif arg == key:\n arg = val\n newargs.append(arg)\n return task[:1] + tuple(newargs)\n\n\ndef fuse(dsk):\n \"\"\" Return new dask with linear sequence of tasks fused together.\n\n This may be used as an optimization step.\n\n Example\n -------\n\n >>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> fuse(d) # doctest: +SKIP\n {'c': (inc, (inc, 1))}\n \"\"\"\n # locate all members of linear chains\n parents = {}\n deadbeats = set()\n for parent in dsk:\n deps = get_dependencies(dsk, parent, as_list=True)\n for child in deps:\n if child in parents:\n del parents[child]\n deadbeats.add(child)\n elif len(deps) > 1:\n deadbeats.add(child)\n elif child not in deadbeats:\n parents[child] = parent\n\n # construct the chains from ancestor to descendant\n chains = []\n children = dict(map(reversed, parents.items()))\n while parents:\n child, parent = parents.popitem()\n chain = [child, parent]\n while parent in parents:\n parent = parents.pop(parent)\n del children[parent]\n chain.append(parent)\n chain.reverse()\n while child in children:\n child = children.pop(child)\n del parents[child]\n chain.append(child)\n chains.append(chain)\n\n # create a new dask with fused chains\n rv = {}\n fused = set()\n for chain in chains:\n child = chain.pop()\n val = dsk[child]\n while chain:\n parent = chain.pop()\n val = subs(dsk[parent], child, val)\n fused.add(child)\n child = parent\n fused.add(child)\n rv[child] = val\n\n for key, val in dsk.items():\n if key not in fused:\n rv[key] = val\n return rv\n\n","sub_path":"dask/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"11707079","text":"# implementation of binary search tree\n# where all left nodes are smaller than root\n# and all right nodes are greater than root\n\n\nclass BinarySearchTreeNode:\n def __init__(self, data, left=None, right=None):\n # data for root node\n self.data = data\n # left child\n self.left = left\n # right child\n self.right = right\n\n def get_data(self):\n return self.data\n\n def set_data(self, data):\n self.data = data\n\n def get_left(self):\n return self.left\n\n def get_right(self):\n return self.right\n\n def insert(self, data):\n\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = BinarySearchTreeNode(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = BinarySearchTreeNode(data)\n else:\n self.right.insert(data)\n else:\n self.data = data\n\n def pre_order_traversal(self, root):\n result = []\n if root:\n result.append(root.data)\n result = result + self.pre_order_traversal(root.get_left())\n result = result + self.pre_order_traversal(root.get_right())\n\n return result\n\n def in_order_traversal(self, root):\n result = []\n if root:\n result = self.in_order_traversal(root.get_left())\n result.append(root.data)\n result = result + self.in_order_traversal(root.get_right())\n\n return result\n\n def post_order_traversal(self, root):\n result = []\n if root:\n result = self.post_order_traversal(root.get_left())\n result = result + self.post_order_traversal(root.get_right())\n result.append(root.data)\n\n return result\n\n def level_order_traversal(self, root):\n if root == None:\n return []\n result = []\n nodes = [root]\n while nodes:\n for node in nodes:\n 
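# cull and fuse above are graph optimizations: cull drops tasks that the
# requested keys never reach, and fuse collapses linear chains into nested
# tasks. A tiny worked example on a toy graph, using the get/cull/fuse
# definitions from the dask.core excerpt above:
inc = lambda x: x + 1
dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b'), 'unused': (inc, 'c')}
culled = cull(dsk, 'c')   # {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
fused = fuse(culled)      # {'c': (inc, (inc, 1))}
assert get(fused, 'c') == 3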
result.append(node.data)\n\n new_nodes = []\n for node in nodes:\n if node.left:\n new_nodes.append(node.get_left())\n if node.right:\n new_nodes.append(node.get_right())\n\n nodes = new_nodes\n return result\n\n\nroot = BinarySearchTreeNode(27)\nroot.insert(14)\nroot.insert(35)\nroot.insert(10)\nroot.insert(19)\nroot.insert(31)\nroot.insert(42)\n\n\nprint(root.pre_order_traversal(root))\nprint(root.in_order_traversal(root))\nprint(root.post_order_traversal(root))\nprint(root.level_order_traversal(root))\n","sub_path":"DSApp/PythonFiles/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"225835294","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param {ListNode} l1\n # @param {ListNode} l2\n # @return {ListNode}\n def addTwoNumbers(self, l1, l2):\n result = ListNode(0)\n one = l1\n two = l2\n add = result\n flag = 0\n while True:\n if one is None and two is None and flag == 0:\n break\n num1 = 0 if one is None else one.val\n num2 = 0 if two is None else two.val\n flag, add_sum = divmod(num1+num2+flag, 10)\n add.next = ListNode(add_sum)\n add = add.next\n one = None if one is None else one.next\n two = None if two is None else two.next\n return result.next\n\n","sub_path":"algorithms/2.AddTwoNumbers.py","file_name":"2.AddTwoNumbers.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"62170512","text":"import argparse\nimport logging\nimport os\nimport sys\nimport pickle\nimport time\n\nimport gbd.constants as gbd\nfrom db_tools import ezfuncs\nfrom cluster_utils.loggers import create_logger_in_memory\n\nimport dalynator.app_common as ac\nfrom dalynator.constants import FILE_PERMISSIONS, UMASK_PERMISSIONS\nfrom dalynator.makedirs_safely import makedirs_safely\nfrom dalynator.get_yld_data import get_como_folder_structure\nimport dalynator.argument_pool as arg_pool\nimport dalynator.tool_objects as to\n\n# Contains all command line parsers for burdenator and dalynator.\n# Functions named construct_parser_X return a new parser object\n# Functions named add_to_parser_X take a parser as input, add more arguments\n# to it, and return that enhanced parser\n# Functions named construct_args_X take a parser and a string\n# (default is stdin) and return the parsed arguments\n\n# There are four consumers of this file, the cross product of\n# {pipeline, run_all}, and {dalynator, burdenator}\n# The run_all functions take a superset of the pipeline arguments\n\n# \"shared\" arguments are used by all four\n# \"run_all\" arguments are only used by the run_all functions\n\n# Valid DALYnator and burdenator measures\nVALID_DALYNATOR_MEASURES = [gbd.measures.DALY]\nVALID_BURDENATOR_MEASURES = [gbd.measures.DEATH, gbd.measures.YLD,\n gbd.measures.YLL, gbd.measures.DALY]\n\n# File permissions for output files, in python 3 octal syntax\nos.umask(UMASK_PERMISSIONS)\n\n# Linux user group to own the files\nIHME_CENTRAL_COMP_GROUP = 'GROUP'\n\n\ndef calculate_filenames(output_dir, log_dir, measure_id, location_id, year_id):\n \"\"\"Returns the output file name (just the basename), plus the full path to\n the log file\n \"\"\"\n output_file = calculate_output_filename(output_dir, measure_id,\n location_id, year_id)\n stdout_log = calculate_log_file_name(\n os.path.join(log_dir, str(location_id)), 
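# level_order_traversal above rebuilds a fresh node list for every level;
# a deque yields the same breadth-first order with a single queue. A
# minimal sketch reusing the BinarySearchTreeNode class defined above:
from collections import deque

def level_order_deque(root):
    result, queue = [], deque([root] if root else [])
    while queue:
        node = queue.popleft()
        result.append(node.get_data())
        if node.get_left():
            queue.append(node.get_left())
        if node.get_right():
            queue.append(node.get_right())
    return result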
location_id, year_id)\n return output_file, stdout_log\n\n\ndef calculate_output_filename(output_dir, measure_id, location_id, year_id):\n output_file = os.path.join(output_dir, \"FILEPATH\".format(\n measure_id, location_id, year_id))\n return output_file\n\n\ndef calculate_log_file_name(log_dir, location_id, year_id):\n return os.path.join(log_dir, 'FILEPATH'.format(location_id, year_id))\n\n\ndef set_folder_permissions(path, logger):\n \"\"\"Enforces permissions on the folder structure.\n \"\"\"\n for root, dirs, files in os.walk(path):\n for d in dirs:\n chmod_quietly(root, d, logger)\n for f in files:\n chmod_quietly(root, f, logger)\n\n\ndef construct_extra_paths(out_dir_without_version, log_dir,\n tool_name, output_version):\n \"\"\"\n Create the paths to out_dir, log_dir, cache_dir.\n This just computes the paths, no directories are actually created.\n\n Args:\n out_dir_without_version: the root directory WITHOUT the version\n number\n log_dir: The value of the --log_dir argument\n tool_name: dalynator or burdenator\n version: The dalynator or burdenator version\n\n Returns:\n out_dir, log_dir, cache_dir n as strings\n \"\"\"\n # Are they using the default output dir?\n # Always append the version number\n if not out_dir_without_version:\n out_dir = 'FILEPATH'.format(tool_name, output_version)\n else:\n out_dir = 'FILEPATH'.format(out_dir_without_version, output_version)\n\n # Did they override the log directory?\n if not log_dir:\n log_dir = out_dir + \"FILEPATH\"\n\n cache_dir = 'FILEPATH'.format(out_dir)\n\n return out_dir, log_dir, cache_dir\n\n\ndef chmod_quietly(root, path, logger):\n p = os.path.join(root, path)\n try:\n logger.debug(\"chmod 775 on {}\".format(p))\n os.chmod(p, FILE_PERMISSIONS)\n except Exception as e:\n logger.info(\"chmod failed to set {} permissions on {}: {}\".\n format(FILE_PERMISSIONS, p, e.message))\n pass\n\n\ndef rotate_logs(out_dir, log_dir):\n \"\"\"\n Move the existing daly_run_all.log and the stderr directories to be\n timestamped versions.\n Useful during resume, so that we don't keep appending to the same log.\n\n :param out_dir: The root directory WITH the version number\n :param log_dir: The path to the log directory\n \"\"\"\n t = time.localtime()\n time_stamp = \"{}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}\". \\\n format(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min,\n t.tm_sec)\n main_log = os.path.join(log_dir, \"FILEPATH\")\n if os.path.exists(main_log):\n os.rename(main_log, \"FILEPATH\".format(main_log, time_stamp))\n\n stderr_dir = os.path.join(out_dir, \"stderr\")\n if os.path.exists(stderr_dir):\n os.rename(stderr_dir, \"FILEPATH\".format(stderr_dir, time_stamp))\n # And re-recreate the normal stderr directory just to be sure\n makedirs_safely(stderr_dir)\n\n\ndef construct_directories(out_dir, log_dir, cache_dir, resume):\n \"\"\"\n Create the output directory and the run_all logger. Used by both\n burdenator and dalynator.\n Check that both directories are empty. 
If they are not-empty then only\n continue if we are in resume mode.\n\n :param out_dir: The root directory WITH the version number\n :param log_dir: The path to the log directory\n :param cache_dir: The path to the cache directory\n :param resume: True if this is running in resume mode\n \"\"\"\n if os.path.isdir(out_dir):\n if os.listdir(out_dir) and not resume:\n raise ValueError(\n \"Output directory {} contains files and NOT running in \"\n \"resume mode\".format(out_dir))\n\n if os.path.isdir(log_dir):\n if os.listdir(log_dir) and not resume:\n raise ValueError(\"Log directory {} contains files and NOT \"\n \"running in resume mode\".format(log_dir))\n\n makedirs_safely(out_dir)\n makedirs_safely(log_dir)\n makedirs_safely(cache_dir)\n if resume:\n # If resuming then rotate (rename) the main log, daly_run_all.log\n rotate_logs(out_dir, log_dir)\n stderr_dir = os.path.join(out_dir, \"stderr\")\n makedirs_safely(stderr_dir)\n\n\ndef create_logger(out_dir, log_dir, verbose, resume):\n \"\"\"\n Create the logger object, and rotate the logs\n :param out_dir: The root directory WITH the version number\n :param log_dir: The path to the log directory\n :param verbose: The verbose flag. If True, run the logger at\n DEBUG level\n :param resume: True if this is running in resume mode\n :return:\n \"\"\"\n\n log_level = logging.DEBUG if verbose else logging.INFO\n create_logger_in_memory(\"dalynator\", log_level,\n log_dir + \"FILEPATH\",\n ['aggregator.aggregators', 'jobmon'])\n\n\ndef prepare_with_side_effects(out_dir=None, log_dir=None,\n cache_dir=None, verbose=None, resume=None):\n \"\"\"\n Has side effects - creates files and directories, initializes loggers.\n No parsing or other manipulation of the arguments. You probably do not\n want to call this from a unit test.\n :return:\n \"\"\"\n construct_directories(out_dir, log_dir, cache_dir, resume)\n create_logger(out_dir, log_dir, verbose, resume)\n\n\ndef get_args_and_create_dirs(parser, cli_args=None):\n \"\"\"Parses the command line using the parser and creates output directory\n and logger. Called by run_pipeline_*. 
Not used by run_all.\n \"\"\"\n if cli_args is None:\n cli_args = sys.argv[1:]\n args = parser.parse_args(cli_args)\n\n cod_object = to.cod_or_faux_correct(args.input_data_root,\n args.codcorrect_version,\n args.fauxcorrect_version)\n # resolve defaults for cod and epi versions\n if args.codcorrect_version == 'best':\n args.codcorrect_version = ac.best_version(\n 'codcorrect', args.gbd_round_id, args.decomp_step)\n if args.fauxcorrect_version == 'best':\n args.fauxcorrect_version = ac.best_version(\n 'fauxcorrect', args.gbd_round_id, args.decomp_step)\n if args.epi_version is None:\n args.epi_version = ac.best_version('como', args.gbd_round_id,\n args.decomp_step)\n\n # Store all years for each location in one directory\n top_out_dir = args.out_dir\n args.cache_dir = 'FILEPATH'.format(args.out_dir)\n makedirs_safely(os.path.join(top_out_dir, 'log_most_detailed'))\n args.log_dir = os.path.join(top_out_dir, 'log_most_detailed',\n str(args.location_id))\n args.out_dir = os.path.join(top_out_dir, 'draws', str(args.location_id))\n\n makedirs_safely(args.out_dir)\n makedirs_safely(args.log_dir)\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n args.logger = create_logger_in_memory(\n \"dalynator\", log_level,\n args.log_dir + \"FILEPATH\".format(args.location_id,\n args.year_id),\n ['aggregator.aggregators', 'jobmon'])\n\n args.cod_dir = cod_object.abs_path_to_draws\n args.cod_pattern = cod_object.file_pattern\n\n # this had daly_version before but I think we still want this\n # differentiated file path\n if hasattr(args, 'tool_name') and args.tool_name == \"dalynator\":\n args.daly_dir = \"FILEPATH\".format(args.input_data_root,\n args.output_version)\n else:\n args.daly_dir = None\n # our customers want the flag to be named \"epi\" not como\"\n args.epi_dir = get_como_folder_structure(os.path.join(\n args.input_data_root, 'como', str(args.epi_version)))\n\n if hasattr(args, 'paf_version'):\n # PAF directory structure has no \"draws\" sub-folder\n args.paf_dir = \"FILEPATH\".format(args.input_data_root,\n args.paf_version)\n else:\n args.paf_dir = None\n\n return args\n\n\ndef load_args_from_file(args, tool_name):\n if not args[0].out_dir_without_version:\n raise ValueError(\"In Resume Mode, must pass the root path to your \"\n \"output directory, i.e. 
\"\n \"FILEPATH\")\n cache_file = (\"FILEPATH\"\n .format(args[0].out_dir_without_version,\n args[0].output_version))\n if not os.path.exists(cache_file):\n raise RuntimeError(\"Nator has been run in --resume mode, but \"\n \"no {} file exists\".format(cache_file))\n with open(cache_file, \"rb\") as f:\n file_args = pickle.load(f)\n # overwrite the few defined arguments that can be different in a\n # resume case\n resume = args[0]\n if resume.start_at:\n file_args.start_at = resume.start_at\n if resume.end_at:\n file_args.end_at = resume.end_at\n if resume.verbose:\n file_args.verbose = resume.verbose\n file_args.resume = resume.resume\n return file_args\n\n\ndef write_args_to_file(args):\n cache_file = \"FILEPATH\".format(args.cache_dir)\n with open(cache_file, \"wb\") as f:\n pickle.dump(args, f)\n\n\ndef set_phase_defaults(args):\n if not args.start_at:\n args.start_at = 'most_detailed'\n if not args.end_at:\n args.end_at = 'pct_change'\n return args\n\n\ndef construct_parser_run_all_tool(tool_name):\n \"\"\" Used by run_all_burdenator and run_all_dalynator \"\"\"\n parser = argparse.ArgumentParser(description='Run all {tool}'.format(tool=tool_name))\n parser = arg_pool.add_resume(parser)\n parser = arg_pool.add_out_dir_with_destination(parser, \"out_dir_without_version\")\n parser = arg_pool.add_output_version(parser)\n parser = arg_pool.add_verbose(parser)\n if tool_name == 'dalynator':\n choices = ['most_detailed', 'pct_change', 'upload']\n else:\n choices = ['most_detailed', 'loc_agg', 'cleanup', 'pct_change',\n 'upload']\n parser = arg_pool.add_start_and_end_at(parser, choices)\n parser = arg_pool.add_raise_on_paf_error(parser)\n parser = arg_pool.add_do_not_execute(parser)\n\n return parser\n\n\ndef add_to_parser_burdenator_specific(parser):\n \"\"\"Arguments specific to the Burdenator for Most Detailed phase\"\"\"\n parser = arg_pool.add_paf_version(parser)\n parser = arg_pool.add_cause_set_ids(parser)\n parser = arg_pool.add_star_ids(parser)\n parser = arg_pool.add_raise_on_paf_error(parser)\n parser = arg_pool.add_measure_ids(parser)\n\n return parser\n\n\ndef add_non_resume_args_burdenator(parser, tool_name):\n \"\"\"add non resume args from dalynator, then add burdenator specific non-resume args,\n for most detailed phase\"\"\"\n parser = add_non_resume_args(parser, tool_name)\n parser = arg_pool.add_paf_version(parser)\n parser = arg_pool.add_cause_set_ids(parser)\n parser = arg_pool.add_star_ids(parser)\n return parser\n\n\ndef add_non_resume_args(parser, tool_name):\n \"\"\"\n The parse for run_all_dalynator, nothing shared with other parsers.\n However, this is reused (by explicit delegation) from\n run_all_burdenator.\n\n :return: parser\n \"\"\"\n parser = arg_pool.add_input_data_root(parser)\n parser = arg_pool.add_cod(parser)\n parser = arg_pool.add_epi(parser)\n parser = arg_pool.add_gbd_round_id(parser)\n parser = arg_pool.add_decomp_step(parser)\n parser = arg_pool.add_log_dir(parser)\n parser = arg_pool.add_turn_off_null_nan(parser)\n parser = arg_pool.add_upload_to_test(parser)\n parser = arg_pool.add_skip_cause_agg(parser)\n parser = arg_pool.add_read_from_prod(parser)\n parser = arg_pool.add_dual_upload(parser)\n parser = arg_pool.add_loc_set_ids(parser)\n\n if tool_name == 'dalynator':\n default_measures = ['daly']\n else:\n default_measures = ['death', 'daly', 'yld', 'yll']\n parser = arg_pool.add_measures(parser, default_measures)\n parser = arg_pool.add_years(parser)\n parser = arg_pool.add_n_draws(parser)\n parser = arg_pool.add_sge_project(parser)\n 
parser = arg_pool.add_do_nothing(parser)\n parser = arg_pool.add_start_and_end_years(parser)\n return parser\n\n\ndef construct_parser_burdenator():\n \"\"\"Create a parser for all arguments used by burdenator from pipeline but\n not from run_all, Used for Burdenator Most Detailed\"\"\"\n parser = construct_parser_shared('Burdenator most detailed')\n parser = arg_pool.add_loc_id(parser)\n parser = arg_pool.add_year_and_n_draws_group(parser)\n parser = add_to_parser_burdenator_specific(parser)\n return parser\n\n\ndef construct_parser_dalynator():\n \"\"\"Used for Dalynator Most Detailed\"\"\"\n parser = construct_parser_shared('Dalynator most detailed')\n parser = arg_pool.add_loc_id(parser)\n parser = arg_pool.add_year_id(parser)\n parser = arg_pool.add_n_draws(parser)\n return parser\n\n\ndef construct_parser_shared(description):\n \"\"\"Used by the pipelines\"\"\"\n parser = argparse.ArgumentParser(description=description)\n parser = arg_pool.add_input_data_root(parser)\n parser = arg_pool.add_out_dir(parser)\n parser = arg_pool.add_log_dir(parser)\n parser = arg_pool.add_turn_off_null_nan(parser)\n parser = arg_pool.add_verbose(parser)\n parser = arg_pool.add_cod(parser)\n parser = arg_pool.add_epi(parser)\n parser = arg_pool.add_output_version(parser)\n parser = arg_pool.add_gbd_round_id(parser)\n parser = arg_pool.add_decomp_step(parser)\n parser = arg_pool.add_dual_upload(parser)\n\n # Needed by mock_framework\n valid_tool_names = [\"dalynator\", \"burdenator\"]\n parser = arg_pool.add_tool_names(parser, valid_tool_names, False)\n\n return parser\n\n\ndef construct_parser_burdenator_loc_agg():\n \"\"\"Create parser for burdenator location aggregation\"\"\"\n parser = argparse.ArgumentParser(\n description='Run location aggregation after burdenation')\n parser = arg_pool.add_data_root(parser)\n parser = arg_pool.add_year_id(parser)\n parser = arg_pool.add_rei_id(parser)\n parser = arg_pool.add_sex_id(parser)\n parser = arg_pool.add_measure_id(parser, VALID_BURDENATOR_MEASURES)\n parser = arg_pool.add_region_locs(parser)\n parser = arg_pool.add_loc_set_id(parser)\n parser = arg_pool.add_output_version(parser)\n parser = arg_pool.add_verbose(parser)\n parser = arg_pool.add_gbd_round_group(parser)\n parser = arg_pool.add_decomp_step(parser)\n parser = arg_pool.add_n_draws(parser)\n parser = arg_pool.add_star_ids(parser)\n return parser\n\n\ndef get_args_burdenator_loc_agg(parser, cli_args=None):\n \"\"\"Creates arguments from parser for burdenator location aggregation\"\"\"\n if cli_args is None:\n cli_args = sys.argv[1:]\n args = parser.parse_args(cli_args)\n args.gbd_round, args.gbd_round_id = ac.populate_gbd_round_args(\n args.gbd_round, args.gbd_round_id)\n\n # Create log directory\n top_out_dir = args.data_root\n args.cache_dir = 'FILEPATH'.format(args.data_root)\n args.log_dir = os.path.join(top_out_dir, 'log_loc_agg',\n str(args.year_id), str(args.measure_id))\n\n log_filename = \"FILEPATH\".format(\n args.measure_id, args.rei_id, args.year_id, args.sex_id)\n\n makedirs_safely(args.log_dir)\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n args.logger = create_logger_in_memory(\n \"dalynator\", log_level, args.log_dir + \"/\" + log_filename,\n ['aggregator.aggregators', 'jobmon'])\n\n return args\n\n\ndef construct_parser_burdenator_cleanup():\n \"\"\"Create parser for burdenator cleanup\"\"\"\n parser = argparse.ArgumentParser(description='Run burdenator cleanup')\n parser = arg_pool.add_input_data_root(parser)\n parser = arg_pool.add_out_dir(parser)\n parser = 
arg_pool.add_loc_id(parser)\n parser = arg_pool.add_year_id(parser)\n parser = arg_pool.add_measure_id(parser, VALID_BURDENATOR_MEASURES)\n parser = arg_pool.add_gbd_round_group(parser)\n parser = arg_pool.add_decomp_step(parser)\n parser = arg_pool.add_cod(parser)\n parser = arg_pool.add_epi(parser)\n parser = arg_pool.add_turn_off_null_nan(parser)\n parser = arg_pool.add_output_version(parser)\n parser = arg_pool.add_star_ids(parser)\n parser = arg_pool.add_skip_cause_agg(parser)\n parser = arg_pool.add_verbose(parser)\n parser = arg_pool.add_dual_upload(parser)\n\n\n valid_tool_names = [\"burdenator\"]\n parser = arg_pool.add_tool_names(parser, valid_tool_names)\n parser = arg_pool.add_n_draws(parser)\n return parser\n\n\ndef construct_args_burdenator_cleanup(parser, cli_args=None):\n \"\"\"Creates arguments from parser for rearranging the draw files at the end\n of the burdenator run\"\"\"\n if cli_args is None:\n cli_args = sys.argv[1:]\n args = parser.parse_args(cli_args)\n args.tool_name = 'burdenator'\n args.gbd_round, args.gbd_round_id = ac.populate_gbd_round_args(\n args.gbd_round, args.gbd_round_id)\n\n # Create log directory\n top_out_dir = args.out_dir\n args.cache_dir = 'FILEPATH'.format(args.out_dir)\n args.log_dir = os.path.join(top_out_dir, 'log_cleanup',\n str(args.year_id), str(args.measure_id))\n\n log_filename = \"FILEPATH\".format(\n args.measure_id, args.location_id, args.year_id)\n\n makedirs_safely(args.log_dir)\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n args.logger = create_logger_in_memory(\n \"dalynator\", log_level, args.log_dir + \"/\" + log_filename,\n ['aggregator.aggregators', 'jobmon'])\n\n # Get cod/epi env directories\n if args.codcorrect_version == 'best':\n args.codcorrect_version = ac.best_version(\n 'codcorrect', args.gbd_round_id, args.decomp_step)\n if args.fauxcorrect_version == 'best':\n args.fauxcorrect_version = ac.best_version(\n 'fauxcorrect', args.gbd_round_id, args.decomp_step)\n if args.epi_version is None:\n args.epi_version = ac.best_version('como', args.gbd_round_id,\n args.decomp_step)\n args.epi_dir = get_como_folder_structure(os.path.join(\n args.input_data_root, 'como', str(args.epi_version)))\n cod_object = to.cod_or_faux_correct(\n args.input_data_root,\n codcorrect_version=args.codcorrect_version,\n fauxcorrect_version=args.fauxcorrect_version)\n args.cod_dir = cod_object.abs_path_to_draws\n args.cod_pattern = cod_object.file_pattern\n return args\n\n\ndef construct_parser_upload():\n \"\"\"Create parser for burdenator upload\"\"\"\n parser = argparse.ArgumentParser(description='Run upload')\n parser = arg_pool.add_out_dir(parser) # didnt have the store before\n parser = arg_pool.add_gbd_process_version_id(parser)\n parser = arg_pool.add_loc_ids(parser)\n\n valid_table_types = [\"single_year\", \"multi_year\"]\n parser = arg_pool.add_table_types(parser, valid_table_types)\n\n valid_storage_engines = [\"INNODB\", \"COLUMNSTORE\"]\n parser = arg_pool.add_storage_engines(parser, valid_storage_engines)\n parser = arg_pool.add_upload_to_test(parser)\n parser = arg_pool.add_dual_upload(parser)\n parser = arg_pool.add_verbose(parser)\n\n valid_tool_names = [\"burdenator\", \"dalynator\"]\n parser = arg_pool.add_tool_names(parser, valid_tool_names)\n\n return parser\n\n\ndef construct_args_upload(parser, cli_args=None):\n \"\"\"Creates arguments from parser for uploading data\"\"\"\n if cli_args is None:\n cli_args = sys.argv[1:]\n args = parser.parse_args(cli_args)\n\n # Create log directory\n top_out_dir = 
args.out_dir\n args.cache_dir = 'FILEPATH'.format(args.out_dir)\n args.log_dir = os.path.join(\n top_out_dir, 'log_upload', args.table_type)\n\n log_filename = \"FILEPATH\".format(\n args.gbd_process_version_id, args.table_type)\n\n makedirs_safely(args.log_dir)\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n args.logger = create_logger_in_memory(\n \"dalynator\", log_level, args.log_dir + \"/\" + log_filename,\n ['aggregator.aggregators', 'jobmon'])\n\n return args\n\n\ndef construct_parser_pct_change():\n \"\"\"Create parser for pct change calculation\"\"\"\n parser = argparse.ArgumentParser(\n description='Run pct change for DALYs')\n\n parser = arg_pool.add_input_data_root(parser)\n parser = arg_pool.add_out_dir(parser)\n parser = arg_pool.add_loc_id(parser)\n parser = arg_pool.add_start_and_end_year(parser)\n\n valid_measures = [gbd.measures.DALY, gbd.measures.YLL, gbd.measures.YLD,\n gbd.measures.DEATH]\n parser = arg_pool.add_measure_id(parser, valid_measures)\n parser = arg_pool.add_cod(parser)\n parser = arg_pool.add_epi(parser)\n valid_tool_names = [\"dalynator\", \"burdenator\"]\n parser = arg_pool.add_tool_names(parser, valid_tool_names)\n parser = arg_pool.add_output_version(parser)\n parser = arg_pool.add_gbd_round_group(parser)\n parser = arg_pool.add_decomp_step(parser)\n parser = arg_pool.add_n_draws(parser)\n parser = arg_pool.add_star_ids(parser)\n parser = arg_pool.add_verbose(parser)\n parser = arg_pool.add_dual_upload(parser)\n\n return parser\n\n\ndef get_args_pct_change(parser, cli_args=None):\n \"\"\"Creates arguments from parser for pct change calculation\"\"\"\n if cli_args is None:\n cli_args = sys.argv[1:]\n args = parser.parse_args(cli_args)\n args.gbd_round, args.gbd_round_id = ac.populate_gbd_round_args(\n args.gbd_round, args.gbd_round_id)\n\n args.log_dir = os.path.join(args.out_dir, 'log_pct_change',\n str(args.location_id))\n makedirs_safely(args.log_dir)\n logfn = \"FILEPATH\".format(args.start_year, args.end_year)\n log_level = logging.DEBUG if args.verbose else logging.INFO\n args.logger = create_logger_in_memory(\"dalynator\", log_level,\n \"FILEPATH\".format(args.log_dir, logfn),\n ['aggregator.aggregators', 'jobmon'])\n # Get cod/epi env directories\n if args.codcorrect_version == 'best':\n args.codcorrect_version = ac.best_version(\n 'codcorrect', args.gbd_round_id, args.decomp_step)\n if args.fauxcorrect_version == 'best':\n args.fauxcorrect_version = ac.best_version(\n 'fauxcorrect', args.gbd_round_id, args.decomp_step)\n if args.epi_version is None:\n args.epi_version = ac.best_version('como', args.gbd_round_id,\n args.decomp_step)\n args.epi_dir = get_como_folder_structure(os.path.join(\n args.input_data_root, 'como', str(args.epi_version)))\n cod_object = to.cod_or_faux_correct(\n args.input_data_root,\n codcorrect_version=args.codcorrect_version,\n fauxcorrect_version=args.fauxcorrect_version)\n args.cod_dir = cod_object.abs_path_to_draws\n args.cod_pattern = cod_object.file_pattern\n return args\n\n\ndef create_logging_directories():\n parser = argparse.ArgumentParser()\n parser.add_argument('--out_dir', type=str)\n parser.parse_known_args()\n\n\ndef construct_parser_cs_sort():\n \"\"\"Create parser for burdenator upload\"\"\"\n parser = argparse.ArgumentParser(\n description='Consolidate summary files for CS upload')\n parser = arg_pool.add_out_dir(parser, True)\n parser = arg_pool.add_loc_id(parser)\n parser = arg_pool.add_measure_ids(parser, VALID_BURDENATOR_MEASURES)\n parser = arg_pool.add_year_ids(parser)\n parser = 
arg_pool.add_start_and_end_year_ids(parser)\n\n valid_tool_names = [\"burdenator\", \"dalynator\"]\n parser = arg_pool.add_tool_names(parser, valid_tool_names)\n\n return parser\n\n\ndef construct_args_cs_sort(parser, cli_args=None):\n \"\"\"Creates arguments from parser for uploading data\"\"\"\n if cli_args is None:\n cli_args = sys.argv[1:]\n args = parser.parse_args(cli_args)\n return args\n","sub_path":"gbd_2019/shared_code/central_comp/dalys_hale/dalynator/dalynator/dalynator/get_input_args.py","file_name":"get_input_args.py","file_ext":"py","file_size_in_byte":25745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"58634719","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nfrom datetime import datetime, timezone\n\nfrom medcat.cat import CAT\nfrom medcat.cdb import CDB\nfrom medcat.meta_cat import MetaCAT\nfrom medcat.utils.vocab import Vocab\n\n\nclass NlpProcessor:\n \"\"\"\n This class defines an interface for NLP Processor\n \"\"\"\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def get_app_info(self):\n pass\n\n def process_content(self, content):\n pass\n\n def process_content_bulk(self, content):\n pass\n\n @staticmethod\n def _get_timestamp():\n \"\"\"\n Returns the current timestamp in ISO 8601 format. Formatted as \"yyyy-MM-dd'T'HH:mm:ss.SSSXXX\".\n :return: timestamp string\n \"\"\"\n return datetime.now(tz=timezone.utc).isoformat(timespec='milliseconds')\n\n\nclass MedCatProcessor(NlpProcessor):\n \"\"\"\"\n MedCAT Processor class is wrapper over MedCAT that implements annotations extractions functionality\n (both single and bulk processing) that can be easily exposed for an API.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n self.log.info('Initializing MedCAT processor ...')\n\n # TODO: use a config file instead of env variables\n #\n self.app_name = 'MedCAT'\n self.app_lang = 'en'\n self.app_version = MedCatProcessor._get_medcat_version()\n self.app_model = os.getenv(\"APP_MODEL_NAME\", 'unknown')\n\n self.cat = self._create_cat()\n self.cat.spacy_cat.train = os.getenv(\"APP_TRAINING_MODE\", False)\n\n self.bulk_nproc = int(os.getenv('APP_BULK_NPROC', 8))\n\n self.log.info('MedCAT processor is ready')\n\n def get_app_info(self):\n \"\"\"\n Returns general information about the application\n :return: application information stored as KVPs\n \"\"\"\n return {'name': self.app_name,\n 'language': self.app_lang,\n 'version': self.app_version,\n 'model': self.app_model}\n\n def process_content(self, content):\n \"\"\"\n Processes a single document extracting the annotations.\n :param content: document to be processed, containing 'text' field.\n :return: processing result containing document with extracted annotations stored as KVPs.\n \"\"\"\n if 'text' not in content:\n error_msg = \"'text' field missing in the payload content.\"\n nlp_result = {'success': False,\n 'errors': [error_msg],\n 'timestamp': NlpProcessor._get_timestamp()\n }\n return nlp_result, False\n\n text = content['text']\n\n # assume an that a blank document is a valid document and process it only\n # when it contains any non-blank characters\n if text is not None and len(text.strip()) > 0:\n entities = self.cat.get_entities(text)\n else:\n entities = []\n\n nlp_result = {'text': text,\n 'annotations': entities,\n 'success': True,\n 'timestamp': NlpProcessor._get_timestamp()\n }\n\n # append the footer\n if 'footer' in content:\n nlp_result['footer'] = content['footer']\n\n 
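        # Shape of the value returned below, sketched on made-up text (the
        # 'annotations' structure is whatever cat.get_entities() yields, so
        # treat this as a hedged illustration, not a guarantee):
        #
        #     {'text': 'Patient denies chest pain.',
        #      'annotations': [...],                      # MedCAT entities
        #      'success': True,
        #      'timestamp': '2020-01-01T12:00:00.000+00:00',
        #      'footer': {...}}                           # only if supplied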
return nlp_result\n\n def process_content_bulk(self, content):\n \"\"\"\n Processes an array of documents extracting the annotations.\n :param content: document to be processed, containing 'text' field.\n :return: processing result containing documents with extracted annotations,stored as KVPs.\n \"\"\"\n\n # process at least 10 docs per thread and don't bother with starting\n # additional threads when less documents were provided\n min_doc_per_thread = 10\n num_slices = max(1, int(len(content) / min_doc_per_thread))\n batch_size = min(300, num_slices)\n\n if batch_size >= self.bulk_nproc:\n nproc = self.bulk_nproc\n else:\n batch_size = min_doc_per_thread\n nproc = max(1, num_slices)\n if len(content) > batch_size * nproc:\n nproc += 1\n\n # use generators both to provide input documents and to provide resulting annotations\n # to avoid too many mem-copies\n invalid_doc_ids = []\n ann_res = self.cat.multi_processing(MedCatProcessor._generate_input_doc(content, invalid_doc_ids),\n nproc=nproc, batch_size=batch_size)\n\n return MedCatProcessor._generate_result(content, ann_res, invalid_doc_ids)\n\n # helper MedCAT methods\n #\n def _create_cat(self):\n \"\"\"\n Loads MedCAT resources and creates CAT instance\n \"\"\"\n if os.getenv(\"APP_MODEL_VOCAB_PATH\") is None:\n raise ValueError(\"Vocabulary (env: APP_MODEL_VOCAB_PATH) not specified\")\n\n if os.getenv(\"APP_MODEL_CDB_PATH\") is None:\n raise Exception(\"Concept database (env: APP_MODEL_CDB_PATH) not specified\")\n\n # Vocabulary and Concept Database are mandatory\n self.log.debug('Loading VOCAB ...')\n vocab = Vocab()\n vocab.load_dict(path=os.getenv(\"APP_MODEL_VOCAB_PATH\"))\n\n self.log.debug('Loading CDB ...')\n cdb = CDB()\n cdb.load_dict(path=os.getenv(\"APP_MODEL_CDB_PATH\"))\n\n # Apply CUI filter if provided\n if os.getenv(\"APP_MODEL_CUI_FILTER_PATH\") is not None:\n self.log.debug('Applying CDB CUI filter ...')\n with open(os.getenv(\"APP_MODEL_CUI_FILTER_PATH\")) as cui_file:\n all_lines = (line.rstrip() for line in cui_file)\n selected_cuis = [line for line in all_lines if line] # filter blank lines\n cdb.filter_by_cui(selected_cuis)\n\n # Meta-annotation models are optional\n meta_models = []\n if os.getenv(\"APP_MODEL_META_PATH_LIST\") is not None:\n self.log.debug('Loading META annotations ...')\n for model_path in os.getenv(\"APP_MODEL_META_PATH_LIST\").split(':'):\n m = MetaCAT(save_dir=model_path)\n m.load()\n meta_models.append(m)\n\n return CAT(cdb=cdb, vocab=vocab, meta_cats=meta_models)\n\n # helper generator functions to avoid multiple copies of data\n #\n @staticmethod\n def _generate_input_doc(documents, invalid_doc_idx):\n \"\"\"\n Generator function returning documents to be processed as a list of tuples:\n (idx, text), (idx, text), ...\n Skips empty documents and reports their ids to the invalid_doc_idx array\n :param documents: array of input documents that contain 'text' field\n :param invalid_doc_idx: array that will contain invalid document idx\n :return: consecutive tuples of (idx, document)\n \"\"\"\n for i in range(0, len(documents)):\n # assume the document to be processed only when it is not blank\n if 'text' in documents[i] and documents[i]['text'] is not None and len(documents[i]['text'].strip()) > 0:\n yield i, documents[i]['text']\n else:\n invalid_doc_idx.append(i)\n\n @staticmethod\n def _generate_result(in_documents, annotations, invalid_doc_idx):\n \"\"\"\n Generator function merging the resulting annotations with the input documents.\n The result for documents that were invalid will not 
contain any annotations.\n :param in_documents: array of input documents that contain 'text' field\n :param annotations: array of annotations extracted from documents\n :param invalid_doc_idx: array of invalid document idx\n :return:\n \"\"\"\n # generate output for valid annotations\n for i in range(len(annotations)):\n res = annotations[i]\n res_idx = res[0]\n in_ct = in_documents[res_idx]\n\n # parse the result\n out_res = {'text': res[1][\"text\"],\n 'annotations': res[1][\"entities\"],\n 'success': True,\n 'timestamp': NlpProcessor._get_timestamp()\n }\n # append the footer\n if 'footer' in in_ct:\n out_res['footer'] = in_ct['footer']\n\n yield out_res\n\n # generate output for invalid documents\n for i in invalid_doc_idx:\n in_ct = in_documents[i]\n\n out_res = {'text': in_ct[\"text\"],\n 'annotations': [],\n 'success': True,\n 'timestamp': NlpProcessor._get_timestamp()\n }\n # append the footer\n if 'footer' in in_ct:\n out_res['footer'] = in_ct['footer']\n\n yield out_res\n\n @staticmethod\n def _get_medcat_version():\n \"\"\"\n Returns the version string of the MedCAT module as reported by pip\n :return:\n \"\"\"\n try:\n import subprocess\n result = subprocess.check_output(['pip', 'show', 'medcat'], universal_newlines=True)\n version_line = list(filter(lambda v: 'Version' in v, result.split('\\n')))\n return version_line[0].split(' ')[1]\n except Exception:\n raise Exception(\"Cannot read the MedCAT library version\")\n","sub_path":"medcat_service/nlp_processor/medcat_processor.py","file_name":"medcat_processor.py","file_ext":"py","file_size_in_byte":9313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"53497121","text":"import matplotlib.pyplot as plt\nimport os\nimport tldextract\nfrom collections import Counter,OrderedDict\nfrom lib.utils import path,time_delta,dict_sort\nfrom lib.sqlite3_operate import SQLite\ndef query(table='nvd_cve',column='CVE_Items_impact_baseMetricV2_severity',time=2014,top=10):\n \"\"\"\n 取数\n \"\"\"\n so = SQLite(\"data/nvd.db\")\n sql = \"select {column},count(CVE_Items_cve_CVE_data_meta_ID) as ct from {table} \\\n where CVE_Items_publishedDate like '%{time}%' \\\n group by {column} \\\n order by ct DESC\".format(column=column,table=table,time=time)\n r = so.query(sql)\n\n od = OrderedDict()\n for i in r:\n od[i[0]]=i[1]\n \n od_pec=dict()\n if len(od.keys())>top:\n i=0\n for k,v in od.items():\n if i1:\n for t in split_tag:\n domain=tldextract.extract(t).domain\n domains.append(domain)\n else:\n domain=tldextract.extract(tag).domain\n domains.append(domain)\n sort_tags=Counter(domains)\n\n return sort_tags\n\ndef cpe_count():\n \"\"\"\n 计算总cve数据的cpe top N\n \"\"\"\n sql=\"select CVE_Items_configurations_nodes_children_cpe_match_cpe23Uri from nvd_cve\"\n sql2=\"select CVE_Items_configurations_nodes_cpe_match_cpe23Uri from nvd_cve\"\n r=execute(sql)\n r2=execute(sql2)\n cpe=[i[0].split(':')[3] for i in r if i[0]]\n #print(cpe)\n cpe2=[i[0] for i in r2 if i[0]]\n for j in cpe2:\n if ';' in j:\n js=j.split(';')\n for k in js:\n cpe.append(k.split(':')[3])\n cpe_count=Counter(cpe) # Counter是无序的\n \n return dict(cpe_count)\n\ndef get_top(od,top=25):\n od_pec=dict()\n od=dict_sort(od,key=False,value=True)\n if len(od.keys())>top:\n i=0\n for k,v in od.items():\n if i=1.10 User.is_authenticated is a property, not a method but a\n # strange one : CallbableBool. \n # - If default User is used you can use it as a boolean either a method. 
\n # - If this property is overrided you may have a bool instead and an\n # exception.\n # Fix it by checking if the property is callable or not. \n def is_authenticated(self, request):\n if hasattr(request, \"session\") and hasattr(request, \"user\"):\n auth = request.user.is_authenticated\n return auth() if callable(auth) else auth\n else:\n return False\n\n def process_response(self, request, response):\n \"\"\"\n Check for messages for this user and, if it exists,\n call the messages API with it\n \"\"\"\n if self.is_authenticated(request):\n msgs = get_messages(request.user)\n if msgs:\n for msg, level in msgs:\n messages.add_message(request, level, msg)\n return response\n","sub_path":"async_messages/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"472145121","text":"\"\"\"\nDescription: File takes a set of player data and processes it into a player dataset of scraped information of interest for model input. \n\"\"\"\n\n# ------------------------------------------------------------------------\n# IMPORTS\n# ------------------------------------------------------------------------\n# Scraping/API Dependencies\nimport time\nfrom bs4 import BeautifulSoup\nfrom uszipcode import SearchEngine\n\n# Data Science Dependencies\nimport pandas as pd\nimport numpy as np\nfrom pandas.io.json import json_normalize\nimport json\n\n# System Imports\nimport sys\nimport os\nimport glob\nimport re\n\n# ------------------------------------------------------------------------\n# FUNCTIONS\n# ------------------------------------------------------------------------\ndef getMeta(file):\n # print(file)\n # print(file.split('_'))\n date, playerID, viewType = file.split('_')\n viewType = viewType.replace('.txt', '')\n return(date, playerID, viewType)\n\ndef getSoup(file):\n soup = BeautifulSoup(open(file), \"html.parser\")\n return(soup)\n\ndef getDataJSON(soup, pos):\n scripts = soup.findAll('script')\n dataDict = scripts[pos].contents[0]\n dataDict = dataDict[dataDict.find('{'):dataDict.rfind('}')+1]\n dataDict = json.loads(dataDict)\n return(dataDict)\n\ndef getPlayerBasics(dataDict):\n playerBasics = json_normalize(dataDict['header'])\n playerBasics['scrapeDate'], playerBasics['playerID'], playerBasics['viewType'] = meta\n return(playerBasics)\n\ndef getBio(soup):\n bios = soup.findAll('table', {'class':'bio'})\n return(bios)\n\ndef getPos(bios, soup):\n temp2 = [tbod.findAll('tbody', recursive=False) for tbod in bios]\n temp3 = [t[0].find('tr').text for t in temp2]\n \n scripts = soup.findAll('script')\n dataDictPos = [ i for i, x in enumerate(scripts) if bool(re.search(r'var page', x.text)) ]\n \n weeklyRankings = [ i for i, x in enumerate(temp3) if bool(re.search(r'WEEKLY RANKINGS', x)) ]\n highestRankings = [ i for i, x in enumerate(temp3) if bool(re.search(r'HIGHEST RANKINGS', x)) ]\n schoolsOfInterest = [ i for i, x in enumerate(temp3) if bool(re.search(r'SCHOOLS OF INTEREST', x)) ]\n numberOfPhotos = [ i for i, x in enumerate(temp3) if bool(re.search(r'PLAYER PHOTOS', x)) ]\n clothingEquipment = [ i for i, x in enumerate(temp3) if bool(re.search(r'CLOTHING/EQUIPMENT', x)) ]\n activitySummary = [ i for i, x in enumerate(temp3) if bool(re.search(r'ACTIVITY OVERVIEW', x)) ]\n newsArticles = [ i for i, x in enumerate(temp3) if bool(re.search(r'NEWS ARTICLES', x)) ]\n \n return({'weeklyRankings':weeklyRankings,\n 'highestRankings':highestRankings,\n 
'schoolsOfInterest':schoolsOfInterest,\n 'numberOfPhotos':numberOfPhotos,\n 'clothingEquipment':clothingEquipment,\n 'activitySummary': activitySummary,\n 'newsArticles':newsArticles,\n 'dataDictPos':dataDictPos})\n\ndef progress_bar(iteration, total, prefix = 'Percent of player profiles scraped', \n suffix = '', decimals = 1, length = 80, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n @author Adapted from: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r', flush=True)\n # Print New Line on Complete\n if iteration == total: \n print()\n\ndef get_housing_pricing_estimate(street, city_state):\n \"\"\"\n Description: Function hits uszipcode API and attempts to go from a players city and state to a latitude/longitude/housing estimate.\n Inputs: \n - street (String): A street name for this player.\n Outputs: \n - city_state (String): A city and state (abbreviated i.e. Vermont ---> VT) for this player.\n - mean_median (float): The mean-median Housing Price for all zip codes found in this city. \n - latitude_estimate (float): An estimated latitude value for this location.\n - longitude_estimate (float): An estimated longitude value for this location.\n \"\"\"\n # With the API search engine running.\n # ----------------------------------------------------------------------------------------\n with SearchEngine() as search:\n try:\n locations_found = search.by_city_and_state(city=city, state=state) \n \n # If no data is found, returning None. The output dataset will thus not be entirely clean!\n # ----------------------------------------------------------------------------------------\n except KeyError:\n return None, None, None\n if len(locations_found) == 0:\n return None, None, None\n\n # Getting Home Value Information (mean-median, for now) if there were Multiple Zip Codes for this City\n # ----------------------------------------------------------------------------------------\n median_home_values = [location.median_home_value for location in locations_found if location.median_home_value != None]\n if len(median_home_values) != 0:\n mean_median = np.mean(median_home_values)\n else:\n mean_median = None\n\n # Cannot get the mean latitude, so this is even more of an estimate. 
But, its fine for now if its in the city we want.\n base_location = locations_found[0]\n latitude_estimate = base_location.lat \n longitude_estimate = base_location.lng\n\n return mean_median, latitude_estimate, longitude_estimate \n\n# ------------------------------------------------------------------------\n# MAINLINE LOGIC\n# ------------------------------------------------------------------------\n# If file is called from mainline.\nif __name__ == \"__main__\":\n # Setting up outfile for saving processed data to.\n # ------------------------------------------------------------------------\n public = glob.glob('./raw-html-test/*public.txt')\n if os.path.isdir('./output'):\n pass\n else:\n os.mkdir('./output')\n \n # Setting up Data Structures\n # ------------------------------------------------------------------------\n playerBasics = []\n weeklyRankings = []\n highestRankings = []\n schoolsOfInterest = []\n numberOfPhotos = []\n clothingEquipment = []\n newsArticles = []\n activitySummary = []\n \n # Incrementing Through Player Data Files\n # ------------------------------------------------------------------------\n current_player_index = 1\n total = len(public)\n for file in public:\n progress_bar(current_player_index, total) # User update of run time.\n meta = getMeta(file)\n soup = getSoup(file)\n bios = getBio(soup)\n pos = getPos(bios, soup)\n if len(pos['dataDictPos']) > 0: \n player_basics_data = getPlayerBasics(getDataJSON(soup, pos['dataDictPos'][0]))\n \n # Getting Location / Housing Estimate Data \n # ------------------------------------------------------------------------\n if player_basics_data['country'][0] == 'USA': # Only considering American players for this toy model.\n try:\n city = str(player_basics_data['city'][0])\n state = str(player_basics_data['state'][0])\n except IndexError: # No location data is available for this player, so we continue to the next.\n current_player_index += 1\n continue\n\n # Getting the housing and location estimates. 
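                # The helper reduces every zip code found for the city to one
                # price figure, the mean of the per-zip median home values,
                # before returning it alongside a lat/lng estimate. A minimal
                # sketch of that reduction (equivalent to the np.mean call in
                # the helper above):
                #
                #     values = [z.median_home_value for z in locations_found
                #               if z.median_home_value is not None]
                #     mean_median = sum(values) / len(values) if values else None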
\n housing_estimate, latitude_estimate, longitude_estimate = get_housing_pricing_estimate(city, state)\n player_basics_data['housing_estimate'] = housing_estimate\n player_basics_data['latitude'] = latitude_estimate\n player_basics_data['longitude'] = longitude_estimate\n\n else: # No data found for this player.\n current_player_index += 1\n continue\n\n # Updating Variables\n # ------------------------------------------------------------------------\n playerBasics.append(player_basics_data)\n current_player_index += 1\n time.sleep(1) # The API documentation requests no more than one request per second.\n\n # Saving Compiled Data\n # ------------------------------------------------------------------------\n playerBasics = pd.concat(playerBasics)\n playerBasics.to_csv('./output/player_dataset_test.csv', index=False) # Saving the raw dataset as it is before further changes.","sub_path":"IN_data/players/SCRAPE_housing_address.py","file_name":"SCRAPE_housing_address.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"424789244","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\nimport time\nimport hashlib\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\n\ndef send_html_mail(subject, html_content, recipient_list):\n msg = EmailMessage(subject, html_content, settings.EMAIL_HOST_USER, recipient_list)\n msg.content_subtype = 'html' # main content is now text/html\n msg.send(fail_silently=False)\n\n\ndef file_md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\ndef backup_database(host, port, user, pwd, db):\n bak = os.path.join('/tmp/', 'bak-{0}-{1}.sql'.format(db, time.strftime('%Y%m%d%H%M%S')))\n cmd = 'mysqldump -h{host} --port={port} -u{user} --password={pwd} {db} > {bak}'.format(\n host=host, port=port, user=user, pwd=pwd, db=db, bak=bak\n )\n ret = os.system(cmd)\n if ret:\n raise RuntimeError('mysqldump error')\n else:\n return os.path.abspath(bak)\n\n\ndef execute_sql(host, port, user, passwd, db, sql):\n proc = subprocess.Popen(\n ['mysql', '--default-character-set=utf8', '-h', host,\n '--port=%s' % port, db, '-u', user, '--password=%s' % passwd],\n stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n out, _ = proc.communicate('source ' + sql)\n try:\n out = out[out.index(os.linesep)+1:]\n except ValueError: # substring not found\n out = ''\n retcode = proc.poll()\n return retcode, out\n\n\ndef human_size(_bytes, traditional=((1024 ** 5, 'P'),\n (1024 ** 4, 'T'),\n (1024 ** 3, 'G'),\n (1024 ** 2, 'M'),\n (1024 ** 1, 'K'),\n (1024 ** 0, 'B'))):\n \"\"\"Human-readable size\"\"\"\n for factor, suffix in traditional:\n if _bytes >= factor:\n amount = round(_bytes/factor, 2)\n return str(amount) + suffix\n else:\n return str(_bytes)\n\n\ndef serialize_form_errors(form):\n errors = []\n for field in form:\n if field.errors:\n errors.append(field.label + ':' + ','.join([err for err in field.errors]))\n return '\\n'.join(errors)\n\n\ndef paginate(data, current_page=1, page_num=20):\n paginator = Paginator(data, page_num)\n try:\n show_lines = paginator.page(current_page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n show_lines = paginator.page(1)\n except 
EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n show_lines = paginator.page(paginator.num_pages)\n return show_lines\n","sub_path":"tags/v2.15.0/marmot/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"451492659","text":"import os\nimport numpy as np\nimport re\nimport csv\nimport pdb\n\n\ndef elems_processor(node_lines, elem_lines, nset_lines, stress_processing_path, stress_part_values, dat_path,\n file_identifiers, headers_on):\n # This function associates each element to the labels of the nodes that make it up and saves a .csv file for each\n # part and then a larger list for all parts as for the nodes.\n\n # Define the lines which denote the parts of the file where we have to search for the element nodes; however,\n # towards the end of the part of the file we are interested in there is a series of lines containing the keyword\n # which is supposed to tell the algorithm to stop looking for element nodes. Therefore we have to filter out all\n # of these lines and then we transform the lists containing the lines into arrays.\n large_elem_matrix_unsorted = []\n large_elem_matrix_unsorted_reset = []\n new_elem_lines = elem_lines[0:len(node_lines) - 1]\n new_nset_lines = np.zeros((len(node_lines) - 1))\n for i in range(0, len(new_elem_lines)):\n for j in range(i, len(nset_lines)):\n if nset_lines[j] > new_elem_lines[i]:\n new_nset_lines[i] = nset_lines[j]\n break\n new_elem_lines = np.array(new_elem_lines)\n new_elem_lines = new_elem_lines.astype(int)\n new_nset_lines = new_nset_lines.astype(int)\n # Define all relevant paths and create the relevant folders if necessary\n large_elem_matrix_path = os.path.join(stress_processing_path, 'Large_Element_Matrix' + '.csv')\n individual_element_paths = os.path.join(stress_part_values, 'Elements')\n if not os.path.exists(individual_element_paths):\n os.makedirs(individual_element_paths)\n # Define the headers and decide whether to run the code or not based on the presence of the last file to be created\n headers = ['Label', 'Node_1', 'Node_2', 'Node_3', 'Node_4', 'Node_5', 'Node_6', 'Node_7', 'Node_8']\n # Skip the main program if the last file to be generated is already there\n if os.path.isfile(large_elem_matrix_path):\n print('The files containing the element nodes already exist, moving on to centroid calculation and stress '\n 'association.')\n else:\n print('Processing nodes to extract coordinates...')\n # Open the dat file, this time to save all element nodes \n with open(dat_path, \"r\") as read_dat:\n dat_file = read_dat.readlines()\n for k in range(len(new_elem_lines)):\n # Only focus on the areas of the file where information about the elements is present\n elem_vector = np.zeros((new_nset_lines[k] - new_elem_lines[k], 9))\n # Iterate over each of the lines in the interval found and scan for float values\n for line in range(new_elem_lines[k], new_nset_lines[k]):\n s = dat_file[line].strip()\n data = [float(s) for s in re.findall(r\"[+-]? 
*(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[ee][+-]?\\d+)?\", s)]\n data = np.array([float(x) for x in data])\n # 4 cases have to be taken into account: two because elements can be made up of 8 or 6 elements, and\n # 2 more because to this we have to occasionally take into account and filter out the counter every\n # 5 lines that ABAQUS leaves in the dat file.\n if len(data) > 6:\n if len(data) == 10:\n data = data[1:]\n elif len(data) == 8:\n data = data[1:]\n if len(data) == 9:\n elem_vector[line - new_elem_lines[k] + 1, :] = data\n else:\n elem_vector[line - new_elem_lines[k] + 1, 0:7] = data\n # Filter out possible numerical errors\n if elem_vector[line - new_elem_lines[k] + 1, 1] == elem_vector[line - new_elem_lines[k] + 1, 2]:\n elem_vector[line - new_elem_lines[k] + 1, :] = 0\n\n # Only keep the non-zero values in the resulting vectors\n elem_vector = elem_vector[~np.all(elem_vector == 0, axis=1)]\n individual_elem_matrix = elem_vector\n\n # Save individual element matrices in csv format and append them to the large element lists\n if headers_on == 1:\n with open(os.path.join(individual_element_paths, 'Elements_Part_' + file_identifiers[k] + \".csv\"),\n 'wb') as f_write:\n writer = csv.writer(f_write)\n writer.writerow(headers)\n writer.writerows(individual_elem_matrix)\n else:\n with open(os.path.join(individual_element_paths, 'Elements_Part_' + file_identifiers[k] + \".csv\"),\n 'wb') as f_write:\n writer = csv.writer(f_write)\n writer.writerows(individual_elem_matrix)\n\n large_elem_matrix_unsorted_reset.append(individual_elem_matrix)\n large_elem_matrix_unsorted.append(elem_vector)\n\n # Sort the matrix containing all elements\n element_dictionary = {}\n for elem_matrix in large_elem_matrix_unsorted:\n for j in range(len(elem_matrix)):\n dictionary_matrix = elem_matrix[j]\n element_dictionary[dictionary_matrix[0]] = dictionary_matrix[1:]\n element_ids = list(element_dictionary.keys())\n sorted_matrix = np.zeros((len(element_ids), 9))\n sorted_element_ids = list(sorted(element_ids))\n for element in range(len(sorted_element_ids)):\n sorted_matrix[element, 0] = sorted_element_ids[element]\n sorted_matrix[element, 1:] = element_dictionary[sorted_element_ids[element]]\n # Save the larger matrices with or without headers\n if headers_on == 1:\n with open(large_elem_matrix_path, 'wb') as f_write:\n writer = csv.writer(f_write)\n writer.writerow(headers)\n writer.writerows(sorted_matrix)\n else:\n with open(large_elem_matrix_path, 'wb') as f_write:\n writer = csv.writer(f_write)\n writer.writerows(sorted_matrix)\n\n return individual_element_paths\n","sub_path":"python_files_python_3/elems_processor.py","file_name":"elems_processor.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"403228799","text":"import webapp2\n\nform = \"\"\"\n
<form method=\"post\">\nWhat is your birthday?\n<br>\n<label>month <input type=\"text\" name=\"month\"></label>\n<label>day <input type=\"text\" name=\"day\"></label>\n<label>year <input type=\"text\" name=\"year\"></label>\n<br>\n<br>\n<input type=\"submit\">\n</form>
\n\"\"\"\n\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.out.write(form)\n \n def post(self):\n self.response.out.write('year')\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage)\n], debug=True)\n\n\ndef main():\n from paste import httpserver\n httpserver.serve(app, host='127.0.0.1', port='8080')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"udacity web dev/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"433836629","text":"import csv\nfrom math import log\nfrom Tbl import Tbl\nfrom Col import Col\nfrom Num import Num\nfrom Row import Row\nfrom Sym import Sym\nfrom sys import path\nimport os, random, math\nfrom collections import defaultdict\npath.append(os.path.abspath(\"..\") + \"\\\\3\")\nfrom lib import THE,Pretty,same,first,last,ordered\n\nseed = random.seed\n\nclass Hw7:\n def __init__(self,fle):\n seed(1)\n self.tbl = Tbl(1)\n self.tbl.readData(fle)\n self.tbls = {}\n self.things = {}\n self.m = 2\n self.k = 1\n self.n = -1\n self.classes = []\n self.count_tbl = 2\n self.tree = self.split(self.tbl, 0)\n\n showt(self.tree)\n\n def fastMap(self, tbl):\n cols = [tbl.cols[col] for col in tbl.xs]\n randompt = random.randint(0, len(tbl.rows)-1)\n firstPivotpts = []\n for row in range(0, len(tbl.rows)):\n dist = distance(tbl.rows[randompt], tbl.rows[row], cols)\n firstPivotpts.append((row, dist))\n firstPivotpts.sort(key=lambda x: x[1])\n firstPivotptsLength=len(firstPivotpts)\n firstPivotidx = firstPivotpts[math.floor(firstPivotptsLength * 0.9)][0]\n secondPivotpts = []\n for row in range(0, len(tbl.rows)):\n dist = distance(tbl.rows[firstPivotidx], tbl.rows[row], cols)\n secondPivotpts.append((row, dist))\n secondPivotpts.sort(key=lambda x: x[1])\n secondPivotptsLength=len(firstPivotpts)\n secondPivotidx = secondPivotpts[math.floor(secondPivotptsLength * 0.9)][0]\n dist = secondPivotpts[math.floor(secondPivotptsLength * 0.9)][1]\n # print(\"frstidx\",firstPivotidx)\n # print(\"scndidx\",secondPivotidx)\n # print(\"dist\",dist)\n return (firstPivotidx, secondPivotidx, dist)\n\n def bestpivotpts(self, tbl):\n n = 10\n initial = len(tbl.rows)\n\n besttupl = None\n bestpts = None\n while n > 0:\n n -= 1\n pivotTupl = self.fastMap(tbl)\n rwdistList = []\n cols = [tbl.cols[col] for col in tbl.xs]\n for row in range(0, len(tbl.rows)):\n dist = cosine(tbl.rows[pivotTupl[0]], tbl.rows[pivotTupl[1]], tbl.rows[row], pivotTupl[2], cols)\n rwdistList.append((row, dist))\n rwdistList.sort(key=lambda x: x[1])\n mediandist = None\n index = (len(rwdistList) - 1) // 2\n if (len(rwdistList) % 2):\n mediandist = rwdistList[index][1]\n else:\n mediandist = (rwdistList[index][1] + rwdistList[index + 1][1]) / 2.0\n pointset = set()\n for point in rwdistList:\n if point[1] < mediandist:\n pointset.add(point[0])\n right = abs(len(pointset) - (len(rwdistList) - len(pointset)))\n\n if right < initial:\n initial = right\n bestpts = pointset\n besttupl = pivotTupl\n\n return besttupl, bestpts\n\n\n def split(self, tbl, lvl):\n\n node = Random_Projectiontree()\n left_tbl = Tbl(1)\n right_tbl = Tbl(1)\n if (len(tbl.rows) < 2 * pow(len(self.tbl.rows), 1 / 2)):\n for each in tbl.headers:\n\n if tbl.cols[each].txt[0]==\"<\" or tbl.cols[each].txt[0]==\">\":\n node.leaves.append(tbl.cols[each])\n\n node.lvl,node.splitCount = lvl,len(tbl.rows)\n return node\n else:\n besttupl, bestpts = self.bestpivotpts(tbl)\n left_tbl.addcol([col.txt for col in 
tbl.cols])\n right_tbl.addcol([col.txt for col in tbl.cols])\n for idx, each in enumerate(tbl.rows):\n if idx in bestpts:\n right_tbl.addrow(each.lst)\n else:\n left_tbl.addrow(each.lst)\n splitCount = len(left_tbl.rows) + len(right_tbl.rows)\n\n node.child.append(self.split(left_tbl, lvl + 1))\n node.child.append(self.split(right_tbl, lvl + 1))\n node.splitCount = splitCount\n node.lvl = lvl\n return node\n\nclass Random_Projectiontree:\n def __init__(self):\n self.child = []\n self.leaves = []\n self.lvl = 0\n self.isRoot = False\n self.splitCount = 0\n\ndef showt(root):\n if root.isRoot:\n for col in root.leaves:\n print(col.txt + \" = \", end=\" \")\n if (isinstance(col, Num)):\n print(\"{0} ({1})\".format(col.mean, col.sd), end=\" \")\n else:\n print(\"{0} ({1})\".format(col.mode, col.variety()), end=\" \")\n if not root.isRoot:\n for _ in range(root.lvl):\n print(\"|. \", end=\" \")\n print(root.splitCount)\n if len(root.child) == 0:\n for _ in range(root.lvl - 1):\n print(\"|. \", end=\" \")\n for col in root.leaves:\n print(col.txt + \" = \", end=\" \")\n if (isinstance(col, Num)):\n print(\"{0} ({1})\".format(col.mean, col.sd), end=\" \")\n else:\n print(\"{0} ({1})\".format(col.mode, col.variety()), end=\" \")\n print(\"\")\n else:\n for each in root.child:\n showt(each)\n\ndef distance(row1, row2, cols):\n d, n, p = 0, 0, 2\n for col in cols:\n n += 1\n d0 = col.dist(row1.lst[col.pos], row2.lst[col.pos])\n d += d0 ** p\n return d ** (1 / p) / n ** (1 / p)\n\ndef cosine(x, y, z, dist, cols):\n return (distance(x, z, cols) ** 2 + dist ** 2 - distance(y, z, cols) ** 2) / (2 * dist)\n","sub_path":"hw/7/Hw7.py","file_name":"Hw7.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"251353438","text":"import httplib2\nimport json\nimport sys\n\nclass RestAPI:\n\tdef __init__(self, token):\n\t\tself.token = token\n\t\tself.baseUrl = \"https://discordapp.com/api/\";\n\t\tself.headers = {'Content-Type': 'application/json; charset=UTF-8',\n\t\t\t\t'User-Agent': 'DiscordBot (url, versionNumber)',\n\t\t\t\t'Authorization': self.token}\n\n\tdef getBotName(self):\n\t\turl = self.baseUrl + \"users/@me\";\n\t\tmethod = \"GET\"\n\t\tbody = \"\"\n\t\tresponse, content = httplib2.Http().request(url, method, body, self.headers)\n\t\tself.data = json.loads(content)\n\t\tself.username = self.data[\"username\"]\n\t\treturn self.username\n\n\tdef getGuildData(self):\n\t\turl = self.baseUrl + \"users/@me/guilds\"\n\t\tmethod = \"GET\"\n\t\tbody = \"\"\n\t\tresponse, content = httplib2.Http().request(url, method, body, self.headers)\n\t\tguildData = json.loads(content)\n\t\treturn guildData\n\n\tdef getGuildNames(self):\n\t\tguildData = self.getGuildData()\n\t\tmessage = \"Guilds:\\n\"\n\t\tfor guild in guildData:\n\t\t\tmessage += guild[\"name\"] + \"\\n\"\n\t\treturn message\n\n\tdef getGuildIdFromName(self, guildName):\n\t\tguildData = self.getGuildData()\n\t\tfor guild in guildData:\n\t\t\tif guild[\"name\"] == guildName:\n\t\t\t\tguildId = guild[\"id\"]\n\n\t\treturn guildId\n\t\n\tdef getGuildChannelData(self, guildId):\n\t\turl = self.baseUrl + \"guilds/\" + guildId + \"/channels\"\n\t\tmethod = \"GET\"\n\t\tbody = \"\"\n\t\tresponse, content = httplib2.Http().request(url, method, body, self.headers)\n\t\tchannelData = json.loads(content)\n\t\treturn channelData\n\n\tdef getGuildChannelNames(self, guildName):\n\t\tguildId = self.getGuildIdFromName(guildName)\n\t\tchannelData = 
self.getGuildChannelData(guildId)\n\t\tmessage = \"Channels:\\n\"\n\t\tfor channel in channelData:\n\t\t\tmessage += channel[\"name\"] + \" Type: \" + channel[\"type\"] + \"\\n\"\n\t\treturn message\n\t\n\tdef getGuildChannelIdFromName(self, guildName, channelName):\n\t\tguildId = self.getGuildIdFromName(guildName)\n\t\tchannelData = self.getGuildChannelData(guildId)\n\t\tfor channel in channelData:\n\t\t\tif channel[\"name\"] == channelName:\n\t\t\t\tchannelId = channel[\"id\"]\n\t\treturn channelId\n\t\n\tdef sendMessage(self, guildName, channelName, enableTTS, message):\n\t\tchannelId = self.getGuildChannelIdFromName(guildName, channelName)\n\t\turl = self.baseUrl + \"channels/\" + channelId + \"/messages\"\n\t\tmethod = \"POST\"\n\t\tbody = json.dumps({\"content\": message, \"tts\": enableTTS})\n\t\tresponse, content = httplib2.Http().request(url, method, body, self.headers)\n\t\treturn \"Message \\\"\" + message + \"\\\" has been sent\"\n\t\n\tdef getMessages(self, guildName, channelName):\n\t\tchannelId = self.getGuildChannelIdFromName(guildName, channelName)\n\t\turl = self.baseUrl + \"channels/\" + channelId + \"/messages\"\n\t\tmethod = \"GET\"\n\t\tbody = \"\"\n\t\tresponse, content = httplib2.Http().request(url, method, body, self.headers)\n\t\tself.data = json.loads(content)\n\t\tmessage = \"Channel: \" + channelName + \" \\n\"\n\t\tfor line in reversed(self.data):\n\t\t\tmessage += line[\"content\"] + \"\\n\"\n\t\treturn message;\n\n\tdef getGatewayData(self):\n\t\turl = self.baseUrl + \"gateway/bot\";\n\t\tmethod = \"GET\";\n\t\tbody = \"\";\n\t\tresponse, content = httplib2.Http().request(url, method, body, self.headers);\n\t\tgatewayData = json.loads(content);\n\t\treturn gatewayData\n","sub_path":"DiscordRequestAPI/RestAPI.py","file_name":"RestAPI.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"143298873","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport gensim\nimport re\nfrom itertools import groupby\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n\nclass DataTransform(object):\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n self.__rx = re.compile(u'[\\d]+|/+|[_+#=%><№$€…«»|/*{}\\[\\]‘]|[(\\d)]|[\\[\\]]')\n\n def __del__(self):\n \"\"\"\n Destructor\n \"\"\"\n\n def punctuation_replacement(self, inline):\n line = gensim.utils.to_unicode(inline).lower()\n line = self.__rx.sub(u' ', line)\n return line.replace(u'\\n', u' ').replace(u'\\\\', u' ').split()\n\n @staticmethod\n def array2file(fname, corpus):\n out_file = open(fname, \"w\")\n for s in corpus:\n out_file.write(\" \".join(s).encode('utf-8'))\n out_file.write(\"\\n\")\n out_file.close()\n\n def file2vec(self, fname, start=0, end=0):\n sentences = []\n\n sentences = []\n assert (start >= 0)\n assert (end >= 0)\n if end == 0:\n with open(fname, \"r\") as in_file:\n for inline in in_file.readlines()[start:]:\n l = self.punctuation_replacement(inline)\n if len(l) > 0:\n sentences.append([el for el, _ in groupby(l)])\n else:\n with open(fname, \"r\") as in_file:\n for inline in in_file.readlines()[start:end]:\n l = self.punctuation_replacement(inline)\n if len(l) > 0:\n sentences.append([el for el, _ in groupby(l)])\n return sentences\n\n def str2vec(self, inline):\n sentences = []\n l = self.punctuation_replacement(inline)\n if len(l) > 0:\n sentences.append([el for el, _ in groupby(l)])\n return sentences\n\n @staticmethod\n def labelize_reviews(reviews, class_label, 
label_type):\n LabeledSentence = gensim.models.doc2vec.TaggedDocument\n labelized = []\n l_type = \"\"\n if len(class_label) > 0:\n for i, v in enumerate(reviews):\n if class_label[i] == 0:\n l_type = label_type + \"_NEGATIVE\"\n elif class_label[i] == 1:\n l_type = label_type + \"_POSITIVE\"\n label = '%s_%s' % (l_type, i)\n labelized.append(LabeledSentence(v, [label]))\n else:\n for i, v in enumerate(reviews):\n label = '%s_%s' % (label_type, i)\n labelized.append(LabeledSentence(v, [label]))\n return labelized\n\n @staticmethod\n def get_vecs(model, corpus, r_size):\n vecs = [np.array(model.docvecs[z.tags]).reshape((1, r_size)) for z in corpus]\n return np.concatenate(vecs)\n\n @staticmethod\n # Get vectors from our models\n def get_vecs_by_words(model, corpus, r_size):\n vecs = [np.array(model.infer_vector(z.words)).reshape((1, r_size)) for z in corpus]\n return np.concatenate(vecs)\n\n @staticmethod\n def split_learn_set(sentences_positive, sentences_negative, test_size=0.2):\n y = np.concatenate(\n (np.ones(len(sentences_positive), dtype=np.int), np.zeros(len(sentences_negative), dtype=np.int)))\n x_train, x_test, y_train, y_test = train_test_split(np.concatenate((sentences_positive, sentences_negative)), y,\n test_size=test_size)\n return x_train, x_test, y_train, y_test\n\n def create_learn_sets(self, sent_positive, sent_negative, sent_unsup, t_size=0.2):\n x_train, x_test, y_train, y_test = self.split_learn_set(sentences_positive=sent_positive,\n sentences_negative=sent_negative,\n test_size=t_size)\n x_train = self.labelize_reviews(x_train, y_train, \"TRAIN\")\n x_test = self.labelize_reviews(x_test, y_test, \"TEST\")\n x_unsup = self.labelize_reviews(sent_unsup, [], \"UNSUP\")\n return dict(x_train=x_train, x_test=x_test, y_train=y_train, y_test=y_test, x_unsup=x_unsup)\n","sub_path":"src/data_transform.py","file_name":"data_transform.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"557147571","text":"import string\nfrom sqlalchemy import text\nfrom flaskApp import db\nfrom flaskApp.error.error_handlers import *\n\ndef _asdict(obj):\n result = dict()\n for key in obj.__mapper__.c.keys():\n result[key] = getattr(obj, key)\n return result\n\n'''DB querying helper methods to get data'''\ndef getProfInfo(courseID, semester):\n queryOne = \"select distinct C.CSID, C.CName, P.FName, P.LName, P.PEmail \\\n From Course as C, Teaches as T, Professor as P \\\n WHERE C.CNum = '%s' AND C.Semester = '%s' \\\n AND T.CSID = C.CSID AND T.ProfID = P.ProfID GROUP BY P.FName, P.LName\" % \\\n (courseID, semester)\n sql = text(queryOne)\n courseInfo = db.engine.execute(sql)\n cur = courseInfo.fetchall()\n print(cur)\n cID = str(cur[0][0])\n cName = str(cur[0][1])\n profName = str(cur[0][2]).lstrip() + \" \" + str(cur[0][3])\n return cID, cName, profName\n\ndef executeOfficeHoursQuery(helpType, courseID, semester):\n print(courseID)\n print(semester)\n queryTAs = \" select distinct CONCAT('TA', ' Office Hours') as Help, \\\n TH.DayTime as DayTime, L.Building, L.Room \\\n \tFROM Course as C, TA_OH as TH, Location as L \\\n \tWHERE C.CNum = '%s' AND C.Semester = '%s' AND TH.CSID = C.CSID \\\n \tAND TH.LocID = L.LocID\" % \\\n (courseID, semester)\n\n queryProf = \"select distinct 'Professor Office Hours' as Help, P.DayTime as DayTime, L.Building, L.Room \\\n\tFROM Course as C, Prof_OH as P, Location as L \\\n WHERE C.CNum = '%s' AND C.Semester = '%s' AND P.CSID = C.CSID AND P.LocID = L.LocID\" % \\\n 
(courseID, semester)\n\n queryHelp = \"select distinct He.HName as Help, HI.DayTime as DayTime, L.Building, L.Room \\\n\tFROM Course as C, Has_Help as HH, Help as He, Happens_In as HI, Location as L \\\n\tWHERE C.CNum = '%s' AND C.Semester = '%s' AND HH.CSID = C.CSID AND He.HelpID = HH.HelpID \\\n\tAND HI.HelpID = He.HelpID AND L.LocID = HI.LocID\" % \\\n (courseID, semester)\n\n queryTypes = {'prof' : queryProf, 'TA' : queryTAs, 'Other resource' : queryHelp}\n sql = text(queryTypes[helpType])\n results = db.engine.execute(sql)\n return results\n\ndef getcourses(courseID, semester):\n helpTypes = ['prof', 'TA', 'Other resource']\n result = []\n for type in helpTypes:\n times = executeOfficeHoursQuery(type, courseID, semester)\n for time in times:\n loc = str(time[2]) + \" \" + str(time[3])\n datetimes = time[1]\n datetimes = datetimes[:-1] #remove extra char from Scrapper\n datetimes = datetimes[:-1] #remove extra char from Scrapper\n\n resultElem = {'type' : time[0], 'times': datetimes, 'location' : loc}\n result.append(resultElem)\n return result\n\ndef getCourseIDSemester(param):\n query = \"select distinct C.CNum, C.Semester \\\n From Course as C \\\n WHERE C.CName = '%s'\" % \\\n (param)\n sql = text(query)\n cur = db.engine.execute(sql)\n resTuple = cur.fetchone()\n if resTuple is None:\n print(\"ERROR IN GET COURSE ID SEM\")\n raise NotFound\n print(resTuple)\n cNum = str(resTuple[0])\n Semester = str(resTuple[1])\n return cNum, Semester\n\ndef getOHData(param):\n print(param)\n param = param.lstrip().rstrip() #remove trailing and leading spaces\n print(param)\n if \"null\" in param:\n raise NotFound\n #cursor = db.get_db().cursor()\n if \"Spring\" in param:\n semester = \"Spring\" + (param.split(\"Spring\"))[1]\n courseID = (param.split(\"Spring\"))[0]\n elif \"Fall\" in param:\n semester = \"Fall\" + (param.split(\"Fall\"))[1]\n courseID = (param.split(\"Fall\"))[0]\n else:\n print(param)\n print(\"ERROR IN GET DATA OH\")\n raise NotFound\n print(semester)\n print(courseID)\n id, cName, prof = getProfInfo(courseID, semester)\n print(id)\n print(cName)\n print(prof)\n courses = getcourses( courseID, semester)\n result = {\"id\": id, \"name\" : cName, \"prof\" : prof, \"support\" : courses}\n #cursor.close()\n return result\n\n'''End of DB querying methods'''\n\n'''Below is some dummy data until we add the sql to\npull exam info from datapipeline db'''\ndef getExamData(courseID):\n print(\"hi\")\n\n #engine = db.create_engine('mysql://root:Qazsewq1!@localhost/Parsy')\n #select distinct E.CSID, E.Date, E.Time, E.Name from Exam_Data as E where E.CSID = 'EN.601.421'\n\n res = []\n query = \"select distinct E.Date, E.Time, E.Name \\\n FROM Exam_Data as E \\\n WHERE E.CSID = '%s'\" % \\\n (courseID)\n query_loc = \"select distinct LocID \\\n FROM Class_Times \\\n WHERE CSID = '%s'\" % \\\n (courseID)\n query_time = \"select distinct DayTime \\\n FROM Class_Times \\\n WHERE CSID = '%s'\" % \\\n (courseID)\n if \"Spring\" in courseID:\n semester = \"Spring\" + (courseID.split(\"Spring\"))[1]\n courseID = (courseID.split(\"Spring\"))[0]\n elif \"Fall\" in courseID:\n semester = \"Fall\" + (courseID.split(\"Fall\"))[1]\n courseID = (courseID.split(\"Fall\"))[0]\n else:\n print(courseID)\n print(\"ERROR IN GET DATA OH\")\n raise NotFound\n sql = text(query)\n sql_loc = text(query_loc)\n sql_time = text(query_time)\n results = db.engine.execute(sql)\n results_loc = db.engine.execute(sql_loc)\n results_time = db.engine.execute(sql_time)\n res_exam = []\n exam_location = \"\"\n exam_time = 
\"\"\n for loca in results_loc:\n exam_location = loca[0]\n for t in results_time:\n temp = t[0].index(' ')\n temp = t[0][temp:]\n if ', ' in temp:\n temp = temp[:temp.index(', ')]\n exam_time = temp\n Type = ''\n for exam in results:\n Date = exam[0].strip().capitalize()\n Time = exam[1].strip()\n if Time == '':\n Time = exam_time\n if exam[2].lower() != 'midterm' or exam[2].lower() != 'final':\n if exam[2].lower() == 'final exam':\n Type = 'Final'\n else:\n Type = 'Midterm'\n #Type = exam[2]\n Time = Time.replace(' - ', '-')\n print(\"Date: \", Date)\n print(\"Time: \", Time)\n print(\"Type: \", Type)\n resultExam = {\"type\": Type, \"datetime\": Date + Time, \"location\": exam_location}\n res_exam.append(resultExam)\n\n id, cName, prof = getProfInfo(courseID, semester)\n\n res = {\"id\": id, \"name\": cName, \"prof\": prof, \"exams\": res_exam}\n return res\n\n '''\n if courseID != \"EN.601.320Spring 2019\":\n return {\"exams\" : []}\n res = {\"id\":\"EN.601.320Spring 2019\",\n \"name\":\"Parallel Programming\",\n \"prof\":\"Randal Burns\",\n \"exams\":[\n {\\\n \"type\":\"Midterm\",\n \"datetime\":\"Monday March 11 1:00pm-2:00pm\",\n \"location\":\"Malone 274\"\n },\n {\\\n \"type\":\"Midterm\",\n \"datetime\":\"Thursday April 2 12:00pm-2:00pm\",\n \"location\":\"Malone 218\"\n },\n {\n \"type\":\"Final\",\n \"datetime\":\"Tuesday May 6 1:00pm-2:00pm\",\n \"location\":\"Hodson 235\"\n }]}'''\n return res\n\n\n'''Below is some dummy data until we add the sql to\npull exam info from datapipeline db'''\ndef getAssignmentData(courseID):\n res = []\n query = \"select distinct CSID, Date, Name \\\n FROM Assignment_Data \\\n WHERE CSID = '%s'\" % \\\n (courseID)\n query_loc = \"select distinct LocID \\\n FROM Class_Times \\\n WHERE CSID = '%s'\" % \\\n (courseID)\n query_time = \"select distinct DayTime \\\n FROM Class_Times \\\n WHERE CSID = '%s'\" % \\\n (courseID)\n if \"Spring\" in courseID:\n semester = \"Spring\" + (courseID.split(\"Spring\"))[1]\n courseID = (courseID.split(\"Spring\"))[0]\n elif \"Fall\" in courseID:\n semester = \"Fall\" + (courseID.split(\"Fall\"))[1]\n courseID = (courseID.split(\"Fall\"))[0]\n else:\n print(courseID)\n print(\"ERROR IN GET DATA OH\")\n raise NotFound\n sql = text(query)\n sql_loc = text(query_loc)\n sql_time = text(query_time)\n results = db.engine.execute(sql)\n results_loc = db.engine.execute(sql_loc)\n results_time = db.engine.execute(sql_time)\n res_hw = []\n hw_location = \"\"\n hw_time = \"\"\n for loca in results_loc:\n hw_location = loca[0]\n for t in results_time:\n temp = t[0].index(' ')\n temp = t[0][temp:]\n if ', ' in temp:\n temp = temp[:temp.index(', ')]\n hw_time = temp\n for hw in results:\n Date = hw[1].strip().capitalize()\n Name = hw[2].strip()\n Time = hw_time\n Time = Time.replace(' - ', '-')\n #Type = exam[2]\n print(\"Date: \", Date)\n print(\"Name: \", Name)\n resultHW = {\"type\": 'Homework', \"datetime\": Date + Time, \"location\": hw_location}\n res_hw.append(resultHW)\n\n id, cName, prof = getProfInfo(courseID, semester)\n\n res = {\"id\": id, \"name\": cName, \"prof\": prof, \"assignments\": res_hw}\n return res\n\n\n '''\n if courseID != \"EN.601.320Spring 2019\":\n return {\"assignments\" : []}\n res = {\"id\":\"EN.601.320Spring 2019\",\n \"name\":\"Parallel Programming\",\n \"prof\":\"Randal Burns\",\n \"assignments\":[\n {\\\n \"type\":\"Homework\",\n \"datetime\":\"March 12 1:00pm-2:00pm\",\n \"location\":\"Malone 274\"\n },\n {\\\n \"type\":\"Homework\",\n \"datetime\":\"April 3 12:00pm-2:00pm\",\n 
\"location\":\"Remsen 217\"\n },\n {\n \"type\":\"Homework\",\n \"datetime\":\"May 7 1:00pm-2:00pm\",\n \"location\":\"Hodson 237\"\n }]}\n return res'''\n\n'''Below is some dummy data until we add the sql to\npull class Meeting info from datapipeline db'''\ndef getClassMeetingData(courseID):\n res = []\n query = \"select distinct CSID, DayTime, LocID, Type \\\n FROM Class_Times \\\n WHERE CSID = '%s'\" % \\\n (courseID)\n if \"Spring\" in courseID:\n semester = \"Spring\" + (courseID.split(\"Spring\"))[1]\n courseID = (courseID.split(\"Spring\"))[0]\n elif \"Fall\" in courseID:\n semester = \"Fall\" + (courseID.split(\"Fall\"))[1]\n courseID = (courseID.split(\"Fall\"))[0]\n else:\n print(courseID)\n print(\"ERROR IN GET DATA OH\")\n raise NotFound\n sql = text(query)\n results = db.engine.execute(sql)\n res_class = []\n DOW = {'M ':'Monday ', 'T ':'Tuesday ', 'W ':'Wednesday ', 'Th ':'Thursday ', 'F ':'Friday ', 'Sa ':'Saturday ', 'S ':'Sunday '}\n for course in results:\n Times = []\n Times.append(course[1])\n Times_temp = []\n print(\"Times: \", Times)\n for time in Times:\n if ', ' in time:\n time = time.split(', ')\n for t in time:\n Times_temp.append(t)\n else:\n Times_temp.append(time)\n Times = Times_temp\n Times_temp = []\n print(\"after comma: \", Times)\n for time in Times:\n print('time in loop: ', time)\n if 'TTh ' in time:\n temp = time[4:]\n print('temp: ', temp)\n t = 'Tuesday ' + temp\n Times_temp.append(t)\n t = 'Thursday ' + temp\n Times_temp.append(t)\n print(\"ttemp: \", Times_temp)\n elif 'MWF ' in time:\n temp = time[4:]\n print('temp: ', temp)\n t = 'Monday ' + temp\n Times_temp.append(t)\n t = 'Wednesday ' + temp\n Times_temp.append(t)\n t = 'Friday ' + temp\n Times_temp.append(t)\n print(\"ttemp: \", Times_temp)\n elif 'MW ' in time:\n print('I am in MW')\n temp = time[3:]\n print('temp: ', temp)\n t = 'Monday ' + temp\n Times_temp.append(t)\n t = 'Wednesday ' + temp\n Times_temp.append(t)\n print(\"ttemp: \", Times_temp)\n else:\n Times_temp.append(time)\n print(\"Times_Temp: \", Times_temp)\n Times = Times_temp\n print(\"New Times: \", Times)\n LocID = course[2]\n Type = course[3]\n print(\"time: \", Times)\n print(\"Location: \", LocID)\n print(\"Type: \", Type)\n for time in Times:\n if time[:3] in DOW.keys():\n time = DOW[time[:3]] + time[3:]\n elif time[:2] in DOW.keys():\n time = DOW[time[:2]] + time[2:]\n if ' - ' in time:\n time = time.replace(' - ', '-')\n resultClass = {\"type\": Type, \"times\": time, \"location\": LocID}\n res_class.append(resultClass)\n\n id, cName, prof = getProfInfo(courseID, semester)\n\n res = {\"id\": id, \"name\": cName, \"prof\": prof, \"meetings\": res_class}\n return res\n\n '''\n if courseID != \"EN.601.320Spring 2019\":\n return {\"meetings\" : []}\n res = {\"id\":\"EN.601.320Spring 2019\",\n \"name\":\"Parallel Programming\",\n \"prof\":\"Randal Burns\",\n \"meetings\":[\n {\\\n \"type\":\"Lecture\",\n \"times\":\"Monday 3:00pm-5:00pm\",\n \"location\":\"Remsen 104\"\n },\n {\\\n \"type\":\"Section\",\n \"times\":\"Thursday 12:00pm-2:00pm\",\n \"location\":\"Malone 218\"\n }]}\n return res'''\n","sub_path":"parsy-backend/flaskApp/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":13160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"506527645","text":"import random\nstart = input('請輸入開始值')\nend = input('請輸入結束值')\nstart = int(start)\nend = int(end)\nr = random.randint(start, end)\ncount = 0\nwhile True:\n\tnum = input('請猜數字')\n\tnum = 
int(num)\n\tcount += 1\n\tif num == r:\n\t\tprint('你猜對了!')\n\t\tprint('這是你猜的第', count, '次') \n\t\tbreak\n\telif num > r:\n\t\tprint('太大了')\n\telif num < r:\n\t\tprint('太小了')\n\tprint('這是你猜的第', count, '次') ","sub_path":"guess_num.py","file_name":"guess_num.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"629971430","text":"# A modification version from chainercv repository.\n# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)\nfrom __future__ import division\n\nimport os\nfrom collections import defaultdict\nimport numpy as np\nimport torch\nfrom PIL import Image\nimport cv2 as cv\nimport json\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou\nfrom maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\nimport pycocotools.mask as mask_util\nfrom deeplab.utils.eval import Eval\n\ndef do_isic_evaluation(dataset, predictions, grounds, output_folder, logger, meters):\n    masker = Masker(threshold=0.5, padding=1)\n    evaluator = Eval(2) # 2 = num_classes\n    iou = []\n    pred_list = []\n    for image_id, prediction in enumerate(predictions):\n        img_info = dataset.get_img_info(image_id)\n        image_width = img_info[\"width\"]\n        image_height = img_info[\"height\"]\n        name = prediction[1]\n        prediction = prediction[0].resize((image_width, image_height))\n        masks = prediction.get_field(\"mask\")\n        masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)\n        mask = masks[0].squeeze()\n        # ground = grounds[image_id].resize((image_width, image_height))\n        store_result = True\n        if len(prediction) == 0:\n            print('img ' + str(image_id) + ' senza bbox')\n            pred_list.append(list([0, 0, 0, 0]))\n            iou.append(0)\n            continue\n        scores = prediction.get_field(\"scores\")\n        scores_ord, scores_ind = scores.squeeze().sort(descending=True)\n        # iou_value, bbox_index = boxlist_iou(prediction, ground).squeeze().sort(descending=True)\n        # if len(iou_value.shape)> 0:\n        #     iou.append(iou_value[0])\n        # else:\n        #     iou.append(iou_value)\n        if len(scores_ind.shape) > 0:\n            pred_list.append(list(prediction.bbox[scores_ind[0]].long().tolist()))\n            mask_image = np.array(mask[scores_ind[0]])\n        else:\n            pred_list.append(list(prediction.bbox[scores_ind].long().tolist()))\n            mask_image = np.array(mask)\n        # gt_image = np.array(dataset.get_gt_image(image_id))\n        # if len(scores_ind.shape) > 0:\n        #     mask_image = np.array(mask[scores_ind[0]])\n        # else:\n        #     mask_image = np.array(mask)\n        # gt_image[gt_image == 255] = 1\n        # evaluator.add_batch(gt_image, mask_image)\n        if store_result:\n            # res = overlay_boxes(gt_image.copy(), pred_boxlists[i].bbox[bbox_index[0]])\n            # res = Image.fromarray(res)\n            # res.save('box_img_' + str(i) + '.png')\n            mask_image[mask_image==1] = 255\n            segm = Image.fromarray(mask_image)\n            segm.save('/homes/my_d/my_d/img_train_2018_5200/ISIC_{}.png'.format(name))\n\n            # gr = Image.fromarray(gt_image)\n            # gr.save('ground_'+ str(i) + '.png')\n\n    # MIoU_segm = evaluator.Mean_Intersection_over_Union()\n    # print(\"Miou segmentation:\" + str(MIoU_segm))\n    with open(os.path.join(output_folder, \"predictions_train_2018_5200.json\"), \"w\") as fid:\n        json.dump(pred_list, fid)\n\n    result_str = 'mIOU: ' + str(np.nanmean(iou)) + ' - '\n    result_str += 'IOU: ' + str(iou)\n\n    logger.info(result_str)\n    if output_folder:\n        with open(os.path.join(output_folder, \"result.txt\"), \"a\") as fid:\n            fid.write(\"\\n\" + result_str)\n\n# def 
do_isic_evaluation(dataset, predictions, grounds, output_folder, logger):\n#     pred_boxlists = []\n#     gt_boxlists = []\n#     masker = Masker(threshold=0.5, padding=1)\n#     evaluator = Eval(2) #2 = num_classes\n#     # assert isinstance(dataset, COCODataset)\n#     pred_masklist = []\n#     for image_id, prediction in enumerate(predictions):\n#         img_info = dataset.get_img_info(image_id)\n#         image_width = img_info[\"width\"]\n#         image_height = img_info[\"height\"]\n#         prediction = prediction.resize((image_width, image_height))\n#         pred_boxlists.append(prediction)\n#\n#         masks = prediction.get_field(\"mask\")\n#         # if list(masks.shape[-2:]) != [image_height, image_width]:\n#         masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)\n#         pred_masklist.append(masks[0].squeeze())\n#\n#         ground = grounds[image_id].resize((image_width, image_height))\n#         # gt_boxlist = dataset.get_groundtruth(image_id)\n#         gt_boxlists.append(ground)\n#\n#     result = eval_detection_isic(\n#         pred_boxlists=pred_boxlists,\n#         pred_masklist=pred_masklist,\n#         gt_boxlists=gt_boxlists,\n#         dataset=dataset,\n#         eval=evaluator,\n#         iou_thresh=0.5,\n#         use_07_metric=True,\n#         output_folder=output_folder\n#     )\n#     result_str = 'mIOU: ' + str(result[\"miou\"]) + ' - '\n#     result_str += 'IOU: ' + str(result[\"iou\"])\n#     # for i, ap in enumerate(result[\"ap\"]):\n#     #     if i == 0:  # skip background\n#     #         continue\n#     #     result_str += \"{:<16}: {:.4f}\\n\".format(\n#     #         dataset.map_class_id_to_class_name(i), ap\n#     #     )\n#     logger.info(result_str)\n#     if output_folder:\n#         with open(os.path.join(output_folder, \"result.txt\"), \"a\") as fid:\n#             fid.write(\"\\n\" + result_str)\n#     return result\n\n# def eval_detection_isic(pred_boxlists, pred_masklist, gt_boxlists, dataset, eval, output_folder, iou_thresh=0.5, use_07_metric=False):\n#     \"\"\"Evaluate on voc dataset.\n#     Args:\n#         pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.\n#         gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.\n#         iou_thresh: iou thresh\n#         use_07_metric: boolean\n#     Returns:\n#         dict represents the results\n#     \"\"\"\n#     assert len(gt_boxlists) == len(\n#         pred_boxlists\n#     ), \"Length of gt and pred lists need to be same.\"\n#\n#     iou = []\n#     pred_list = []\n#\n#     store_result = True\n#     for i in range(dataset.get_len_grnds()):\n#         if len(pred_boxlists[i]) == 0:\n#             print('img ' + str(i) + ' senza bbox')\n#             pred_list.append(list([0, 0, 0, 0]))\n#             iou.append(0)\n#             continue\n#         iou_value, bbox_index = boxlist_iou(pred_boxlists[i], gt_boxlists[i]).squeeze().sort(descending=True)\n#         if len(iou_value.shape)> 0:\n#             iou.append(iou_value[0])\n#         else:\n#             iou.append(iou_value)\n#         pred_list.append(list(pred_boxlists[i].bbox[bbox_index[0]].long().tolist()))\n#\n#         gt_image = np.array(dataset.get_gt_image(i))\n#         if len(bbox_index.shape) > 0:\n#             mask_image = np.array(pred_masklist[i][bbox_index[0]])\n#         else:\n#             mask_image = np.array(pred_masklist[i])\n#         gt_image[gt_image == 255] = 1\n#         eval.add_batch(gt_image, mask_image)\n#\n#         if store_result:\n#             # res = overlay_boxes(gt_image.copy(), pred_boxlists[i].bbox[bbox_index[0]])\n#             # res = Image.fromarray(res)\n#             # res.save('box_img_' + str(i) + '.png')\n#             mask_image[mask_image==1] = 255\n#             segm = Image.fromarray(mask_image)\n#             segm.save('{}-out_top-epoch-{}.png'.format(i, 1))\n#\n#             # gr = Image.fromarray(gt_image)\n#             # gr.save('ground_'+ str(i) + '.png')\n#\n#     MIoU_segm = eval.Mean_Intersection_over_Union()\n#\n#     print(\"Miou segmentation:\" + str(MIoU_segm))\n#     with open(os.path.join(output_folder, \"predictions_eval_with_segm.json\"), \"w\") as fid:\n#         
json.dump(pred_list, fid)\n#\n#     return {\"iou\": iou, \"miou\": np.nanmean(iou)}\n\ndef bb_intersection_over_union(boxA, boxB):\n    # determine the (x, y)-coordinates of the intersection rectangle\n    xA = max(int(boxA[0]), int(boxB[0]))\n    yA = max(int(boxA[1]), int(boxB[1]))\n    xB = min(int(boxA[2]), int(boxB[2]))\n    yB = min(int(boxA[3]), int(boxB[3]))\n\n    # compute the area of intersection rectangle\n    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\n    # compute the area of both the prediction and ground-truth\n    # rectangles\n    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\n    # compute the intersection over union by taking the intersection\n    # area and dividing it by the sum of prediction + ground-truth\n    # areas - the interesection area\n    iou = interArea / float(boxAArea + boxBArea - interArea)\n\n    # return the intersection over union value\n    return iou\n\ndef select_top_prediction(predictions):\n    \"\"\"\n    Select only predictions which have a `score` > self.confidence_threshold,\n    and returns the predictions in descending order of score\n\n    Arguments:\n        predictions (BoxList): the result of the computation by the model.\n            It should contain the field `scores`.\n\n    Returns:\n        prediction (BoxList): the detected objects. Additional information\n            of the detection properties can be found in the fields of\n            the BoxList via `prediction.fields()`\n    \"\"\"\n    # scores = predictions.get_field(\"scores\")\n    # keep = torch.nonzero(scores > 0.7).squeeze(1)\n    # predictions = predictions[keep]\n\n    scores = predictions.get_field(\"scores\")\n    _, idx = scores.sort(0, descending=True)\n    return predictions[idx[[0]]]\n\ndef compute_colors_for_labels(labels):\n    \"\"\"\n    Simple function that adds fixed colors depending on the class\n    \"\"\"\n    palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n    colors = labels[:, None] * palette\n    colors = (colors % 255).numpy().astype(\"uint8\")\n    return colors\n\ndef overlay_boxes(image, predictions):\n    \"\"\"\n    Adds the predicted boxes on top of the image\n\n    Arguments:\n        image (np.ndarray): an image as returned by OpenCV\n        predictions (BoxList): the result of the computation by the model.\n            It should contain the field `labels`.\n    \"\"\"\n    # labels = predictions.get_field(\"labels\")\n    # boxes = predictions.bbox\n\n    # colors = compute_colors_for_labels(labels).tolist()\n\n    # for box in zip(boxes):\n    #     box = box.to(torch.int64)\n    #     top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n    #     cv.rectangle(\n    #         image, tuple(top_left), tuple(bottom_right), (255,255,255), 3\n    #     )\n    left = predictions[0].long().numpy()\n    top = predictions[1].long().numpy()\n    right = predictions[2].long().numpy()\n    bottom = predictions[3].long().numpy()\n    cv.rectangle(\n        image, (left, top), (right, bottom), (255,255,255), 3\n    )\n    return image\n\ndef overlay_mask(image, prediction):\n    \"\"\"\n    Adds the instances contours for each predicted object.\n    Each label has a different color.\n    Arguments:\n        image (np.ndarray): an image as returned by OpenCV\n        predictions (BoxList): the result of the computation by the model.\n            It should contain the field `mask` and `labels`.\n    \"\"\"\n    # mask = prediction.get_field(\"mask\").numpy()\n    # labels = predictions.get_field(\"labels\")\n\n    # colors = compute_colors_for_labels(1).tolist()\n\n    # for mask, color in zip(masks, colors):\n    # thresh = prediction[0, :, :, None]\n    prediction = prediction.squeeze().numpy().astype('uint8')\n    _, contours, hierarchy = 
cv.findContours(\n        prediction, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE\n    )\n    image = cv.drawContours(image, contours, -1, (255,255,255), 3)\n\n    return image\n\ndef calc_detection_isic_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):\n    \"\"\"Calculate precision and recall based on evaluation code of PASCAL VOC.\n    This function calculates precision and recall of\n    predicted bounding boxes obtained from a dataset which has :math:`N`\n    images.\n    The code is based on the evaluation code used in PASCAL VOC Challenge.\n    \"\"\"\n    n_pos = defaultdict(int)\n    score = defaultdict(list)\n    match = defaultdict(list)\n    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):\n        pred_bbox = pred_boxlist.bbox.numpy()\n        pred_label = pred_boxlist.get_field(\"labels\").numpy()\n        pred_score = pred_boxlist.get_field(\"scores\").numpy()\n        gt_bbox = gt_boxlist.bbox.numpy()\n        gt_label = gt_boxlist.get_field(\"labels\").numpy()\n        gt_difficult = gt_boxlist.get_field(\"difficult\").numpy()\n\n        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):\n            pred_mask_l = pred_label == l\n            pred_bbox_l = pred_bbox[pred_mask_l]\n            pred_score_l = pred_score[pred_mask_l]\n            # sort by score\n            order = pred_score_l.argsort()[::-1]\n            pred_bbox_l = pred_bbox_l[order]\n            pred_score_l = pred_score_l[order]\n\n            gt_mask_l = gt_label == l\n            gt_bbox_l = gt_bbox[gt_mask_l]\n            gt_difficult_l = gt_difficult[gt_mask_l]\n\n            n_pos[l] += np.logical_not(gt_difficult_l).sum()\n            score[l].extend(pred_score_l)\n\n            if len(pred_bbox_l) == 0:\n                continue\n            if len(gt_bbox_l) == 0:\n                match[l].extend((0,) * pred_bbox_l.shape[0])\n                continue\n\n            # VOC evaluation follows integer typed bounding boxes.\n            pred_bbox_l = pred_bbox_l.copy()\n            pred_bbox_l[:, 2:] += 1\n            gt_bbox_l = gt_bbox_l.copy()\n            gt_bbox_l[:, 2:] += 1\n            iou = boxlist_iou(\n                BoxList(pred_bbox_l, gt_boxlist.size),\n                BoxList(gt_bbox_l, gt_boxlist.size),\n            ).numpy()\n            gt_index = iou.argmax(axis=1)\n            # set -1 if there is no matching ground truth\n            gt_index[iou.max(axis=1) < iou_thresh] = -1\n            del iou\n\n            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)\n            for gt_idx in gt_index:\n                if gt_idx >= 0:\n                    if gt_difficult_l[gt_idx]:\n                        match[l].append(-1)\n                    else:\n                        if not selec[gt_idx]:\n                            match[l].append(1)\n                        else:\n                            match[l].append(0)\n                    selec[gt_idx] = True\n                else:\n                    match[l].append(0)\n\n    n_fg_class = max(n_pos.keys()) + 1\n    prec = [None] * n_fg_class\n    rec = [None] * n_fg_class\n\n    for l in n_pos.keys():\n        score_l = np.array(score[l])\n        match_l = np.array(match[l], dtype=np.int8)\n\n        order = score_l.argsort()[::-1]\n        match_l = match_l[order]\n\n        tp = np.cumsum(match_l == 1)\n        fp = np.cumsum(match_l == 0)\n\n        # If an element of fp + tp is 0,\n        # the corresponding element of prec[l] is nan.\n        prec[l] = tp / (fp + tp)\n        # If n_pos[l] is 0, rec[l] is None.\n        if n_pos[l] > 0:\n            rec[l] = tp / n_pos[l]\n\n    return prec, rec\n\n\ndef calc_detection_isic_ap(prec, rec, use_07_metric=False):\n    \"\"\"Calculate average precisions based on evaluation code of PASCAL VOC.\n    This function calculates average precisions\n    from given precisions and recalls.\n    The code is based on the evaluation code used in PASCAL VOC Challenge.\n    Args:\n        prec (list of numpy.array): A list of arrays.\n            :obj:`prec[l]` indicates precision for class :math:`l`.\n            If :obj:`prec[l]` is :obj:`None`, this function returns\n            :obj:`numpy.nan` for class :math:`l`.\n        rec (list of numpy.array): A list of arrays.\n            :obj:`rec[l]` indicates recall for class :math:`l`.\n            If :obj:`rec[l]` is :obj:`None`, this function returns\n            :obj:`numpy.nan` 
for class :math:`l`.\n        use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric\n            for calculating average precision. The default value is\n            :obj:`False`.\n    Returns:\n        ~numpy.ndarray:\n        This function returns an array of average precisions.\n        The :math:`l`-th value corresponds to the average precision\n        for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is\n        :obj:`None`, the corresponding value is set to :obj:`numpy.nan`.\n    \"\"\"\n\n    n_fg_class = len(prec)\n    ap = np.empty(n_fg_class)\n    for l in range(n_fg_class):\n        if prec[l] is None or rec[l] is None:\n            ap[l] = np.nan\n            continue\n\n        if use_07_metric:\n            # 11 point metric\n            ap[l] = 0\n            for t in np.arange(0.0, 1.1, 0.1):\n                if np.sum(rec[l] >= t) == 0:\n                    p = 0\n                else:\n                    p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])\n                ap[l] += p / 11\n        else:\n            # correct AP calculation\n            # first append sentinel values at the end\n            mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))\n            mrec = np.concatenate(([0], rec[l], [1]))\n\n            mpre = np.maximum.accumulate(mpre[::-1])[::-1]\n\n            # to calculate area under PR curve, look for points\n            # where X axis (recall) changes value\n            i = np.where(mrec[1:] != mrec[:-1])[0]\n\n            # and sum (\\Delta recall) * prec\n            ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n    return ap","sub_path":"pipeline/ISIC_MaskR-CNN/maskrcnn_benchmark/data/datasets/evaluation/isic/isic_eval_noground.py","file_name":"isic_eval_noground.py","file_ext":"py","file_size_in_byte":17219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"134218224","text":"# https://leetcode.com/problems/permutations/\n\n\nclass Solution(object):\n    def permute(self, nums):\n        if not nums:\n            return []\n\n        results = {(nums[0],)}\n\n        for num in nums[1:]:\n            next_results = set()\n            for perm in results:\n                for i in range(len(perm) + 1):\n                    next_results.add(perm[:i] + (num,) + perm[i:])\n\n            results = next_results\n\n        return [list(i) for i in results]\n\nif __name__ == '__main__':\n    print(Solution().permute([1, 2, 3]))\n","sub_path":"leecode/0014_permutations.py","file_name":"0014_permutations.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"6868481","text":"import psycopg2\nimport common.globals as g\n\nclass profile_37530_quota_closed_and_balance_transferred_event(object):\n\tdef import_xml(self, app, update_type, oMessage, transaction_id, message_id):\n\t\tg.app.message_count += 1\n\t\toperation_date\t\t\t\t= app.getDatestamp()\n\t\tquota_definition_sid\t\t= app.getNumberValue(oMessage, \".//oub:quota.definition.sid\", True)\n\t\toccurrence_timestamp\t\t= app.getValue(oMessage, \".//oub:occurrence.timestamp\", True)\n\t\tclosing_date\t\t\t\t= app.getDateValue(oMessage, \".//oub:closing.date\", True)\n\t\ttransferred_amount\t\t\t= app.getNumberValue(oMessage, \".//oub:transferred.amount\", True)\n\t\ttarget_quota_definition_sid\t= app.getNumberValue(oMessage, \".//oub:target.quota.definition.sid\", True)\n\n\n\t\tif update_type == \"1\":\t# Update\n\t\t\toperation = \"U\"\n\t\t\tapp.doprint (\"Updating quota closed and balance transferred event for quota definition \" + str(quota_definition_sid))\n\t\telif update_type == \"2\":\t# DELETE\n\t\t\toperation = \"D\"\n\t\t\tapp.doprint (\"Deleting quota closed and balance transferred event for quota definition \" + str(quota_definition_sid))\n\t\telse:\t\t\t\t\t# INSERT\n\t\t\toperation = \"C\"\n\t\t\tapp.doprint (\"Creating quota closed and balance 
transferred event for quota definition \" + str(quota_definition_sid))\n\n\t\tcur = app.conn.cursor()\n\t\ttry:\n\t\t\tcur.execute(\"\"\"INSERT INTO quota_unblocking_events_oplog (\n\t\t\tquota_definition_sid, occurrence_timestamp, closing_date, transferred_amount, target_quota_definition_sid, operation, operation_date)\n\t\t\tVALUES (%s, %s, %s, %s, %s, %s, %s)\"\"\", \n\t\t\t(quota_definition_sid, occurrence_timestamp, closing_date, transferred_amount, target_quota_definition_sid, operation, operation_date))\n\t\t\tapp.conn.commit()\n\t\texcept:\n\t\t\tg.app.log_error(\"quota closed and balance transferred event\", operation, quota_definition_sid, None, transaction_id, message_id)\n\t\tcur.close()\n","sub_path":"create-data/convert_and_import_taric/profile/profile_37530_quota_closed_and_balance_transferred_event.py","file_name":"profile_37530_quota_closed_and_balance_transferred_event.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"552277065","text":"#!/usr/bin/python3\nimport os\nimport subprocess\nfrom subprocess import Popen\nimport utils\ncmd = \"echo `xclip -o`\"\ncmd = Popen(cmd, shell=True, stdout=subprocess.PIPE)\nprint (cmd)\n#cmd_out = str(cmd.stdout.read())\nbar = str(cmd.stdout.read())\nbar = os.path.expanduser(bar[2:-3])\nprint (bar)\nexit()\nif os.path.isfile(bar):\n    print(os.path.dirname(bar))\nelse:\n    if os.path.exists(bar):\n        print (bar)\n    else:\n        bar = os.path.dirname(bar)\n        print (bar)\n","sub_path":"bin/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"643895621","text":"#!/usr/bin/env python3\nimport rospy\nfrom gazebo_msgs.msg import ModelStates\nimport math\n#import gym\nimport numpy as np\nimport tensorflow as tf\n# from ddpg import *\nfrom mddpg.magent import *\n# from tworobot_environment import Env\n# from tworobot_environment_getobjects import Env\nfrom multirobot_environment import Env\nfrom pathlib import Path\nimport argparse\n\nimport os\n# os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"   # see issue #152\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\nexploration_decay_start_step = 50000\nstate_dim = 16\naction_dim = 2\naction_linear_max = 0.25  # m/s\naction_angular_max = 0.5  # rad/s\n\ndef write_to_csv(item, file_name):\n    with open(file_name, 'a') as f:\n        f.write(\"%s\\n\" % item)\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--train', type=int, default=0, help='1 for training and 0 for testing')\n    parser.add_argument('--env_id', type=int, default=2, help='env name')\n    parser.add_argument('--sac', type=int, default=0, help='1 for using sac')\n    parser.add_argument('--visual_obs', type=int, default=0, help='1 for using image at robot observation')\n    parser.add_argument('--test_env_id', type=int, default=2, help='test environment id')\n    parser.add_argument('--n_scan', type=int, default=10, help='num of scan sampled from full scan')\n\n    args = parser.parse_args()\n    return args\n\ndef main():\n    rospy.init_node('baseline')\n\n    # get arg\n    args = parse_args()\n    is_training = bool(args.train)\n    env_name = 'env' + str(args.env_id)\n    trained_models_dir = './src/trained_models/bl-' + env_name + '-models/' if not args.visual_obs else \\\n        './src/trained_models/vis_obs-' + env_name + '-models/'\n\n    # env = Env(is_training, args.env_id, args.test_env_id, args.visual_obs, args.n_scan)\n    env = 
Env(is_training, args.env_id, args.test_env_id, 2, args.visual_obs, args.n_scan)\n    \n    # agent = DDPG(env, state_dim, action_dim, trained_models_dir)\n    lr_actor = 1e-4\n    lr_critic = 1e-4\n    lr_decay = .95\n    replay_buff_size = 10000\n    gamma = .99\n    batch_size = 128\n    random_seed = 42\n    soft_update_tau = 1e-3\n\n    # 2 agents\n    agent = MADDPG(state_dim, action_dim, lr_actor, lr_critic, lr_decay, replay_buff_size, gamma, batch_size, random_seed, soft_update_tau)\n\n    past_action = np.array([[0., 0.], [0., 0.]])\n    print('State Dimensions: ' + str(state_dim))\n    print('Action Dimensions: ' + str(action_dim))\n    print('Action Max: ' + str(action_linear_max) + ' m/s and ' + str(action_angular_max) + ' rad/s')\n\n    if is_training:\n        print('Training mode')\n        # path things\n        figures_path = './figures/bl-' + env_name + '/' if not args.visual_obs else \\\n            './figures/vis_obs-' + env_name + '/'\n        print(figures_path)\n        Path(trained_models_dir + 'actor').mkdir(parents=True, exist_ok=True)\n        Path(trained_models_dir + 'critic').mkdir(parents=True, exist_ok=True)\n        Path(figures_path).mkdir(parents=True, exist_ok=True)\n\n        avg_reward_his = []\n        threshold_init = 20\n        total_rewards = []\n        avg_scores = []\n        max_avg_score = -1\n        max_score = -1\n        var = 1.\n        ep_rets = []\n        ep_ret = 0.\n        \n        while True:\n            states = env.reset()\n            one_round_step = 0\n            scores = np.zeros(2) \n            while True:\n                a = agent.act(states)\n                a[0][0] = np.clip(np.random.normal(a[0][0], var), 0., 1.)\n                a[0][1] = np.clip(np.random.normal(a[0][1], var), -0.5, 0.5)\n                a[1][0] = np.clip(np.random.normal(a[1][0], var), 0., 1.)\n                a[1][1] = np.clip(np.random.normal(a[1][1], var), -0.5, 0.5)\n\n                state_s, r, dones, arrives = env.step([a[0], a[1]], [past_action[0], past_action[1]])\n\n                time_step = agent.update(states, a, r, state_s, dones)\n\n                if arrives:\n                    result = 'Success'\n                else:\n                    result = 'Fail'\n\n                \n                # if time_step > 0:\n                #     total_reward += r\n                #     ep_ret += r\n                # print(\"Timestep: \",time_step)\n                # if time_step % 10000 == 0 and time_step > 0:\n                #     print('---------------------------------------------------')\n                #     avg_reward = total_reward / 10000\n                #     print('Average_reward = ', avg_reward)\n                #     avg_reward_his.append(round(avg_reward, 2))\n                #     print('Average Reward:',avg_reward_his)\n                #     total_reward = 0\n                #     print('Mean episode return over training time step: {:.2f}'.format(np.mean(ep_rets)))\n                #     print('Mean episode return over current 10k training time step: {:.2f}'.format(np.mean(ep_rets[-10:])))\n                #     write_to_csv(np.mean(ep_rets), figures_path + 'mean_ep_ret_his.csv')\n                #     write_to_csv(np.mean(ep_rets[-10:]), figures_path + 'mean_ep_ret_10k_his.csv')\n                #     write_to_csv(avg_reward, figures_path + 'avg_reward_his.csv')\n                #     print('---------------------------------------------------')\n\n                # if time_step % 5 == 0 and time_step > exploration_decay_start_step:\n                #     var *= 0.9999\n\n                scores += np.array(r)\n                past_action = a\n                states = state_s\n                one_round_step += 1\n\n                # if arrive_s:\n                #     print('Step: %3i' % one_round_step, '| Var: %.2f' % var, '| Time step: %i' % time_step, '|', result)\n                #     one_round_step = 0\n                #     if time_step > 0:\n                #         ep_rets.append(ep_ret)\n                #         ep_ret = 0.\n\n                # if done_s or one_round_step >= 500:\n                #     print('Step: %3i' % one_round_step, '| Var: %.2f' % var, '| Time step: %i' % time_step, '|', result)\n                #     if time_step > 0:\n                #         ep_rets.append(ep_ret)\n                #         ep_ret = 0.\n                #     break\n                if (dones[0] == 1 and dones[1] == 1) or (arrives[0] == 1 and arrives[1] == 1) or one_round_step >= 500:\n                    break\n            \n            episode_score = np.max(scores)\n            total_rewards.append(episode_score)\n            
print(\"Score: {:.4f}\".format(episode_score))\n\n if max_score <= episode_score: \n max_score = episode_score\n agent.save('./tworobot_weights.pth')\n\n if len(total_rewards) >= 100: # record avg score for the latest 100 steps\n latest_avg_score = sum(total_rewards[(len(total_rewards)-100):]) / 100\n print(\"100 Episodic Everage Score: {:.4f}\".format(latest_avg_score))\n avg_scores.append(latest_avg_score)\n \n # if max_avg_score <= latest_avg_score: # record better results\n # worsen_tolerance = threshold_init # re-count tolerance\n # max_avg_score = latest_avg_score\n # else: \n # if max_avg_score > 0.5: \n # worsen_tolerance -= 1 # count worsening counts\n # print(\"Loaded from last best model.\")\n # agent.load(best_model_path) # continue from last best-model\n # if worsen_tolerance <= 0: # earliy stop training\n # print(\"Early Stop Training.\")\n # break\n\n else:\n print('Testing mode')\n total_return = 0.\n total_step = 0\n total_path_len = 0.\n arrive_cnt = 0\n robot_name='turtlebot3_burger_1'\n # robot_name = 'robot1'\n while True:\n state = env.reset()\n \n one_round_step = 0\n\n data = None\n while data is None:\n try:\n data = rospy.wait_for_message('gazebo/model_states', ModelStates, timeout=5)\n except:\n pass\n\n robot_cur_state = data.pose[data.name.index(robot_name)].position\n \n while True:\n a = agent.action(state)\n a[0] = np.clip(a[0], 0., 1.)\n a[1] = np.clip(a[1], -0.5, 0.5)\n state_, r, done, arrive = env.step(a, past_action)\n total_return += r\n past_action = a\n state = state_\n one_round_step += 1\n total_step += 1\n\n data = None\n while data is None:\n try:\n data = rospy.wait_for_message('gazebo/model_states', ModelStates, timeout=5)\n except:\n pass\n\n robot_next_state = data.pose[data.name.index(robot_name)].position\n dist = math.hypot(\n robot_cur_state.x - robot_next_state.x,\n robot_cur_state.y - robot_next_state.y\n )\n total_path_len += dist\n robot_cur_state = robot_next_state\n\n if arrive:\n arrive_cnt += 1\n print('Step: %3i' % one_round_step, '| Arrive!!!')\n one_round_step = 0\n if env.test_goals_id >= len(env.test_goals):\n print('Finished, total return: ', total_return)\n print('Total step: ', total_step)\n print('Total path length: ', total_path_len)\n print('Success rate: ', arrive_cnt / len(env.test_goals))\n exit(0)\n\n if done:\n print('Step: %3i' % one_round_step, '| Collision!!!')\n if env.test_goals_id >= len(env.test_goals):\n print('Finished, total return: ', total_return)\n print('Total step: ', total_step)\n print('Total path length: ', total_path_len)\n print('Success rate: ', arrive_cnt / len(env.test_goals))\n exit(0)\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/tworobot_main.py","file_name":"tworobot_main.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"128345516","text":"\"\"\"\nModule provides different loss functions for calculating the dissimilarities between labels.\n\"\"\"\nfrom typing import Callable\n\nimport tensorflow as tf\n\nEPS = tf.keras.backend.epsilon()\n\n\ndef get_dissimilarity_fn(config: dict) -> Callable:\n \"\"\"\n Parse arguments from a configuration dictionary\n and return the loss by averaging batch loss returned by\n multi- or single-scale loss functions.\n\n :param config: dict, containing configuration for training.\n :return: loss function, which returns a tensor of shape (batch, )\n \"\"\"\n if config[\"name\"] == \"multi_scale\":\n\n def loss(y_true, y_pred):\n return 
multi_scale_loss(\n                y_true=y_true, y_pred=y_pred, **config[\"multi_scale\"]\n            )\n\n        return loss\n    elif config[\"name\"] == \"single_scale\":\n\n        def loss(y_true, y_pred):\n            return single_scale_loss(\n                y_true=y_true, y_pred=y_pred, **config[\"single_scale\"]\n            )\n\n        return loss\n    else:\n        raise ValueError(f\"Unknown loss type {config['name']}.\")\n\n\ndef multi_scale_loss(\n    y_true: tf.Tensor, y_pred: tf.Tensor, loss_type: str, loss_scales: list\n) -> tf.Tensor:\n    \"\"\"\n    Apply the loss at different scales (gaussian smoothing).\n    It is assumed that loss values are between 0 and 1.\n\n    :param y_true: tensor, shape = (batch, dim1, dim2, dim3)\n    :param y_pred: tensor, shape = (batch, dim1, dim2, dim3)\n    :param loss_type: string, indicating which loss to pass to function single_scale_loss.\n\n        Supported:\n\n        - cross-entropy\n        - mean-squared\n        - dice\n        - dice_generalized\n        - jaccard\n\n    :param loss_scales: list, values of sigma to pass to func\n        gauss_kernel_1d.\n    :return: (batch,)\n    \"\"\"\n    assert len(y_true.shape) == 4\n    assert len(y_pred.shape) == 4\n    label_loss_all = tf.stack(\n        [\n            single_scale_loss(\n                y_true=separable_filter3d(y_true, gauss_kernel1d(s)),\n                y_pred=separable_filter3d(y_pred, gauss_kernel1d(s)),\n                loss_type=loss_type,\n            )\n            for s in loss_scales\n        ],\n        axis=1,\n    )\n    return tf.reduce_mean(label_loss_all, axis=1)\n\n\ndef single_scale_loss(\n    y_true: tf.Tensor, y_pred: tf.Tensor, loss_type: str\n) -> tf.Tensor:\n    \"\"\"\n    Calculate the loss on two tensors based on defined\n    loss.\n\n    :param y_true: tensor, shape = (batch, dim1, dim2, dim3)\n    :param y_pred: tensor, shape = (batch, dim1, dim2, dim3)\n    :param loss_type: string, indicating which loss to pass to\n        function single_scale_loss.\n\n        Supported:\n\n        - cross-entropy\n        - mean-squared\n        - dice\n        - dice_generalized\n        - jaccard\n\n    :return: shape = (batch,)\n    \"\"\"\n    if loss_type == \"cross-entropy\":\n        return weighted_binary_cross_entropy(y_true, y_pred)\n    elif loss_type == \"mean-squared\":\n        return squared_error(y_true, y_pred)\n    elif loss_type == \"dice\":\n        return 1 - dice_score(y_true, y_pred)\n    elif loss_type == \"dice_generalized\":\n        return 1 - dice_score_generalized(y_true, y_pred)\n    elif loss_type == \"jaccard\":\n        return 1 - jaccard_index(y_true, y_pred)\n    else:\n        raise ValueError(\"Unknown loss type.\")\n\n\ndef squared_error(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n    \"\"\"\n    Calculates the mean squared difference between y_true, y_pred.\n\n    mean((y_true - y_pred)(y_true - y_pred))\n\n    :param y_true: tensor, shape = (batch, dim1, dim2, dim3)\n    :param y_pred: shape = (batch, dim1, dim2, dim3)\n    :return: shape = (batch,)\n    \"\"\"\n    return tf.reduce_mean(tf.math.squared_difference(y_true, y_pred), axis=[1, 2, 3])\n\n\ndef weighted_binary_cross_entropy(\n    y_true: tf.Tensor, y_pred: tf.Tensor, pos_weight: float = 1\n) -> tf.Tensor:\n    \"\"\"\n    Calculates weighted binary cross- entropy:\n\n        -loss = − pos_w * y_true log(y_pred) - (1−y_true) log(1−y_pred)\n\n    :param y_true: shape = (batch, dim1, dim2, dim3)\n    :param y_pred: shape = (batch, dim1, dim2, dim3)\n    :param pos_weight: weight of positive class, scalar. 
Default value is 1\n    :return: shape = (batch,)\n    \"\"\"\n    y_pred = tf.clip_by_value(y_pred, 0, 1)\n    loss_pos = tf.reduce_mean(y_true * tf.math.log(y_pred + EPS), axis=[1, 2, 3])\n    loss_neg = tf.reduce_mean(\n        (1 - y_true) * tf.math.log(1 - y_pred + EPS), axis=[1, 2, 3]\n    )\n    return -pos_weight * loss_pos - loss_neg\n\n\ndef dice_score(y_true: tf.Tensor, y_pred: tf.Tensor, binary: bool = False) -> tf.Tensor:\n    \"\"\"\n    Calculates dice score:\n\n    1. num = 2 * y_true * y_pred\n    2. denom = y_true + y_pred\n    3. dice score = num / denom\n\n    where num and denom are summed over the entire image first.\n\n    :param y_true: shape = (batch, dim1, dim2, dim3)\n    :param y_pred: shape = (batch, dim1, dim2, dim3)\n    :param binary: True if the y should be projected to 0 or 1\n    :return: shape = (batch,)\n    \"\"\"\n    if binary:\n        y_true = tf.cast(y_true >= 0.5, dtype=tf.float32)\n        y_pred = tf.cast(y_pred >= 0.5, dtype=tf.float32)\n    numerator = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3]) * 2\n    denominator = tf.reduce_sum(y_true, axis=[1, 2, 3]) + tf.reduce_sum(\n        y_pred, axis=[1, 2, 3]\n    )\n    return (numerator + EPS) / (denominator + EPS)\n\n\ndef dice_score_generalized(\n    y_true: tf.Tensor, y_pred: tf.Tensor, pos_weight: float = 1, neg_weight: float = 0\n) -> tf.Tensor:\n    \"\"\"\n    Calculates weighted dice score:\n\n    1. let y_prod = y_true * y_pred and y_sum  = y_true + y_pred\n    2. num = 2 *  (pos_w * y_true * y_pred + neg_w * (1−y_true) * (1−y_pred))\n\n       = 2 *  ((pos_w+neg_w) * y_prod - neg_w * y_sum + neg_w)\n    3. denom = (pos_w * (y_true + y_pred) + neg_w * (1−y_true + 1−y_pred))\n\n       = (pos_w-neg_w) * y_sum + 2 * neg_w\n    4. dice score = num / denom\n\n    where num and denom are summed over the entire image first.\n\n    :param y_true: shape = (batch, dim1, dim2, dim3)\n    :param y_pred: shape = (batch, dim1, dim2, dim3)\n    :param pos_weight: weight of positive class, default = 1\n    :param neg_weight: weight of negative class, default = 0\n    :return: shape = (batch,)\n    \"\"\"\n    y_prod = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3])\n    y_sum = tf.reduce_sum(y_true, axis=[1, 2, 3]) + tf.reduce_sum(\n        y_pred, axis=[1, 2, 3]\n    )\n\n    numerator = 2 * (\n        (pos_weight + neg_weight) * y_prod - neg_weight * y_sum + neg_weight\n    )\n    denominator = (pos_weight - neg_weight) * y_sum + 2 * neg_weight\n    return (numerator + EPS) / (denominator + EPS)\n\n\ndef jaccard_index(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n    \"\"\"\n    Calculates jaccard index:\n\n    1. num = y_true * y_pred\n    2. denom = y_true + y_pred - y_true * y_pred\n    3. 
jaccard index = num / denom\n\n    :param y_true: shape = (batch, dim1, dim2, dim3)\n    :param y_pred: shape = (batch, dim1, dim2, dim3)\n    :return: shape = (batch,)\n    \"\"\"\n    numerator = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3])\n    denominator = (\n        tf.reduce_sum(y_true, axis=[1, 2, 3])\n        + tf.reduce_sum(y_pred, axis=[1, 2, 3])\n        - numerator\n    )\n    return (numerator + EPS) / (denominator + EPS)\n\n\ndef gauss_kernel1d(sigma: int) -> tf.Tensor:\n    \"\"\"\n    Calculates a gaussian kernel.\n\n    :param sigma: number defining standard deviation for\n        gaussian kernel.\n    :return: shape = (dim, ) or ()\n    \"\"\"\n    if sigma == 0:\n        return tf.constant(0, tf.float32)\n    else:\n        tail = int(sigma * 3)\n        k = tf.exp([-0.5 * x ** 2 / sigma ** 2 for x in range(-tail, tail + 1)])\n        return k / tf.reduce_sum(k)\n\n\ndef cauchy_kernel1d(sigma: int) -> tf.Tensor:\n    \"\"\"\n    Approximating cauchy kernel in 1d.\n\n    :param sigma: int, defining standard deviation of kernel.\n    :return: shape = (dim, ) or ()\n    \"\"\"\n    if sigma == 0:\n        return tf.constant(0, tf.float32)\n    else:\n        tail = int(sigma * 5)\n        k = tf.math.reciprocal([((x / sigma) ** 2 + 1) for x in range(-tail, tail + 1)])\n        return k / tf.reduce_sum(k)\n\n\ndef separable_filter3d(tensor: tf.Tensor, kernel: tf.Tensor) -> tf.Tensor:\n    \"\"\"\n    Creates a 3d separable filter.\n\n    Here `tf.nn.conv3d` accepts the `filters` argument of shape\n    (filter_depth, filter_height, filter_width, in_channels, out_channels),\n    where the first axis of `filters` is the depth not batch,\n    and the input to `tf.nn.conv3d` is of shape\n    (batch, in_depth, in_height, in_width, in_channels).\n\n    :param tensor: shape = (batch, dim1, dim2, dim3)\n    :param kernel: shape = (dim4,)\n    :return: shape = (batch, dim1, dim2, dim3)\n    \"\"\"\n    if len(kernel.shape) == 0:\n        return tensor\n    else:\n        strides = [1, 1, 1, 1, 1]\n        tensor = tf.nn.conv3d(\n            tf.nn.conv3d(\n                tf.nn.conv3d(\n                    tf.expand_dims(tensor, axis=4),\n                    filters=tf.reshape(kernel, [-1, 1, 1, 1, 1]),\n                    strides=strides,\n                    padding=\"SAME\",\n                ),\n                filters=tf.reshape(kernel, [1, -1, 1, 1, 1]),\n                strides=strides,\n                padding=\"SAME\",\n            ),\n            filters=tf.reshape(kernel, [1, 1, -1, 1, 1]),\n            strides=strides,\n            padding=\"SAME\",\n        )\n        return tensor[:, :, :, :, 0]\n\n\ndef compute_centroid(mask: tf.Tensor, grid: tf.Tensor) -> tf.Tensor:\n    \"\"\"\n    Calculate the centroid of the mask.\n\n    :param mask: shape = (batch, dim1, dim2, dim3)\n    :param grid: shape = (dim1, dim2, dim3, 3)\n    :return: shape = (batch, 3), batch of vectors denoting\n        location of centroids.\n    \"\"\"\n    assert len(mask.shape) == 4\n    assert len(grid.shape) == 4\n    bool_mask = tf.expand_dims(\n        tf.cast(mask >= 0.5, dtype=tf.float32), axis=4\n    )  # (batch, dim1, dim2, dim3, 1)\n    masked_grid = bool_mask * tf.expand_dims(\n        grid, axis=0\n    )  # (batch, dim1, dim2, dim3, 3)\n    numerator = tf.reduce_sum(masked_grid, axis=[1, 2, 3])  # (batch, 3)\n    denominator = tf.reduce_sum(bool_mask, axis=[1, 2, 3])  # (batch, 1)\n    return (numerator + EPS) / (denominator + EPS)  # (batch, 3)\n\n\ndef compute_centroid_distance(\n    y_true: tf.Tensor, y_pred: tf.Tensor, grid: tf.Tensor\n) -> tf.Tensor:\n    \"\"\"\n    Calculate the L2-distance between two tensors' centroids.\n\n    :param y_true: tensor, shape = (batch, dim1, dim2, dim3)\n    :param y_pred: tensor, shape = (batch, dim1, dim2, dim3)\n    :param grid: tensor, shape = (dim1, dim2, dim3, 3)\n    :return: shape = (batch,)\n    \"\"\"\n    centroid_1 = compute_centroid(mask=y_pred, grid=grid)  # (batch, 3)\n    centroid_2 = compute_centroid(mask=y_true, grid=grid)  # (batch, 3)\n    return 
tf.sqrt(tf.reduce_sum((centroid_1 - centroid_2) ** 2, axis=1))\n\n\ndef foreground_proportion(y: tf.Tensor) -> tf.Tensor:\n    \"\"\"\n    Calculating the percentage of foreground vs\n    background per 3d volume.\n\n    :param y: shape = (batch, dim1, dim2, dim3), a 3D label tensor\n    :return: shape = (batch,)\n    \"\"\"\n    y = tf.cast(y >= 0.5, dtype=tf.float32)\n    return tf.reduce_sum(y, axis=[1, 2, 3]) / tf.reduce_sum(\n        tf.ones_like(y), axis=[1, 2, 3]\n    )\n","sub_path":"deepreg/model/loss/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":11157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"416389710","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id$\n\"\"\"\n\nimport unittest\nfrom zope.component import getGlobalServices\nfrom zope.app.component.hooks import getServices_hook\nfrom zope.app.component.localservice import serviceServiceAdapter\nfrom zope.app.site.interfaces import IPossibleSite, ISite, ISiteManager\nfrom zope.app.traversing.interfaces import IContainmentRoot\nfrom zope.component.exceptions import ComponentLookupError\nfrom zope.component.interfaces import IServiceService\nfrom zope.component.service import serviceManager\nfrom zope.interface import implements, directlyProvides, directlyProvidedBy\nfrom zope.interface.verify import verifyObject\nfrom zope.app.tests.setup import placelessSetUp, placelessTearDown\nfrom zope.app.tests import ztapi\nfrom zope.app.component.hooks import setSite, getSite\n\nclass ServiceManager:\n    implements(ISiteManager)\n\n    def __init__(self):\n        self.dummy_service = object()\n\n    def getService(self, name):\n        if name == 'dummy':\n            return self.dummy_service\n        raise ComponentLookupError(name)\n\nclass Folder:\n    implements(IPossibleSite)\n\n    sm = None\n\n    def getSiteManager(self, default=None):\n        return self.sm\n\n    def setSiteManager(self, sm):\n        self.sm = sm\n        sm.__parent__ = self\n        directlyProvides(self, ISite, directlyProvidedBy(self))\n\nclass Package:\n    pass\n\nclass Root(Folder):\n    implements(IContainmentRoot, ISite)\n    def getSiteManager(self):\n        return getGlobalServices()\n\nclass ServiceServiceStub:\n    implements(IServiceService)\n\n\ndef Wrapper(ob, container):\n    ob.__parent__ = container\n    return ob\n\nclass Test(unittest.TestCase):\n\n    def setUp(self):\n        unittest.TestCase.setUp(self)\n        placelessSetUp()\n        root = Root()\n\n        f1 = Wrapper(Folder(), root)\n        sm1 = ServiceManager()\n        f1.setSiteManager(sm1)\n        p1 = Wrapper(Package(), sm1)\n\n        f2 = Wrapper(Folder(), f1)\n        sm2 = ServiceManager()\n        f2.setSiteManager(sm2)\n        p2 = Wrapper(Package(), sm2)\n\n        sm1.next = serviceManager\n        sm2.next = sm1\n\n        self.root = root\n        self.f1 = f1\n        self.f2 = f2\n        self.sm1 = sm1\n        self.sm2 = sm2\n        self.p1 = p1\n        self.p2 = p2\n        self.unparented_folder = Folder()\n        self.unrooted_subfolder = Wrapper(Folder(), self.unparented_folder)\n\n        ztapi.provideAdapter(None, IServiceService, 
serviceServiceAdapter)\n\n    def tearDown(self):\n        setSite()\n        placelessTearDown()\n\n    def test_getServices(self):\n        self.assertEqual(getServices_hook(None), serviceManager)\n        self.assertEqual(getServices_hook(self.root), serviceManager)\n        self.assertEqual(getServices_hook(self.f1), self.sm1)\n        self.assertEqual(getServices_hook(self.f2), self.sm2)\n        setSite(self.f2)\n        self.assertEqual(getServices_hook(None), self.sm2)\n\n    def test_queryNextService(self):\n        from zope.app.component.localservice import queryNextService\n        self.assert_(queryNextService(self.sm2, 'dummy') is\n                     self.sm1.dummy_service)\n        self.assert_(queryNextService(self.p2, 'dummy') is\n                     self.sm1.dummy_service)\n        marker = object()\n        self.assert_(queryNextService(self.p1, 'dummy', marker) is marker)\n\n    def test_getNextService(self):\n        from zope.app.component.localservice import getNextService\n        self.assert_(getNextService(self.sm2, 'dummy') is\n                     self.sm1.dummy_service)\n        self.assert_(getNextService(self.p2, 'dummy') is\n                     self.sm1.dummy_service)\n        self.assertRaises(ComponentLookupError,\n                          getNextService, self.p1, 'dummy')\n\n\n    def test_queryNextServices(self):\n        from zope.app.component.localservice import queryNextServices\n        marker = object()\n        self.assert_(queryNextServices(self.root, marker) is marker)\n        self.assert_(queryNextServices(self.f1, marker) is marker)\n        self.assert_(queryNextServices(self.f2, marker) is marker)\n        self.assertEqual(queryNextServices(self.sm1), serviceManager)\n        self.assertEqual(queryNextServices(self.sm2), self.sm1)\n        self.assertEqual(queryNextServices(self.p1), serviceManager)\n        self.assertEqual(queryNextServices(self.p2), self.sm1)\n\n        self.assert_(queryNextServices(self.unparented_folder, marker)\n                     is marker)\n        self.assert_(queryNextServices(self.unrooted_subfolder, marker)\n                     is marker)\n\n    def test_getNextServices(self):\n        from zope.app.component.localservice import getNextServices\n        self.assertRaises(ComponentLookupError,\n                          getNextServices, self.root)\n        self.assertRaises(ComponentLookupError,\n                          getNextServices, self.f1)\n        self.assertRaises(ComponentLookupError,\n                          getNextServices, self.f2)\n        self.assertEqual(getNextServices(self.sm1), serviceManager)\n        self.assertEqual(getNextServices(self.sm2), self.sm1)\n        self.assertEqual(getNextServices(self.p1), serviceManager)\n        self.assertEqual(getNextServices(self.p2), self.sm1)\n\n        self.assertRaises(ComponentLookupError,\n                          getNextServices, self.unparented_folder)\n        self.assertRaises(ComponentLookupError,\n                          getNextServices, self.unrooted_subfolder)\n\n    def test_getNextServices_security(self):\n        from zope.app.component.localservice import getNextServices\n        from zope.security.checker import ProxyFactory, NamesChecker\n        sm = ProxyFactory(self.sm1, NamesChecker(('next',)))\n        # Check that serviceManager is not proxied\n        self.assert_(getNextServices(sm) is serviceManager)\n\n    def test_queryLocalServices(self):\n        from zope.app.component.localservice import queryLocalServices\n        marker = object()\n        self.assert_(queryLocalServices(self.root, marker) is marker)\n        self.assert_(queryLocalServices(self.f1, marker) is marker)\n        self.assert_(queryLocalServices(self.f2, marker) is marker)\n        self.assertEqual(queryLocalServices(self.sm1), self.sm1)\n        self.assertEqual(queryLocalServices(self.sm2), self.sm2)\n        self.assertEqual(queryLocalServices(self.p1), self.sm1)\n        self.assertEqual(queryLocalServices(self.p2), self.sm2)\n\n        self.assert_(queryLocalServices(self.unparented_folder, marker)\n                     is marker)\n        self.assert_(queryLocalServices(self.unrooted_subfolder, marker)\n                     is 
marker)\n\n    def test_getLocalServices(self):\n        from zope.app.component.localservice import getLocalServices\n        self.assertRaises(ComponentLookupError,\n                          getLocalServices, self.root)\n        self.assertRaises(ComponentLookupError,\n                          getLocalServices, self.f1)\n        self.assertRaises(ComponentLookupError,\n                          getLocalServices, self.f2)\n        self.assertEqual(getLocalServices(self.sm1), self.sm1)\n        self.assertEqual(getLocalServices(self.sm2), self.sm2)\n        self.assertEqual(getLocalServices(self.p1), self.sm1)\n        self.assertEqual(getLocalServices(self.p2), self.sm2)\n\n        unparented_folder = Folder()\n        self.assertRaises(ComponentLookupError,\n                          getLocalServices, unparented_folder)\n        unrooted_subfolder = Wrapper(Folder(), unparented_folder)\n        self.assertRaises(ComponentLookupError,\n                          getLocalServices, unrooted_subfolder)\n\n    def test_serviceServiceAdapter(self):\n        from zope.app.component.localservice import serviceServiceAdapter\n\n        # If it is a site, return the service service.\n        ss = ServiceServiceStub()\n        site = Folder()\n        site.setSiteManager(ss)\n        self.assertEqual(serviceServiceAdapter(site), ss)\n\n        # If it is locatable (has __parent__), \"acquire\" the site\n        # and return the service service\n        ob = Folder()\n        ob.__parent__ = site\n        self.assertEqual(serviceServiceAdapter(ob), ss)\n        ob2 = Folder()\n        ob2.__parent__ = ob\n        self.assertEqual(serviceServiceAdapter(ob2), ss)\n\n        # If it does we are unable to find a service service, raise\n        # ComponentLookupError\n        orphan = Folder()\n        self.assertRaises(ComponentLookupError, serviceServiceAdapter, orphan)\n\n    def test_setThreadSite_clearThreadSite(self):\n        from zope.app.component.localservice import threadSiteSubscriber\n        from zope.app.component.localservice import clearSite\n        from zope.app.publication.zopepublication import BeforeTraverseEvent\n\n        self.assertEqual(getSite(), None)\n\n        # A non-site is traversed\n        ob = object()\n        request = object()\n        ev = BeforeTraverseEvent(ob, request)\n        threadSiteSubscriber(ev)\n\n        self.assertEqual(getSite(), None)\n\n        # A site is traversed\n        ss = ServiceServiceStub()\n        site = Folder()\n        site.setSiteManager(ss)\n\n        ev = BeforeTraverseEvent(site, request)\n        threadSiteSubscriber(ev)\n\n        self.assertEqual(getSite(), site)\n\n        clearSite()\n\n        self.assertEqual(getSite(), None)\n\n\ndef test_suite():\n    return unittest.makeSuite(Test)\n\nif __name__ == '__main__':\n    unittest.main(defaultTest='test_suite')\n","sub_path":"Zope3/tags/ZopeX3-3.0.0b1/src/zope/app/component/tests/test_localservice.py","file_name":"test_localservice.py","file_ext":"py","file_size_in_byte":9989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"169603256","text":"#%%\nimport numpy as np\nfrom agents.agent import DDPG\nfrom task import Task\nfrom helpers import Params, run_training\n\n\n## Modify the values below to give the quadcopter a different starting position.\nfile_output = 'data.txt'                         # file name for saved results\n\nbuffer_size = 100000\nbatch_size = 64\n\nnum_episodes = 1000 # 1000\n\nprint('\\n\\nStart training...')\ntarget_pos = np.array([ 0.0, 0.0, 10.0])\ninit_pose = np.array([ 0.0, 0.0, 10.0, 0.0, 0.0, 0.0])\ninit_velocities = np.array([ 0.0, 0.0, 0.0])\n\n\nparams = Params()\nparams.extra_text = 'speed_reward_multip__concatenate'\nparams.exploration_mu = 0\nparams.exploration_theta = 0.15\nparams.exploration_sigma = 0.02 #0.002\nparams.actor_learning_rate = 1.0e-5 # 0.0001\nparams.critic_learning_rate = 0.001 # 0.001\nparams.tau = 0.001\nparams.actor_net_cells = [32, 32]\nparams.critic_net_cells = 
[32, 64]\nparams.gamma = 0.99\n\n# test_values = [1.0e-3, 1.0e-4, 1.0e-5,1.0e-6, 1.0e-7] # actor_learning_rate\n# test_values = [1.0e-2, 1.0e-3, 1.0e-4,1.0e-5] # critic_learning_rate\n# test_values = [0.9, 0.99] # gamma\ntest_values = [0.2, 0.02, 0.002, 0.0002] # exploration_sigma\n# test_values = [0.1, 0.01, 0.001, 0.0001] # tau\n# test_values = [0.9, 0.99] # gamma\n# Think how to do the networks batch.\n\nfor test_value in test_values:\n    params.exploration_sigma = test_value\n\n    task = Task(init_pose = init_pose,\n                init_velocities = init_velocities,\n                target_pos = target_pos)\n    agent = DDPG(task,\n                 params,\n                 buffer_size = buffer_size,\n                 batch_size = batch_size\n                 )\n\n    run_training(agent, task, params, num_episodes, file_output)","sub_path":"batch_test_SPECTRE.py","file_name":"batch_test_SPECTRE.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"316042089","text":"# =============================================================================\r\n#                   Boston Scientific Confidential\r\n#     Copyright 2008-2010 by Boston Scientific or its affiliates. \r\n#     All rights reserved.\r\n#\r\n# $Archive: CRM:eid=370487:/APM/System/dvt/Tools/fortytwo/sandbox/assets/views/workorder.py $\r\n# $Revision: 11779/35 $\r\n# $Author: randy kleinman; g043746 $\r\n# $Modtime: 2013-09-20T12:04:56-0500 $\r\n# =============================================================================\r\n__version__ = \"$Revision: 11779/35 $\"\r\n\r\nimport os, random, logging, pprint, datetime, urllib, json\r\n\r\nfrom django.utils.simplejson import JSONEncoder, loads\r\n\r\nfrom assets.models.testplan import CommandLineArgs\r\nfrom assets.models import WorkOrder, Batch\r\nfrom assets.utils import JSONHttpResponse\r\nfrom settings import LOG_PARSE_BASE_PATH\r\n\r\nfrom dolphace.models import ControllerInfo\r\n\r\nfrom parseus.logParser.dbInterface import DBInterfaceWrapper\r\nfrom parseus.logParser.logParser import LogParser\r\nfrom parseus.logParser._util import convertTimeToString\r\n\r\nlogger = logging.getLogger(__name__) \r\n\r\n#----------------------------------------------------------------------\r\ndef submitWorkOrder(controller, service, w, **kwargs):\r\n    \"\"\"\r\n    Submits the given workorder object to the Dolphin web service.\r\n    Returns the workorder number or response from the server\r\n    \"\"\"\r\n    workOrder = w.getDolphinWorkOrder(controller, **kwargs)\r\n    logger.info('Workorder text follows %s' % pprint.pformat(workOrder))\r\n    resp = controller.restRequest(\"PUT\", service.service_url, workOrder)\r\n\r\n    # Dolphin returns a dictionary if there was a problem\r\n    if type(resp) == str:\r\n        result = eval(resp)\r\n\r\n    elif type(resp) == int:\r\n        result = {'WorkOrderId': resp}\r\n        #result = {'WorkOrderId': random.randint()} # debug\r\n        \r\n    logger.info('Received workorder id=%s for fortyTwo Planned Execution: %s' % (result['WorkOrderId'], w.testplan) )\r\n    return result, workOrder\r\n\r\ndef processDolphinWorkorderRunning(request):\r\n    \"\"\"\r\n    When a workorder is moved from the queue to the test station it is scheduled\r\n    on, the test station sends this message to tell the submitter where it is \r\n    running and what the workorder text was.\r\n    \"\"\"\r\n    post = loads(request.raw_post_data)\r\n    woid = post.get('workorder')\r\n    station = urllib.unquote(post.get('station', None))\r\n    wotext = post.get('workordercontent')\r\n    cluster = post.get('cluster')\r\n    \r\n    logger.info('Received status running message from the 
regression system for workorder: %s, cluster %s' % (woid, cluster))\r\n logger.info('Workorder %s is running on test station: %s' %(woid, station))\r\n\r\n # First, find the workorder, if fortyTwo knows about it. The calling \r\n # function already checked for existence of this workorder\r\n controller = ControllerInfo.objects.get(cluster_name=cluster)\r\n wo = WorkOrder.objects.get(workorder_id=woid, testplan__team=controller.team, testplan__release=controller.release)\r\n\r\n wo.result_test_starttime = datetime.datetime.now()\r\n wo.result_test_station = station\r\n wo.workorder_text = wotext\r\n wo.complete = False\r\n wo.state = WorkOrder.STATE_RUNNING\r\n wo.save()\r\n\r\n # Update the batch\r\n # If we have already cancelled it and a workorder got through in the \r\n # meantime, don't change\r\n if wo.batch.run_state != Batch.STATE_COMPLETE_CANCEL: \r\n wo.batch.run_state = Batch.STATE_IN_PROGRESS\r\n wo.batch.save()\r\n return JSONHttpResponse({'status': True, 'msg': 'Successfully received workorder %s running notice. So long and thanks for all the fish.' % woid })\r\n\r\n\r\ndef processDolphinWorkorderCompleteWithError(request):\r\n \"\"\"\r\n When a workorder is completed but generates no log file, Dolphin will send \r\n the workorder id, test station run on, and state as a JSON POST. This method\r\n grabs that data and handles the updates to the workorder.\r\n \"\"\"\r\n # Dolphin sends just json in the raw post, so decode it here\r\n post = loads(request.raw_post_data)\r\n woid = post.get('workorder')\r\n station = urllib.unquote(post.get('station', None))\r\n cluster = post.get('cluster')\r\n\r\n logger.info('Received status completion error message from the regression system for workorder: %s, cluster %s' % (woid, cluster))\r\n\r\n # First, find the workorder, if fortyTwo knows about it, the calling \r\n # function already checked for existence of this workorder\r\n controller = ControllerInfo.objects.get(cluster_name=cluster)\r\n wo = WorkOrder.objects.get(workorder_id=woid, testplan__team=controller.team, testplan__release=controller.release)\r\n\r\n # Dolphin sends this message when test not able to generate a log file \r\n logger.info(\"Workorder %s did not generate a log file for the regression system to report, or for fortyTwo to parse.\" % woid)\r\n\r\n wo.complete = True\r\n wo.result_passed = False\r\n wo.result_log_location = None\r\n wo.result_test_runtime = None\r\n wo.result_test_station = station\r\n if wo.state != WorkOrder.STATE_CANCELLED:\r\n wo.state = WorkOrder.STATE_COMPLETE\r\n wo.save()\r\n\r\n # Update the batch\r\n if wo.batch.run_state != Batch.STATE_COMPLETE_CANCEL: \r\n updateBatchOnWorkorderComplete(wo)\r\n return JSONHttpResponse({'status': True, 'msg': 'Successfully received workorder %s complete error notice. So long and thanks for all the fish.' 
% woid })\r\n\r\n\r\ndef processDolphinWorkorderRetry(request):\r\n \"\"\"\r\n When a workorder is sent back to the queue by the regression system, Dolphin\r\n will send the workorder id\r\n \"\"\"\r\n post = loads(request.raw_post_data)\r\n woid = post.get('workorder')\r\n cluster = post.get('cluster')\r\n logger.info('Received status retry message from the regression system for workorder: %s, cluster %s' % (woid, cluster))\r\n\r\n # First, find the workorder, if fortyTwo knows about it, the calling \r\n # function already checked for existence of this workorder\r\n controller = ControllerInfo.objects.get(cluster_name=cluster)\r\n wo = WorkOrder.objects.get(workorder_id=woid, testplan__team=controller.team, testplan__release=controller.release)\r\n\r\n if wo.state != WorkOrder.STATE_CANCELLED:\r\n wo.state = WorkOrder.STATE_RETRY\r\n wo.result_test_starttime = None\r\n wo.result_test_station = None\r\n wo.save()\r\n logger.info(\"STATE of workorder id=%s (%s) is %s\" %(wo.id, woid, wo.get_state_display())) \r\n\r\n return JSONHttpResponse({'status': True, 'msg': 'Successfully received workorder %s retry notice. So long and thanks for all the fish.' % woid })\r\n\r\n\r\ndef processDolphinWorkorderComplete(request):\r\n \"\"\"\r\n When a workorder is completed, Dolphin will send the filename, workorder id,\r\n and contents of the test log file as a JSON POST. This method grabs that \r\n data and handles the updates to the workorder and saves the log file content.\r\n \"\"\"\r\n\r\n # Dolphin sends just json in the raw post, so decode it here\r\n post = loads(request.raw_post_data)\r\n woid = post.get('workorder')\r\n log_file = urllib.unquote(post.get('logname'))\r\n raw_log = urllib.unquote_plus(post.get('log'))\r\n station = urllib.unquote(post.get('station', None))\r\n cluster = post.get('cluster')\r\n logger.info('Received status completion message from the regression system for workorder: %s, cluster: %s' % (woid, cluster))\r\n\r\n # First, find the workorder, if fortyTwo knows about it, the calling \r\n # function already checked for existence of this workorder\r\n controller = ControllerInfo.objects.get(cluster_name=cluster)\r\n wo = WorkOrder.objects.get(workorder_id=woid, testplan__team=controller.team, testplan__release=controller.release)\r\n \r\n logger.info('Retrieved workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n\r\n # Save the log file contents to a file on the system\r\n # Create the log directory if necessary\r\n basePath = os.path.join(LOG_PARSE_BASE_PATH, wo.testplan.team.team_name, wo.testplan.release.release_name)\r\n if not os.path.exists(basePath):\r\n os.makedirs(basePath)\r\n\r\n # Write the file content to the workorder log location and update the workorder\r\n fpath = os.path.join(basePath, log_file)\r\n try:\r\n f = open(fpath,'w')\r\n f.write(raw_log)\r\n\r\n except Exception as ee:\r\n logger.error('fortyTwo was not able to save the log content for workorder %s. 
Error as follows: %s' % (wo.workorder_id, ee.message))\r\n raise\r\n\r\n finally:\r\n f.close()\r\n logger.info('Saved the log content for workorder id = %s (fortytwo id: %s) to %s' % (woid, wo.id, fpath)) \r\n\r\n wo.complete = True\r\n wo.result_log_location = os.sep.join([wo.testplan.team.team_name, wo.testplan.release.release_name, log_file])\r\n wo.result_test_station = station\r\n if wo.state != WorkOrder.STATE_CANCELLED:\r\n wo.state = WorkOrder.STATE_COMPLETE\r\n wo.save()\r\n\r\n # Parse the log to get some results and update the workorder model\r\n try:\r\n logger.info('Attempting to parse the log content for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n parser = LogParser(fpath)\r\n parser.parseInput()\r\n logger.info('Successfully parsed the log content for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n\r\n # Incomplete log parsing -> parse error, parse error is denoted by a complete\r\n # workorder, with false results, and null runtime\r\n if len(parser.results._dict['incomplete']) > 0:\r\n logger.info('Log content is determined to be an incomplete log for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n wo.result_passed = False\r\n wo.result_test_runtime = None\r\n\r\n # Store the results. By fiat, workorders only have one case, so take the first one\r\n else:\r\n logger.info('Log content was successfully parsed for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n wo.result_passed = parser.results.cases[0].passed\r\n wo.result_test_runtime = convertTimeToString(parser.results.cases[0].elapsedTime.rpartition('.')[0])\r\n wo.result_test_starttime = parser.results.cases[0].startTime\r\n wo.passfailerror = \",\".join([str(parser.results.cases[0].verificationsPassed),\r\n str(parser.results.cases[0].verificationFailures),\r\n str(parser.results.cases[0].systemErrors)])\r\n wo.exception_json = json.dumps(parser.results.cases[0].exceptionInfo)\r\n wo.traceback = parser.results.cases[0].traceback\r\n\r\n # Put the log into the database if the batch is setup to do so\r\n # Only parse passing logs\r\n if wo.state != WorkOrder.STATE_CANCELLED and ((wo.batch.auto_log_parse == Batch.AUTOPARSE_PASS_ONLY and wo.result_passed is True) or wo.batch.auto_log_parse == Batch.AUTOPARSE_ALL):\r\n\r\n logger.info('Attempting to write the log content to the database for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n db = DBInterfaceWrapper(parser.results, wo.batch.team, wo.batch.release)\r\n db.writeLogsToDB()\r\n\r\n if parser.results._dict['matched']:\r\n logger.info('Log content autoparsing has matched the log content to the database for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n wo.testlog = wo.testplan.latesttestlog_set.get().testlog\r\n wo.save()\r\n logger.info('Log content saved to the database for workorder id = %s (fortytwo id: %s)' % (woid, wo.id)) \r\n else:\r\n logger.warn('The log for workorder %s did not match a planned execution in the database' % woid)\r\n raise Exception('The log for workorder %s did not match a planned execution in the database' % woid)\r\n else:\r\n logger.info('Log not inserted in database due to batch autoparse options for workorder id = %s (fortytwo id: %s) or because the workorder was already marked as cancelled' % (woid, wo.id)) \r\n\r\n except Exception as error:\r\n logger.info('Log parsing encountered an error for workorder id = %s (fortytwo id: %s). 
Error: %s' % (woid, wo.id, error))\r\n wo.result_passed = False\r\n wo.result_test_runtime = None\r\n\r\n wo.save()\r\n\r\n # Update the batch\r\n if wo.batch.run_state != Batch.STATE_COMPLETE_CANCEL: \r\n updateBatchOnWorkorderComplete(wo)\r\n return JSONHttpResponse({'status': True, 'msg': 'Successfully received workorder %s complete notice. So long and thanks for all the fish.' % woid })\r\n\r\ndef updateBatchOnWorkorderComplete(wo):\r\n \"\"\"\r\n After a workorder has completed, this method will update the batch state if\r\n necessary.\r\n \"\"\"\r\n all_wo = wo.batch.workorder_set.all()\r\n wo_complete = all_wo.filter(complete=True).count()\r\n wo_total = all_wo.count()\r\n\r\n # If this is the first one in the batch that has completed, update to INPR state\r\n if wo_complete > 0:\r\n wo.batch.run_state = Batch.STATE_IN_PROGRESS\r\n wo.batch.complete = False\r\n\r\n # This is the last one with errors, set to complete w/errors\r\n if wo_complete == wo_total and all_wo.filter(result_passed=False).count() != 0:\r\n wo.batch.run_state = Batch.STATE_COMPLETE_ERRORS\r\n wo.batch.complete = True\r\n\r\n # If this is the last one with no errors, set to complete\r\n elif wo_complete == wo_total and all_wo.filter(result_passed=False).count() == 0:\r\n wo.batch.run_state = Batch.STATE_COMPLETE\r\n wo.batch.complete = True\r\n\r\n wo.batch.save()\r\n return None","sub_path":"sandbox/assets/views/workorder.py","file_name":"workorder.py","file_ext":"py","file_size_in_byte":13731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"544772638","text":"def fancyRide(l, fares):\r\n cars = [\"UberX\",\"UberXL\",\"UberPlus\",\"UberBlack\",\"UberSUV\"]\r\n bestFair = \"\"\r\n currentBestTotal = 0\r\n for i in range(len(fares)):\r\n total = l * fares[i]\r\n if(total > currentBestTotal and total <= 20):\r\n bestFair = cars[i]\r\n currentBestTotal = total\r\n return bestFair","sub_path":"Company Bots/Uber/fancyRide/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479691856","text":"# # from google.cloud import bigquery\n# # client = bigquery.Client()\n# #\n# #\n# # import os\n# # os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"C:/Users/dvoruga/Downloads/shc-dfs-test-78d7a2e8ed52.json\"\n# #\n# # # static_df = pd.read_gbq(static_query, env['BILLING_PROJECT'], dialect='legacy')\n# #\n# # Perform a query.\n# # QUERY = ('SELECT Table_name FROM shc-dfs-test.dfs_test.backup_master')\n# # query_job = client.query(QUERY) # API request\n# # rows = query_job.result() # Waits for query to finish\n# # #\n# # for row in rows:\n# # print(row.name)\n# \nfrom google.cloud import bigquery\n# # from airflow.contrib.hooks.bigquery_hook import BigQueryHook\n# # from bigquery_run_with_timeout_retry.run_bigquery import read_gbq, to_gbq\n# # from bigquery_run_with_timeout_retry.run_bigquery import runbq\n#\n# # Construct a BigQuery client object.\nclient = bigquery.Client.from_service_account_json('C:/Users/dvoruga/Downloads/shc-dfs-test-78d7a2e8ed52.json')\nimport pandas as pd\n# #\n# # query = \"\"\"\n# # SELECT * FROM shc-dfs-test.dfs_test.backup_master limit 10;\n# # \"\"\"\n# # query_job = client.query(query) # Make an API request.\n# # df=read_gbq(query)\n# # print(df)\n# # for row in query_job:\n# # print(type(row))\n#\n#\ndef comparing_tables():\n import pandas as pd\n pd.set_option('display.max_columns', 15)\n 
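A quick usage check for the fancyRide snippet above: with a 5-mile ride, the function returns the priciest tier whose total fare stays at or under $20 (the per-mile fares below are made up for illustration):

    def fancyRide(l, fares):
        cars = ["UberX", "UberXL", "UberPlus", "UberBlack", "UberSUV"]
        bestFair = ""
        currentBestTotal = 0
        for i in range(len(fares)):
            total = l * fares[i]
            if total > currentBestTotal and total <= 20:
                bestFair = cars[i]
                currentBestTotal = total
        return bestFair

    # totals are 5.0, 7.5, 10.0, 15.0, 22.5 -> UberBlack is the priciest <= 20
    print(fancyRide(5, [1.0, 1.5, 2.0, 3.0, 4.5]))  # UberBlack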
pd.set_option('display.precision',16)\n all_tables = pd.read_csv('C:/Users/dvoruga/Downloads/All_tables.csv')\n master_comparision = pd.read_csv('C:/Users/dvoruga/Downloads/master_comparision.csv')\n # query_all_tables = 'SELECT *,TIMESTAMP_MILLIS(creation_time) as create_ts, TIMESTAMP_MILLIS(last_modified_time) as modified_ts FROM `shc-dfs-test.dfs_tbl_bkp.__TABLES__` where row_count between 1 and 5 order by modified_ts desc'\n # # logging.info('Executing: %s', str(qry))\n # all_tables = client.query(query_all_tables)\n all_tables = all_tables[['dataset_id','table_id','modified_ts']]\n # all_tables['modified_ts'].astype(int)\n # all_tables['modified_ts'].astype(int)\n # query_master_comparision = '''select dataset_id, table_id, modified_ts from shc-dfs-test.dfs_test.backup_master'''\n # master_comparision = client.query(query_master_comparision)\n master_comparision = master_comparision[['dataset_id','table_id','modified_ts']]\n # master_comparision['modified_ts'].astype(int)\n merged_df = all_tables.merge(master_comparision, how='outer', indicator='ADD_MOD_DROP', on=['table_id','dataset_id'])\n\n\n merged_df = merged_df.replace({'right_only': 'Drop', 'left_only': 'Add', 'both': 'Exist'})\n print(merged_df)\n # # exist_tables = merged_df.loc[merged_df['ADD_MOD_DROP'] == 'Exist']\n # # not_modified = pd.concat([exist_tables['modified_ts_x'], exist_tables['modified_ts_y']], axis=1)\n # # exp = graph.loc[(graph['wk_no'] == top_wk), ['OFR_ID','exp','std_units','ini_pro']]\n\n newly_added = merged_df[merged_df['ADD_MOD_DROP'] == 'Add']\n dropped = merged_df[merged_df['ADD_MOD_DROP'] == 'Drop']\n modified = merged_df.loc[(merged_df['ADD_MOD_DROP'] == 'Exist') & (merged_df['modified_ts_x'] != merged_df['modified_ts_y'])]\n modified = modified.replace({'Exist':'Modified'})\n # modified.loc[modified['ADD_MOD_DROP']] = 'Modified'\n # modified.loc[modified.ADD_MOD_DROP:],'ADD_MOD_DROP'] = modified.apply(lambda x: x['ADD_MOD_DROP'].replace('Exist','Modified'), axis=1)\n # modified['ADD_MOD_DROP']= modified.loc[modified.ADD_MOD_DROP == 'Exist','ADD_MOD_DROP'] = 'Modified' #This is returning a warning - returning-a-view-versus-a-copy\n print(\"modified::::::\\n\", modified)\n\n\n not_modified = merged_df.loc[(merged_df['ADD_MOD_DROP'] == 'Exist') & (merged_df['modified_ts_x'] == merged_df['modified_ts_y'])]\n not_modified = not_modified.replace({'Exist': 'Not_Modified'})\n # not_modified['ADD_MOD_DROP'] = not_modified.apply(lambda x: x['ADD_MOD_DROP'].replace('Exist','Not_Modified'),axis =1) #This is returning a warning - SettingWithCopyWarning\n print(\"Not_Modified:::\\n\", not_modified)\n # # df8 = df7[df7['State'] == df7['STATE_ALPHA']]\n # # exist_tables['is_winner'] = exist_tables['modified_ts_x'].str.lower() == exist_tables['modified_ts_y'].str.lower()\n #\n # # exist_tables['result'] = exist_tables.loc[exist_tables['modified_ts_x'] == exist_tables['modified_ts_x'], 'no_change', 'changed']\n # return newly_added, dropped, modified, not_modified\n print(\"Dropped:::\\n\",dropped)\n print(\"modified::::\\n\",modified)\n\ncomparing_tables()\n\n# bqclient = bigquery.Client()\n# query_string =\n# dataframe = (\n# bqclient.query(query_string)\n# .result()\n# .to_dataframe(bqstorage_client=bqstorageclient)\n# )\n# print(dataframe.head())\n# bq = BigQueryHook(bigquery_conn_id='bigquery_default', delegate_to=None, use_legacy_sql=False, location='US')\n# cEYW_tbls = bq.get_pandas_df(qry)\n\n\n\n\n# insert_gbq = pd.concat([newly_added,modified,not_modified,dropped], join='outer')\n# insert_gbq = 
insert_gbq.reset_index(drop=True)\n# print(\"create_table_list:::::\\n\",insert_gbq)\n# insert_gbq = insert_gbq[['dataset_id_x','table_id','modified_ts_x','ADD_MOD_DROP']]\n# print(\"insert into temp bgq table to be pickup in the next task:::::\\n\",insert_gbq)\n# insert_gbq.to_gbq('dfs_test.archival_extracted_data_temp', project_id='shc-dfs-test', chunksize=None, reauth=False,\n# if_exists='replace',\n# table_schema=[{'name': 'dataset_id_x', 'type': 'STRING'}, {'name': 'table_id', 'type': 'STRING'},\n# {'name': 'modified_ts_x', 'type': 'INTEGER'},\n# {'name': 'ADD_MOD_DROP', 'type': 'STRING'}])\n\n\n# pd.set_option('display.max_columns',10)\n# sql ='''select dataset_id_x, table_id, modified_ts_x, ADD_MOD_DROP from dfs_test.archival_extracted_data_temp'''\n# project_id = 'shc-dfs-test'\n# create_table_list = pd.read_gbq(sql, project_id=project_id, dialect='standard')\n# print(create_table_list.dtypes)\n# create_table_list = create_table_list.loc[(create_table_list['ADD_MOD_DROP'] == 'Add') | (create_table_list['ADD_MOD_DROP'] == 'Modified')]\n# # create_table_list['dataset_id_x'].astype(str)\n# # create_table_list['table_id'].astype(str)\n# # create_table_list['modified_ts_x'].astype(np.int64)\n# # create_table_list['ADD_MOD_DROP'].astype(str)\n# print(create_table_list)\n\n# table_list = []\n# query = []\n# for i in range(0, create_table_list.shape[0]):\n# table_list.append([create_table_list.iloc[i, 0], create_table_list.iloc[i, 1], str(int(create_table_list.iloc[i, 2]))])\n# query.append(\"insert into dfs_test.backup_master values('\" + create_table_list.iloc[i, 0] + \"','\"+ create_table_list.iloc[i, 1] + \"',\" +str(int(create_table_list.iloc[i, 2])) +\")\")\n# print(query)\n# print(\"table_list:::::::::\",table_list)\n# #lv_bq_project #This need to be replaced with actual bq project\n# for i, rec in enumerate(table_list):\n# ds = rec[0]\n# tab = rec[1]\n# ms = rec[2]\n# ms = str(int(ms))\n# tgt_tab = project_id + ds + '_' + tab\n# src_table = project_id + '.' + ds+ '.' 
+tab\n# tgt_table = project_id + ds+ '_' +tab+ '_' +ms\n# print(\"src_table:::\\n\",src_table)\n# print(\"tgt_table:::\\n\",tgt_table)\n#\n#\n# task_list.append(BigQueryToBigQueryOperator(\n# task_id='backup_' + tab,\n# source_project_dataset_tables=src_table,\n# destination_project_dataset_table=tgt_table,\n# create_disposition='CREATE_IF_NEEDED',\n# write_disposition='WRITE_TRUNCATE',\n# dag=dag)\n# master_rows_remove = pd.concat([dropped, modified]) # insert/delete a row into master table\n# print(\"master_rows_remove\", master_rows_remove[['dataset_id_y', 'table_id', 'modified_ts_y']])\n#\n# q = SELECT dataset_id_x,table_id,modified_ts_x FROM `shc-dfs-test.dfs_test.archival_extracted_data_temp` group by 1,2,3;\n# bq = BigQueryHook(bigquery_conn_id='bigquery_default', delegate_to=None, use_legacy_sql=False, location='US')\n# cEYW_tbls = bq.get_pandas_df(q)\n\n#This is not working as the destination table structure is different.\n# create_table_list.to_gbq('dfs_test.backup_master', project_id='shc-dfs-test', chunksize=None, reauth=False,\n# if_exists='append',\n# )\n\n# client.insert_rows_from_dataframe(dataframe='create_table_list',table = )\n# client.insert_rows_from_dataframe('shc-dfs-test.dfs_test.backup_master', 'create_table_list', selected_fields=None, chunk_size=500)\n # for q in query:\n # client.query(q)\n\n\n # query_job = client.query(q)\n # for job in query_job:\n # print(job)\n\n\n\n\n\n\n\n # for table in table_list:\n # table[2].astype(int)\n # print(type(table[2]))\n # table_list1 = '.'.join(table_list)\n # print(table_list1)\n # for column in create_table_list[['dataset_id_x','table_id','modified_ts_x']]:\n # column_series = create_table_list[column]\n # print(\"Row:::\",column_series)\n # print(\"Row contents::::\",column_series.values)\n # print(dropped)\n # print(modified)\n # print(not_modified)\n\n# comparing_tables()\n\n# def table_dump():\n# dropped = comparing_tables()\n# print(dropped)\n# for e in dropped:\n# print(e)\n#\n# table_dump()\n\n#\n# # dropped_df = merged_df.loc[((merged_df['ADD_MOD_DROP'] == 'Drop')]\n# # merged_df = pd.merge(all_tables,master_comparision, how='outer',left_on='table_id',right_on='Table_name')\n# # print(exist_tables)\n\n# print(\"test\")\n\nfrom itertools import permutations\n\n# username = [\"sandyb@\",\"sbrown@\"]\n# domain =[\"gmail\",\"yahoo\"]\n# suffixes = [\".com\",\".net\"]\n#\n# z = zip(username,domain)\n# username.extend(domain)\n# username.extend(suffixes)\n# print(list(z))\n# # no length entered so default length\n# # taken as 4(the length of string GeEK)\n# p = permutations([username,domain,suffixes],1)\n#\n# # Print the obtained permutations\n# for j in list(p):\n# print(j)\n\n# def recurse(username, domain,suffixes):\n# return recurse(username + domain + suffixes)\n#\n# print(recurse([username, domain,suffixes]))","sub_path":"bq_archive.py","file_name":"bq_archive.py","file_ext":"py","file_size_in_byte":9975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"599354640","text":"# Copyright (c) 2014 eBay Software Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
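comparing_tables() in the bq_archive.py record above classifies tables via pandas' outer merge with an indicator column. A self-contained sketch of that pattern on toy frames (the two-column schema here is simplified, not the script's real one):

    import pandas as pd

    current = pd.DataFrame({"table_id": ["a", "b"], "modified_ts": [1, 2]})
    master = pd.DataFrame({"table_id": ["b", "c"], "modified_ts": [5, 3]})

    merged = current.merge(master, how="outer", on="table_id",
                           indicator="ADD_MOD_DROP")
    # the indicator column is categorical; cast to str before renaming values
    merged["ADD_MOD_DROP"] = merged["ADD_MOD_DROP"].astype(str).replace(
        {"left_only": "Add", "right_only": "Drop", "both": "Exist"})
    # rows present on both sides but with different timestamps are "Modified"
    changed = (merged["ADD_MOD_DROP"] == "Exist") & \
              (merged["modified_ts_x"] != merged["modified_ts_y"])
    merged.loc[changed, "ADD_MOD_DROP"] = "Modified"
    print(merged)

Assigning through .loc on the full frame also sidesteps the SettingWithCopyWarning that the script's own comments mention.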
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nfrom trove.common import cfg\nfrom trove.common import utils\nfrom trove.guestagent.common import operating_system\nfrom trove.guestagent.datastore.mongodb import service as mongo_service\nfrom trove.guestagent.strategies.restore import base\nfrom trove.openstack.common import log as logging\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\nIP = operating_system.get_ip_address()\nLARGE_TIMEOUT = 600\nMONGODB_DBPATH = CONF.mongodb.mount_point\nMONGO_DUMP_DIR = MONGODB_DBPATH + \"/dump\"\n\n\nclass MongoDump(base.RestoreRunner):\n __strategy_name__ = 'mongodump'\n base_restore_cmd = 'sudo tar xPf -'\n\n cleanup_commands = [\n 'sudo rm -fr ' + MONGO_DUMP_DIR,\n 'sudo chown -R mongodb:nogroup ' + MONGODB_DBPATH\n ]\n\n def __init__(self, *args, **kwargs):\n super(MongoDump, self).__init__(*args, **kwargs)\n self.status = mongo_service.MongoDbAppStatus()\n self.app = mongo_service.MongoDBApp(self.status)\n\n def pre_restore(self):\n self.app.stop_db()\n\n def post_restore(self):\n \"\"\"\n Actual restore command streams the archive data from the object store\n This command creates DB FS object xtype from the storage archive\n \"\"\"\n utils.execute_with_timeout(\"mongorestore\", '--host', IP,\n \"--journal\", \"--drop\", \"--dbpath\",\n MONGODB_DBPATH, MONGO_DUMP_DIR,\n run_as_root=True, root_helper=\"sudo\",\n timeout=LARGE_TIMEOUT)\n\n # now that the db file system has been created, clean up the archive dir\n for cmd in self.cleanup_commands:\n utils.execute_with_timeout(cmd, shell=True)\n\n self.app.start_db()\n","sub_path":"trove/guestagent/strategies/restore/mongo_impl.py","file_name":"mongo_impl.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"267155917","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom provinciaForm import ProvinciaForm\nfrom provinciaForm import StateProvinciaForm\nfrom provinciaForm import NewProvincia\nfrom Provincias import Provincia\nfrom provinciaList import ProvinciaList\n\nclass ProvinciaView(tk.Tk):\n\n def __init__(self):\n super().__init__()\n self.title(\"Lista de Provincias\")\n self.resizable(0, 0)\n self.list = ProvinciaList(self, height=18,background=\"#F5CC5E\")\n self.form = StateProvinciaForm(self)\n self.btn_new = tk.Button(self, text=\"Agregar Provincia\", background=\"#55F93E\")\n self.list.pack(side=tk.LEFT, padx=10, pady=10)\n self.form.pack(padx=10, pady=10)\n self.btn_new.pack(side=tk.BOTTOM, pady=5)\n \n def setControlador(self, ctrl):\n # binds the view to the controller\n self.btn_new.config(command=ctrl.crearProvincia)\n self.list.bind_doble_click(ctrl.seleccionarProvincia)\n \n def agregarProvincia(self, provincia):\n self.list.insertar(provincia)\n \n def obtenerDetalles(self):\n return self.form.crearProvinciaDesdeFormulario()\n # Show the Provincia's state in the Provincia form\n \n def verProvinciaEnForm(self, provincia):\n 
self.form.mostrarEstadoProvinciaEnFormulario(provincia)\n","sub_path":"vista.py","file_name":"vista.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"570883470","text":"from __future__ import unicode_literals, division, absolute_import\nfrom flexget import plugin\nfrom flexget import validator\n\n\nclass Magnets(object):\n \"\"\"Removes magnet urls from the urls list. Rejects entries that have nothing but magnet urls.\"\"\"\n def validator(self):\n return validator.factory('boolean')\n\n @plugin.priority(0)\n def on_task_urlrewrite(self, task, config):\n if config is not False:\n return\n for entry in task.accepted:\n if 'urls' in entry:\n entry['urls'] = filter(lambda url: not url.startswith('magnet:'), entry['urls'])\n\n if entry['url'].startswith('magnet:'):\n if entry.get('urls'):\n entry['url'] = entry['urls'][0]\n else:\n entry.reject('Magnet urls not allowed.', remember=True)\n\nplugin.register_plugin(Magnets, 'magnets', api_ver=2)\n","sub_path":"flexget/plugins/filter/magnets.py","file_name":"magnets.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"174830164","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\nimport yaml\n\nfrom fabric.api import *\nfrom fabric.utils import puts\nfrom fabric.contrib.files import exists\nfrom fabric.colors import cyan\n\nfrom signal import pause\n\nfrom fsevents import Observer\nfrom fsevents import Stream\n\nfrom GitNotified import GitNotified\n\n# These are a bunch of constants that identify the different types of file\n# system events that the fsevents library uses.\nkFSEventStreamEventFlagNone = 0x00000000\nkFSEventStreamEventFlagMustScanSubDirs = 0x00000001\nkFSEventStreamEventFlagUserDropped = 0x00000002\nkFSEventStreamEventFlagKernelDropped = 0x00000004\nkFSEventStreamEventFlagEventIdsWrapped = 0x00000008\nkFSEventStreamEventFlagHistoryDone = 0x00000010\nkFSEventStreamEventFlagRootChanged = 0x00000020\nkFSEventStreamEventFlagMount = 0x00000040\nkFSEventStreamEventFlagUnmount = 0x00000080\nkFSEventStreamEventFlagItemCreated = 0x00000100 # 256\nkFSEventStreamEventFlagItemRemoved = 0x00000200\nkFSEventStreamEventFlagItemInodeMetaMod = 0x00000400\nkFSEventStreamEventFlagItemRenamed = 0x00000800\nkFSEventStreamEventFlagItemModified = 0x00001000\nkFSEventStreamEventFlagItemFinderInfoMod = 0x00002000\nkFSEventStreamEventFlagItemChangeOwner = 0x00004000\nkFSEventStreamEventFlagItemXattrMod = 0x00008000\nkFSEventStreamEventFlagItemIsFile = 0x00010000\nkFSEventStreamEventFlagItemIsDir = 0x00020000\nkFSEventStreamEventFlagItemIsSymlink = 0x00040000\n\n\nclass GitSync:\n\n local_path = ''\n local_branch = ''\n remote_path = ''\n remote_host = ''\n remote_user = ''\n git_ignore_lines = ''\n #observer\n #stream\n\n def __init__(self, config, notify):\n\n self.notify = notify\n\n self.local_path = config['local_path']\n self.local_branch = config['local_branch']\n self.remote_path = config['remote_path']\n self.remote_host = config['remote_host']\n self.remote_user = config['remote_user']\n self.git_ignore_lines = config['git_ignore']\n self.observer = Observer()\n\n # Sort the git ignore lines.\n self.git_ignore_lines = sorted(self.git_ignore_lines)\n\n if self.remote_user:\n self.remote_host = self.remote_user + '@' + self.remote_host\n\n # Start watching the directory\n self.stream = Stream(self.callback, self.local_path, file_events=True)\n 
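The magnets plugin above keeps only non-magnet links for each entry and rejects entries that have nothing else. The core filtering step, isolated as a plain-Python sketch (the record's filter() call is Python 2 style; a list comprehension is the Python 3 equivalent):

    urls = ["magnet:?xt=urn:btih:abc", "http://example.com/file.torrent"]
    non_magnet = [u for u in urls if not u.startswith("magnet:")]
    if non_magnet:
        url = non_magnet[0]  # promote the first remaining link
        print(url)
    else:
        print("reject: magnet urls not allowed")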
self.observer.schedule(self.stream)\n\n def start(self):\n self.run_initial_sync()\n self.observer.start()\n\n @task\n def init_remote_master_repository(self, remote_path, local_branch, git_ignore_lines):\n\n puts(\"Setting up %s\" % remote_path)\n\n if not exists(remote_path):\n abort(\"The remote path does not exist: %s\" % remote_path)\n\n git_repo = self.get_remote_git_repo(self, remote_path)\n\n if exists(git_repo):\n puts(\"The git repo already exist: %s\" % git_repo)\n else:\n with cd(remote_path):\n run(\"git init\")\n\n self.update_git_ignore_file(self, remote_path, git_ignore_lines)\n\n with cd(remote_path):\n run(\"git add .gitignore\")\n run(\"git commit -m 'Inital Commit'\")\n run(\"git add .\")\n run(\"git commit -m 'add project'\")\n\n @task\n def update_git_ignore_file(self, remote_path, git_ignore_lines):\n\n puts(\"Updating ignore files.\")\n\n with cd(remote_path):\n with hide('running'):\n\n cmd = []\n for line in git_ignore_lines:\n cmd.append(\"echo '{0}' >> .gitignore_new\".format(line))\n\n run(';'.join(cmd))\n\n run('mv .gitignore_new .gitignore', shell=False)\n\n @task\n def remote_has_modified_files(self, remote_path):\n with cd(remote_path):\n with settings(warn_only=True):\n with hide('running', 'status', 'warnings', 'stderr', 'stdout'):\n\n git_status_output = run(\"git status --porcelain .\")\n\n if not git_status_output:\n puts(cyan(\"%s (remote) is clean.\" % remote_path))\n return False\n else:\n puts(\n cyan(\n \" %s (remote) has uncommitted changes.\"\n % remote_path\n )\n )\n return True\n\n @task\n def local_has_modified_files(self, local_path):\n with lcd(local_path):\n with settings(warn_only=True):\n with hide('running', 'status', 'warnings', 'stderr', 'stdout'):\n\n git_status_output = local(\"git status --porcelain .\", capture=True)\n\n if not git_status_output:\n puts(cyan(\"%s (local) is clean.\" % local_path))\n return False\n else:\n puts(\n cyan(\"%s (local) has uncommitted changes.\" % local_path)\n )\n return True\n\n @task\n def get_remote_git_repo(self, remote_path):\n git_repo = os.path.join(remote_path, '.git')\n return git_repo\n\n @task\n def get_local_git_clone(self, remote_path, local_path):\n local(\"git clone ssh://%s/%s %s\" % (env.host, remote_path, local_path))\n\n @task\n def commit_remote_modified_files(self, remote_path):\n if not self.remote_has_modified_files(self, remote_path):\n return True\n with cd(remote_path):\n run(\"git add .\")\n run(\"git commit -a -m 'committing all changes from %s'\" % (remote_path))\n return True\n\n @task\n def push_remote_master(self, remote_path, local_branch):\n\n self.remote_has_local_branch(self, remote_path, local_branch)\n\n with cd(remote_path):\n run(\"git push origin %s\" % (local_branch))\n return True\n\n def remote_has_local_branch(self, remote_path, local_branch):\n with cd(remote_path):\n git_branches = run('git branch')\n puts(cyan(git_branches))\n\n @task\n def pull_local(self, local_path):\n with lcd(local_path):\n local('git fetch origin')\n\n @task\n def merge_local_master(self, local_path):\n with lcd(local_path):\n local('git merge origin/master')\n\n @task\n def pull_and_merge_local(self, local_path):\n self.pull_local(self, local_path)\n self.merge_local_master(self, local_path)\n\n @task\n def commit_local_modified_files(self, local_path):\n with lcd(local_path):\n if self.local_has_modified_files(self, local_path):\n local(\"git add .\")\n local(\n \"git commit -a -m 'committing all changes from a local machine'\"\n )\n return True\n\n @task\n def 
push_local_to_remote(self, local_path, local_branch):\n if not self.local_has_local_branch(local_path, local_branch):\n self.local_create_local_branch(local_path, local_branch)\n\n with lcd(local_path):\n local(\"git push origin %s\" % (local_branch))\n\n def local_create_local_branch(self, local_path, local_branch):\n with lcd(local_path):\n local('git branch %s' % (local_branch), capture=True)\n\n def local_has_local_branch(self, local_path, local_branch):\n\n puts(cyan(local_path))\n\n with lcd(local_path):\n git_branches = local('git branch', capture=True)\n for branch in git_branches.split():\n if branch == local_branch:\n return True\n return False\n\n @task\n def merge_local_to_remote(self, remote_path, local_branch):\n with cd(remote_path):\n run('git merge %s' % (local_branch))\n\n @task\n def send_local_changes_to_remote(self, remote_path, local_path, local_branch):\n self.commit_local_modified_files(self, local_path)\n self.push_local_to_remote(self, local_path, local_branch)\n self.merge_local_to_remote(self, remote_path, local_branch)\n\n @task\n def send_remote_changes_to_local(self, remote_path, local_path):\n self.commit_remote_modified_files(self, remote_path)\n self.pull_and_merge_local(self, local_path)\n\n @task\n def sync(self, remote_path, local_path, local_branch, git_ignore_lines):\n\n if not os.path.exists(local_path):\n self.init(self, remote_path, local_path, local_branch, git_ignore_lines)\n\n if self.remote_has_modified_files(self, remote_path):\n self.send_remote_changes_to_local(self, remote_path, local_path)\n\n self.send_local_changes_to_remote(self, remote_path, local_path, local_branch)\n\n def initial_sync(self, remote_path, local_path, local_branch, git_ignore_lines):\n if not os.path.exists(local_path):\n self.init(self, remote_path, local_path, local_branch, git_ignore_lines)\n else:\n self.update_git_ignore_file(self, remote_path, git_ignore_lines)\n\n self.send_remote_changes_to_local(self, remote_path, local_path)\n self.send_local_changes_to_remote(self, remote_path, local_path, local_branch)\n\n @task\n def init(self, remote_path, local_path, local_branch, git_ignore_lines):\n self.init_remote_master_repository(self, remote_path, local_branch, git_ignore_lines)\n self.get_local_git_clone(self, remote_path, local_path)\n self.local_create_local_branch(local_path, local_branch)\n with lcd(local_path):\n local(\"git checkout %s\" % (local_branch))\n\n def run_remote_has_modified_files(self):\n result = execute(\n self.remote_has_modified_files,\n self.remote_path,\n host=self.remote_host,\n remote_path=self.remote_path\n )\n return result[self.remote_host]\n\n def run_send_remote_changes_to_local(self):\n result = execute(\n self.send_remote_changes_to_local,\n self,\n host=self.remote_host,\n remote_path=self.remote_path,\n local_path=self.local_path\n )\n return result[self.remote_host]\n\n def run_send_local_changes_to_remote(self):\n result = execute(\n self.send_local_changes_to_remote,\n self,\n host=self.remote_host,\n remote_path=self.remote_path,\n local_path=self.local_path,\n local_branch=self.local_branch\n )\n return result[self.remote_host]\n\n def run_initial_sync(self):\n self.notify.sync_start(self.local_path, self.remote_path, self.remote_host)\n execute(\n self.initial_sync,\n host=self.remote_host,\n remote_path=self.remote_path,\n local_path=self.local_path,\n local_branch=self.local_branch,\n git_ignore_lines=self.git_ignore_lines\n )\n self.notify.sync_done(self.local_path, self.remote_path, self.remote_host)\n\n def 
callback(self, event):\n\n if event.mask == kFSEventStreamEventFlagItemCreated:\n # Sublime Text seems to trigger a lot of these and they don't seem to\n # warrant a new sync, so let's skip these for now.\n return\n\n filename = event.name\n git_dir = os.path.join(self.local_path, '.git')\n\n if git_dir in filename:\n # Skip sync for file changes that are in the .git directory.\n return\n\n self.notify.sync_start(self.local_path, self.remote_path, self.remote_host)\n\n try:\n if self.run_remote_has_modified_files():\n # Stop observing.\n self.observer.unschedule(self.stream)\n self.observer.stop()\n\n self.run_send_remote_changes_to_local()\n\n # Start observing again.\n self.observer = Observer()\n self.observer.schedule(self.stream)\n self.observer.start()\n\n self.run_send_local_changes_to_remote()\n\n except Exception as inst:\n print(\"sync failed.\")\n print(type(inst))\n print(inst.args)\n print(inst)\n self.notify.sync_failed()\n raise\n else:\n self.notify.sync_done(self.local_path, self.remote_path, self.remote_host)\n\n def stop(self):\n self.observer.unschedule(self.stream)\n self.observer.stop()\n\n\ndef parse_config():\n # Setup Parser\n parser = argparse.ArgumentParser(\n description='Use git to sync a site on a server to your local machine.'\n )\n\n parser.add_argument(\n 'config_file',\n nargs='?',\n type=argparse.FileType('r')\n )\n\n args = parser.parse_args()\n\n # Read in config file.\n return yaml.safe_load(args.config_file)\n\n\ndef main():\n global git_sync\n config = parse_config()\n notifier = GitNotified()\n\n git_sync = GitSync(config, notifier)\n git_sync.start()\n\n try:\n while 1:\n pause()\n except KeyboardInterrupt:\n git_sync.stop()\n\n git_sync.observer.join()\n\nif __name__ == '__main__':\n main()\n","sub_path":"gitsync/GitSync.py","file_name":"GitSync.py","file_ext":"py","file_size_in_byte":13131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"206294647","text":"from threading import Lock\n\nclass BankAccount(object):\n def __init__(self):\n self.balance = 0\n self.status = 'closed'\n self.lock = Lock()\n\n def get_balance(self):\n if self.status == 'opened':\n return self.balance\n else:\n raise ValueError('Account is closed.')\n\n def open(self):\n if self.status == 'closed':\n self.status = 'opened'\n return \"Account is already opened.\"\n\n def deposit(self, amount):\n if self.status == 'opened':\n if amount > 0:\n self.balance += amount\n else:\n raise ValueError(\"You cannot deposit negative values\")\n else:\n raise ValueError('Account is closed.')\n \n def withdraw(self, amount):\n if self.status == 'opened':\n if self.balance > 0 and amount > 0 and amount <= self.balance:\n self.balance -= amount\n elif self.balance <= amount and amount > 0:\n raise ValueError('You need to withdraw less money')\n elif amount < 0:\n raise ValueError('You cannot withdraw negative values')\n else:\n raise ValueError('Account is closed.')\n \n\n def close(self):\n if self.status == 'opened':\n self.status = 'closed'\n return \"Account is already closed.\"\n","sub_path":"python/bank-account/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"333516077","text":"# Copyright (c) 2009-2015, Dmitry Vasiliev \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions 
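BankAccount above imports threading.Lock and stores one in __init__ but never acquires it, so concurrent deposits could still race on the read-modify-write of balance. A hedged sketch of how the lock could be used (an assumption about the intent, not the record's actual behavior):

    from threading import Lock

    class SafeAccount(object):
        def __init__(self):
            self.balance = 0
            self.lock = Lock()

        def deposit(self, amount):
            if amount <= 0:
                raise ValueError("You cannot deposit negative values")
            with self.lock:  # serialize the read-modify-write
                self.balance += amount

    acct = SafeAccount()
    acct.deposit(10)
    print(acct.balance)  # 10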
are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport errno\nimport unittest\n\nfrom erlport.erlproto import Port\nfrom erlport.erlterms import Atom\n\n\nclass TestPortClient(object):\n\n def __init__(self, **kwargs):\n r, self.out_d = os.pipe()\n self.in_d, w = os.pipe()\n self.port = Port(descriptors=(r, w), **kwargs)\n\n def read(self):\n return os.read(self.in_d, 65536)\n\n def write(self, data):\n return os.write(self.out_d, data)\n\n def close(self):\n os.close(self.in_d)\n os.close(self.out_d)\n\nclass PortTestCase(unittest.TestCase):\n\n def test_default_port_read(self):\n client = TestPortClient()\n self.assertEqual(12, client.write(\"\\0\\0\\0\\10\\x83d\\0\\4test\"))\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n self.assertEqual(Atom(\"test\"), atom)\n\n def test_default_port_write(self):\n client = TestPortClient()\n self.assertEqual(12, client.port.write(Atom(\"test\")))\n self.assertEqual(\"\\0\\0\\0\\10\\x83d\\0\\4test\", client.read())\n\n def test_invalid_packet_value(self):\n self.assertRaises(ValueError, Port, packet=0)\n self.assertRaises(ValueError, Port, packet=3)\n\n def test_use_stdio(self):\n port = Port()\n self.assertEqual(0, port.in_d)\n self.assertEqual(1, port.out_d)\n port = Port(use_stdio=True)\n self.assertEqual(0, port.in_d)\n self.assertEqual(1, port.out_d)\n\n def test_nouse_stdio(self):\n port = Port(use_stdio=False)\n self.assertEqual(3, port.in_d)\n self.assertEqual(4, port.out_d)\n\n def test_descriptors(self):\n port = Port(descriptors=(10, 20))\n self.assertEqual(10, port.in_d)\n self.assertEqual(20, port.out_d)\n\n def test_port_close(self):\n client = TestPortClient()\n client.port.close()\n self.assertRaises(OSError, client.write, \"data\")\n self.assertEqual(\"\", client.read())\n\n def test_closed_port(self):\n client = TestPortClient()\n client.close()\n self.assertRaises(EOFError, client.port.read)\n self.assertRaises(EOFError, client.port.write, \"data\")\n\n def test_read_multiple_terms(self):\n client = TestPortClient()\n atom_data = \"\\0\\0\\0\\10\\x83d\\0\\4test\"\n self.assertEqual(24, client.write(atom_data + atom_data))\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n 
self.assertEqual(Atom(\"test\"), atom)\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n self.assertEqual(Atom(\"test\"), atom)\n\n def test_small_buffer_read(self):\n client = TestPortClient(buffer_size=1)\n self.assertEqual(12, client.write(\"\\0\\0\\0\\10\\x83d\\0\\4test\"))\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n self.assertEqual(Atom(\"test\"), atom)\n\n def test_invalid_buffer_size(self):\n self.assertRaises(ValueError, Port, buffer_size=0)\n\n def test_packet4_port_read(self):\n client = TestPortClient(packet=4)\n self.assertEqual(12, client.write(\"\\0\\0\\0\\10\\x83d\\0\\4test\"))\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n self.assertEqual(Atom(\"test\"), atom)\n\n def test_packet4_port_write(self):\n client = TestPortClient(packet=4)\n self.assertEqual(12, client.port.write(Atom(\"test\")))\n self.assertEqual(\"\\0\\0\\0\\10\\x83d\\0\\4test\", client.read())\n\n def test_packet2_port_read(self):\n client = TestPortClient(packet=2)\n self.assertEqual(10, client.write(\"\\0\\10\\x83d\\0\\4test\"))\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n self.assertEqual(Atom(\"test\"), atom)\n\n def test_packet2_port_write(self):\n client = TestPortClient(packet=2)\n self.assertEqual(10, client.port.write(Atom(\"test\")))\n self.assertEqual(\"\\0\\10\\x83d\\0\\4test\", client.read())\n\n def test_packet1_port_read(self):\n client = TestPortClient(packet=1)\n self.assertEqual(9, client.write(\"\\10\\x83d\\0\\4test\"))\n atom = client.port.read()\n self.assert_(isinstance(atom, Atom))\n self.assertEqual(Atom(\"test\"), atom)\n\n def test_packet1_port_write(self):\n client = TestPortClient(packet=1)\n self.assertEqual(9, client.port.write(Atom(\"test\")))\n self.assertEqual(\"\\10\\x83d\\0\\4test\", client.read())\n\n def test_compressed_port_read(self):\n client = TestPortClient(packet=1, compressed=True)\n self.assertEqual(26, client.write(\"\\x19\\x83P\\0\\0\\0\\x1a\\x78\\x9c\\xcb\\x61\"\n \"\\x60\\x60\\x60\\xcd\\x66\\x60\\xd4\\x43\\xc7\\x59\\0\\x30\\x48\\3\\xde\"))\n self.assertEqual([[46], [46], [46], [46], [46]], client.port.read())\n\n def test_compressed_port_write(self):\n client = TestPortClient(packet=1, compressed=True)\n self.assertEqual(26, client.port.write([[46], [46], [46], [46], [46]]))\n self.assertEqual(\"\\x19\\x83P\\0\\0\\0\\x1a\\x78\\x9c\\xcb\\x61\"\n \"\\x60\\x60\\x60\\xcd\\x66\\x60\\xd4\\x43\\xc7\\x59\\0\\x30\\x48\\3\\xde\",\n client.read())\n\n def test_slow_write(self):\n write = os.write\n os.write = lambda d, data: 1\n try:\n port = Port(packet=1)\n self.assertEqual(9, port.write(Atom(\"test\")))\n finally:\n os.write = write\n\n def test_no_data_written(self):\n write = os.write\n os.write = lambda d, data: 0\n try:\n port = Port()\n self.assertRaises(EOFError, port.write, \"test\")\n finally:\n os.write = write\n\n def test_error_on_write(self):\n def test_write(d, data):\n raise OSError()\n write = os.write\n os.write = test_write\n try:\n port = Port()\n self.assertRaises(OSError, port.write, \"test\")\n finally:\n os.write = write\n\n def test_error_on_read(self):\n def test_read(d, buffer_size):\n raise OSError()\n read = os.read\n os.read = test_read\n try:\n port = Port()\n self.assertRaises(OSError, port.read)\n finally:\n os.read = read\n\n def test_close_on_read(self):\n def test_read(d, buffer_size):\n raise OSError(errno.EPIPE, \"Pipe closed\")\n read = os.read\n os.read = test_read\n try:\n port = Port()\n self.assertRaises(EOFError, port.read)\n finally:\n 
os.read = read\n\n\ndef get_suite():\n load = unittest.TestLoader().loadTestsFromTestCase\n suite = unittest.TestSuite()\n suite.addTests(load(PortTestCase))\n return suite\n","sub_path":"priv/python2/erlport/tests/erlproto_tests.py","file_name":"erlproto_tests.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"345817499","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n# Coded by mrb-in and MikhailKhromov\r\n\r\nimport os, sys, time, mechanize, random, requests, re\r\nfrom bs4 import BeautifulSoup as BS\r\n\r\n# minimal pool of browser user-agent strings; 'ua' is referenced by send() below\r\nua = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64)']\r\n\r\n\r\nclass Gratis:\r\n def __init__(self):\r\n self.url = 'https://www.sms-gratis.xyz'\r\n self.detekos()\r\n\r\n # clears screen\r\n def detekos(self):\r\n if os.name in ('nt', 'win32'):\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n self.banner()\r\n\r\n # prints hello message\r\n def banner(self):\r\n html = requests.get('https://www.sms-gratis.xyz').text\r\n tes = re.findall(r'
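The erlproto tests above hard-code frames such as "\0\0\0\10\x83d\0\4test": a 4-byte big-endian length prefix (octal \10 = 8) followed by the external term format payload (0x83 version byte, 'd' for ATOM_EXT, a 2-byte atom length, then the atom text). A Python 3 sketch of building that frame with struct (the tests themselves are Python 2):

    import struct

    atom = b"test"
    payload = b"\x83d" + struct.pack(">H", len(atom)) + atom  # ATOM_EXT term
    frame = struct.pack(">I", len(payload)) + payload         # packet=4 framing
    assert frame == b"\x00\x00\x00\x08\x83d\x00\x04test"
    print(frame)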
(.*?)', html)\r\n        tess = str(tes).replace('', '').replace('', '').replace('',\r\n                                                                                     '').replace('
\\\\t', '')\r\n        print(\"\"\"\r\n\t\t;;;;;;;;;;;;;;;;;;;\r\n\t\t; Sms Gratis ;\r\n\t\t; By: mrb12in ;\r\n\t\t;;;;;;;;;;;;;;;;;;;\r\n\t\t\"\"\")\r\n        print(\"[!] Status Server: \" + tess)\r\n        self.no = input('[?] Number Target: ')\r\n        self.msg = input('[Note] From 5 to 100 messages\\n[?] Message: ')\r\n        jum = int(input('[?] Amount of messages: '))\r\n        print()\r\n        for i in range(jum):\r\n            self.send()\r\n            time.sleep(5)\r\n\r\n    def send(self):\r\n        br = mechanize.Browser()\r\n        br.set_handle_equiv(True)\r\n        br.set_handle_gzip(True)\r\n        br.set_handle_redirect(True)\r\n        br.set_handle_referer(True)\r\n        br.set_handle_robots(False)\r\n        br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\r\n        br.addheaders = [(\"User-Agent\", random.choice(ua))]\r\n\r\n        def add(x, y):\r\n            return x + y\r\n\r\n        def subtract(x, y):\r\n            return x - y\r\n\r\n        def multiply(x, y):\r\n            return x * y\r\n\r\n        def divide(x, y):\r\n            return x / y\r\n\r\n        o = []\r\n        bs = BS(br.open(self.url), features=\"html.parser\")\r\n        for x in bs.find_all(\"b\"):\r\n            o.append(x.text)\r\n        ja = o[1].split(' ')\r\n        a = int(ja[0])\r\n        x = ja[1]\r\n        b = int(ja[2])\r\n\r\n\t\t# Felix, what the hell is this? A captcha?\r\n\r\n        if '+' in x:\r\n            jawab = add(a, b)\r\n        elif '-' in x:\r\n            jawab = subtract(a, b)\r\n        elif 'x' in x:\r\n            jawab = multiply(a, b)\r\n        elif '/' in x:\r\n            jawab = divide(a, b)\r\n        br.select_form(nr=0)\r\n        br.form['nomor'] = self.no\r\n        br.form['pesan'] = self.msg\r\n        br.form['jawaban'] = str(jawab)\r\n        br.submit()\r\n        br._factory.is_html = True\r\n        br.select_form(nr=0)\r\n        sub = br.submit().read()\r\n        # print(sub)\r\n        if 'SMS Berhasil Dikirim' in str(sub):\r\n            print('[+] SMS sent')\r\n        elif 'Limit Telah Tercapai' in str(sub):\r\n            print('[!] Temporary message limit')\r\n        else:\r\n            print('[-] This thing doesn\\'t work')\r\n\r\n\r\ntry:\r\n    Gratis()\r\nexcept KeyboardInterrupt:\r\n    print('[Exit] Key interrupt')\r\nexcept Exception as F:\r\n    print('Err: %s' % (F))\r\n","sub_path":"src/gratis.py","file_name":"gratis.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"639891280","text":"# Christine Eunji Kim\n# COSI 101a - HW4\n\nfrom scipy import misc\nimport numpy as np\nimport argparse\nimport sys\nimport input_data\nimport os\nimport math\nimport tensorflow as tf\n\t\ndef weight_var(shape):\n\tinit = tf.truncated_normal(shape, stddev=0.1)\n\treturn tf.Variable(init)\n\t\ndef bias_var(shape):\n\tinit = tf.constant(0.1, shape=shape)\n\treturn tf.Variable(init)\n\t\ndef conv2d(x, weights):\n\treturn tf.nn.conv2d(x, weights, strides=[1,1,1,1], padding='SAME')\n\t\ndef max_pool(x):\n\treturn tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\ndef main(arg):\n\t# get folder path of test images\n\ttest_folder = str(arg[1])\n\t\n\t# setup placeholders\n\tx = tf.placeholder(tf.float32, [None, 784])\n\tx_image = tf.reshape(x, [-1,28,28,1])\n\ty_ = tf.placeholder(tf.float32, [None, 10])\n\t\n\t# convolutional layer 1\n\tw1 = weight_var([3,3,1,32]) # patch size 3x3\n\tb1 = bias_var([32])\n\thc1 = tf.nn.relu(conv2d(x_image, w1) + b1)\n\t\n\t# convolutional layer 2\n\tw2 = weight_var([3,3,32,32]) # patch size 3x3\n\tb2 = bias_var([32])\n\thc2 = tf.nn.relu(conv2d(hc1, w2) + b2)\n\t\n\thp1 = max_pool(hc2) # image size to 14x14\n\t\n\t# convolutional layer 3\n\tw3 = weight_var([3,3,32,64])\n\tb3 = bias_var([64])\n\thc3 = tf.nn.relu(conv2d(hp1, w3) + b3)\n\t\n\t# convolutional layer 4\n\tw4 = weight_var([3,3,64,64])\n\tb4 = 
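gratis.py above answers the site's arithmetic captcha with an if/elif chain over the operator symbol. The same dispatch can be written as a lookup table with the operator module (same behavior, just more compact):

    import operator

    OPS = {"+": operator.add, "-": operator.sub,
           "x": operator.mul, "/": operator.truediv}

    def solve(a, symbol, b):
        return OPS[symbol](a, b)

    print(solve(7, "+", 5))  # 12
    print(solve(7, "x", 5))  # 35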
bias_var([64])\n\thc4 = tf.nn.relu(conv2d(hc3, w4) + b4)\n\t\n\thp2 = max_pool(hc4) # image size to 7x7\n\t\n\t# densely connected layer\n\t# (fully connected with 1024 neurons)\n\twfc1 = weight_var([7*7*64,1024])\n\tbfc1 = bias_var([1024])\n\thp2_flat = tf.reshape(hp2,[-1,7*7*64])\n\thfc1 = tf.nn.relu(tf.matmul(hp2_flat, wfc1) + bfc1)\n\t\n\t# dropout\n\tkeep = tf.placeholder(tf.float32)\n\thfc1_drop = tf.nn.dropout(hfc1,keep)\n\t\n\t# readout layer\n\twfc2 = weight_var([1024,10])\n\tbfc2 = bias_var([10])\n\tyc = tf.matmul(hfc1_drop,wfc2) + bfc2\n\t\n\t# minimize cross entropy using adam optimizer\n\tcross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=yc))\n\ttrain = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\t\n\t# launch model\n\tsaver = tf.train.Saver()\n\t# with tf.Session() as sess:\n\t\t# sess.run(tf.global_variables_initializer())\n\t\t# # import training data, using Tensorflow's mnist.py input_data\n\t\t# data = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\t\t# # train\n\t\t# correct_prediction = tf.equal(tf.argmax(yc,1), tf.argmax(y_,1))\n\t\t# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\t\t# for i in range(20000):\n\t\t\t# batch = data.train.next_batch(50)\n\t\t\t# if i%100 == 0:\n\t\t\t\t# train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep: 1.0})\n\t\t\t\t# print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n\t\t\t# train.run(feed_dict={x: batch[0], y_: batch[1], keep: 0.5})\n\t\t# save_path = saver.save(sess, \"/tmp/cosi101a/model.ckpt\")\n\t\t# print(\"Model saved in file: %s\" % save_path)\n\t\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\tsaver.restore(sess, \"/tmp/cosi101a/model.ckpt\")\n\t\tprint(\"Model restored.\")\n\t\t# test\t\n\t\toutput_file = open(\"output.txt\", \"w\")\n\t\tfor image_file in os.listdir(test_folder):\t\n\t\t\tif image_file.endswith(\".png\"):\n\t\t\t\timage = misc.imread(test_folder+\"/\"+image_file, flatten=True)\n\t\t\t\timage = misc.imresize(image, (28,28))\n\t\t\t\timage = np.reshape(image, [-1, 784])\n\t\t\t\tpred = sess.run(tf.argmax(yc, 1), feed_dict={x: image, keep: 1.0})\n\t\t\t\toutput_file.write(image_file)\n\t\t\t\toutput_file.write(\"\\t\")\n\t\t\t\toutput_file.write(str(pred[0]))\n\t\t\t\toutput_file.write(\"\\n\")\n\t\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--data_dir', type=str, default='/tmp/cosi101a/input_data', help='Directory for storing input data')\n\tFLAGS, unparsed = parser.parse_known_args()\n\ttf.app.run(main=main, argv=sys.argv)","sub_path":"hw/Christine HW/digit_recognition.py","file_name":"digit_recognition.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"427122763","text":"import datetime\nimport math\nimport os\n\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport sudokugen.gen as gen\n\n\ndef draw(*, board: gen.SudokuBoard, filename: str=None, fontname: str=None) -> None:\n \"\"\"draw board to PNG image.\"\"\"\n HERE = os.path.abspath(os.path.dirname(__file__))\n if filename is None:\n now = datetime.datetime.now().astimezone()\n date_fmt = now.strftime(\"%Y-%m-%dT%H:%M:%SZ%z\")\n filename = os.path.join(HERE, f\"sudoku.png\")\n\n FONT_SIZE = 40\n if fontname is not None:\n fnt = ImageFont.truetype(fontname, FONT_SIZE)\n else:\n fnt = ImageFont.truetype(\n os.path.join(HERE, \"FreeMono.ttf\"),\n FONT_SIZE,\n 
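digit_recognition.py above flattens hp2 to 7*7*64 before the dense layer; that size follows from two stride-2 max-pools on the 28x28 input, with 64 channels after the last convolution. A quick arithmetic check:

    size = 28
    for _ in range(2):  # two 2x2 max_pool layers with stride 2, SAME padding
        size //= 2
    channels = 64
    print(size, size * size * channels)  # 7 3136, i.e. 7*7*64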
)\n\n text_block = str(board)\n text_rows = text_block.split(\"\\n\")\n\n # in characters, pixels and the font don't come into it yet.\n text_height = len(text_rows)\n\n # use longest row for width\n longest = \"\"\n longest_size = 0\n for row in text_rows:\n if longest == \"\" or longest_size < len(row):\n longest = row\n longest_size = len(row)\n\n text_width = len(longest)\n\n # https://stackoverflow.com/a/46220683\n ascent, descent = fnt.getmetrics()\n (width, baseline), (offset_x, offset_y) = fnt.font.getsize(longest)\n\n # and then we can get an average width of a character (and height) and pretend that's\n # a sensical way to use monospaced tricks in a proportionally-spaced font.\n char_height = ascent + descent\n char_width = math.floor(width / text_width)\n\n # pad by some number of characters on all sides.\n w_pad = 2\n h_pad = 1\n\n full_width = math.floor((char_width * (text_width + (w_pad * 2))))\n full_height = math.floor(char_height * (text_height + (h_pad * 5)))\n\n BG_COLOR = (250, 250, 250, 254)\n im = Image.new(\n \"RGB\",\n (full_width, full_height),\n BG_COLOR,\n )\n\n dr = ImageDraw.Draw(im)\n\n TEXT_COLOR = (0, 0, 0, 254)\n HOLE_COLOR = (0, 0, 0, 0)\n PERF_COLOR = (200, 200, 200, 0)\n GREEN_ROW_COLOR = (173, 235, 228, 255)\n\n DIAM = char_height // 2\n\n # draw out fake dot-matrix stuff.\n y_offset = 0\n while y_offset < full_height:\n # green/white rectangle backgrounds of characters.\n # two rectangles per row of characters.\n dr.rectangle(\n (\n 2*(DIAM//2) + DIAM,\n y_offset,\n full_width - (2*(DIAM//2) + DIAM),\n y_offset + (char_height//2),\n ),\n fill=GREEN_ROW_COLOR,\n )\n\n dr.rectangle(\n (\n 2*(DIAM//2) + DIAM,\n y_offset + (char_height//2),\n full_width - (2*(DIAM//2) + DIAM),\n y_offset + char_height,\n ),\n fill=BG_COLOR,\n )\n\n # perforations in the \"paper\", for tearing the feed holes off.\n for y_pos in range(y_offset, y_offset + char_height, 4):\n dr.line(\n (\n full_width - (2*(DIAM//2) + DIAM),\n y_pos,\n full_width - (2*(DIAM//2) + DIAM),\n y_pos + 2,\n ),\n fill=PERF_COLOR,\n )\n dr.line(\n (\n 2*(DIAM//2) + DIAM,\n y_pos,\n 2*(DIAM//2) + DIAM,\n y_pos + 2,\n ),\n fill=PERF_COLOR,\n )\n\n # feed holes in \"paper\".\n x_offset = DIAM // 2\n dr.ellipse(\n (\n x_offset,\n y_offset + (DIAM//2),\n x_offset + DIAM,\n y_offset + (DIAM//2) + DIAM,\n ),\n fill=HOLE_COLOR,\n )\n\n x_offset = full_width - 3*(DIAM//2)\n dr.ellipse(\n (\n x_offset,\n y_offset + (DIAM//2),\n x_offset + DIAM,\n y_offset + (DIAM//2) + DIAM,\n ),\n fill=HOLE_COLOR,\n )\n\n y_offset += char_height\n\n # draw out text.\n x_offset = math.floor(w_pad * char_width)\n y_offset = math.floor(h_pad * char_height)\n for r, row in enumerate(text_rows):\n dr.text((x_offset, y_offset), row, font=fnt, fill=TEXT_COLOR)\n y_offset += char_height\n\n im.save(filename, \"PNG\")\n","sub_path":"sudokugen/imagegen.py","file_name":"imagegen.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"5541899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Collection of stats, used on items, buffs, creatures etc.\"\"\"\n\n__authors__ = [\"Ole Herman Schumacher Elgesem\"]\n__copyright__ = \"Ole Herman Schumacher Elgesem\"\n\nimport random\n\nclass Stats:\n def reset(self):\n \"\"\"Sets all stats to 0, identical to __init__\"\"\"\n self.maxhp = 0\n self.maxmp = 0\n self.strength = 0\n self.will = 0\n self.defense = 0\n self.speed = 0\n\n def randomize(self, start=1, stop=100):\n hp = self.maxhp\n 
mp = self.maxmp\n        keys = []\n        for key in self.__dict__:\n            keys.append(key)\n        for key in sorted(keys):\n            self.__dict__[key] = random.randint(start, stop)\n\n    def __init__(self, copy_from=None):\n        \"\"\"See reset()\"\"\"\n        self.reset()\n        if copy_from:\n            copy_from.copy_to(self)\n\n    def set_level(self, level):\n        self.maxhp = level + 10\n        self.maxmp = level + 10\n        self.strength = level\n        self.will = level\n        self.defense = level\n        self.speed = level\n\n    def set_dict(self, dictionary):\n        for name, value in dictionary.items():\n            try:\n                assert name in self.__dict__\n            except AssertionError:\n                raise KeyError\n            try:\n                assert type(value) is int\n            except AssertionError:\n                raise TypeError\n            self.__dict__[name] = value\n\n    def set(self, **kwargs):\n        self.set_dict(kwargs)\n\n    def get(self, key):\n        return self.__dict__[key]\n\n    def copy_to(self, target):\n        target.set_dict(self.__dict__)\n\n    def copy(self):\n        c = Stats()\n        self.copy_to(c)\n        return c\n","sub_path":"mrpg/core/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"86920789","text":"import unittest\nimport requests\nimport json\n\nclass TestUsersApi(unittest.TestCase):\n\n    API_URL = \"http://apiflask-env.uuhyrnua83.us-east-2.elasticbeanstalk.com\"\n\n    def test_get_all_users(self):\n        \n        response = requests.get(self.API_URL+'/users',\n                    headers={'Accept': 'application/json'})\n\n        self.assertEqual(response.status_code, 200, \"Should be 200\")\n        \n\n    def test_path_not_found(self):\n        response = requests.get(self.API_URL+'/user',\n                    headers={'Accept': 'application/json'})\n\n        self.assertEqual(response.status_code, 404, \"Should be 404\")\n        \n\n    def test_add_new_user(self):\n\n        response = requests.post(self.API_URL+'/users/JesusPaz',\n                    headers={'Accept': 'application/json'})\n\n        self.assertEqual(response.status_code, 200, \"Should be 200\")\n\n        data = json.loads(response.content)\n        aux = json.loads(data)\n        userId = aux[\"id\"]\n        numRepos = aux[\"numRepos\"]\n\n        # Delete the user so the test can be run again\n        response = requests.delete(self.API_URL+'/users/JesusPaz',\n                    headers={'Accept': 'application/json'})\n\n        # check that the user was deleted successfully\n        self.assertEqual(response.status_code, 200, \"Should be 200\")\n\n        self.assertTrue(userId != \"\" and userId is not None, \"Cannot be empty or None\")\n        # Commented out because new repos are created all the time,\n        # so a fixed expected count would quickly go stale\n        # self.assertEqual(numRepos, 22, \"Number of repos should be 22\")\n        \n    def test_add_new_user_empty_name(self):\n\n        response = requests.post(self.API_URL+'/users/ ',\n                    headers={'Accept': 'application/json'})\n        \n        self.assertEqual(response.status_code, 400, \"Should be 400\")\n\n    def test_add_new_user_exists_in_database(self):\n\n        response = requests.post(self.API_URL+'/users/danielq97',\n                    headers={'Accept': 'application/json'})\n        \n        self.assertEqual(response.status_code, 400, \"Should be 400\")\n\n    def test_add_new_user_dont_exists_in_github(self):\n\n        response = requests.post(self.API_URL+'/users/atdgps85632s',\n                    headers={'Accept': 'application/json'})\n        \n        self.assertEqual(response.status_code, 400, \"Should be 400\")\n        \n    def test_delete_user_dont_exist(self):\n\n        response = requests.delete(self.API_URL+'/users/atdgps85632s',\n                    headers={'Accept': 'application/json'})\n        \n        self.assertEqual(response.status_code, 400, \"Should be 400\")\n\n    def test_delete_user_empty_name(self):\n\n        response = requests.delete(self.API_URL+'/users/ ',\n                    headers={'Accept': 'application/json'})\n        \n        
self.assertEqual(response.status_code, 400, \"Should be 400\")\n\n \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"303075044","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 29 11:58:14 2019\r\n\r\n@author: BSDU ADMIN\r\n\"\"\"\r\n\r\n\r\n########################## SET A\r\n\r\n# Question 1\r\n\r\ndef factorial():\r\n global e1\r\n fact=1\r\n num=int(str(e1.get()))\r\n for i in range(num,1,-1):\r\n fact=i*fact\r\n Button(root,width=30, text = (\"Factorial of the number is -\",fact), bd = '5', bg=\"black\", fg=\"white\").grid(row=4,column=1)\r\n \r\n\r\n\r\n\r\nfrom tkinter import *\r\n\r\nroot=Tk()\r\n\r\nroot.geometry(\"500x100+600+200\")\r\n\r\n\r\nLabel(root, text='Enter the number for factorial').grid(row=0) \r\ne1 = Entry(root) \r\ne1.grid(row=0, column=1) \r\nb1=Button(root, text = 'Find', bd = '5',command=factorial, bg=\"blue\", fg=\"white\")\r\nb1.grid(row=2,column=1)\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n####### Question 2\r\n\r\nstack=['ash','ank','luv']\r\n\r\nprint(stack)\r\n\r\nstack.pop()\r\n\r\nprint(stack)\r\n\r\nstack.append(\"ankit\")\r\n\r\nprint(stack)\r\n\r\n\r\n\r\n##### question -3\r\n\r\n\r\nfrom flask import Flask\r\n\r\napp=Flask(__name__)\r\n\r\n@app.route(\"/ashwani\")\r\n\r\ndef BSDU():\r\n return \"
<html><title>BSDU</title><body><h1>
Welcome to Bhartiya Skill Development University,Jaipur\"\r\n\r\nif __name__==\"__main__\":\r\n app.run()\r\n \r\n \r\n \r\n ########### question 4\r\n \r\n \r\ndef hanoi(ndisks, startPeg=1, endPeg=3):\r\n\r\n if ndisks:\r\n\r\n hanoi(ndisks-1, startPeg, 6-startPeg-endPeg)\r\n\r\n print(\"Move disk %d from peg %d to peg %d\" % (ndisks, startPeg, endPeg))\r\n\r\n hanoi(ndisks-1, 6-startPeg-endPeg, endPeg)\r\n\r\n \r\nnum=int(input(\"Enter the number of disks :\"))\r\nhanoi(ndisks=num)\r\n\r\n","sub_path":"Ashwani_backup/AI/Assignment/Set1/Set-A.py","file_name":"Set-A.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"365433219","text":"import os\n\nD = {}\ninFile = open('/mnt/larsix/projects/NMD/hansun/Data/Ensembl/Homo_sapiens.GRCh38.88.Exons')\nfor line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n ch = fields[1]\n start = fields[2]\n end = fields[3]\n D[ch + ':' + start] = fields[4] + '\\t' + fields[5]\n D[ch + ':' + end] = fields[4] + '\\t' + fields[5]\ninFile.close()\n\nG = {}\ninFile = open('/mnt/larsix/projects/NMD/hansun/Data/Ensembl/Homo_sapiens.GRCh38.88.GeneRegion')\nfor line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n gene = fields[0] + '\\t' + fields[1]\n ch = 'chr' + fields[2]\n start = int(fields[3])\n end = int(fields[4])\n strand = fields[5]\n G[gene] = [ch, start, end, strand]\ninFile.close()\n\n\ndef DiffGenes(inF):\n inFile = open(inF)\n ouFile = open(inF + '_DifferentGenes', 'w')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n ch = fields[12]\n start = ch + ':' + fields[13]\n end = ch + ':' + fields[14]\n if start in D and end in D:\n if D[start] != D[end]:\n g1 = G[D[start]]\n g2 = G[D[end]]\n if g1[0] == g2[0] and g1[3] == g2[3]:\n if g1[2] < g2[1] or g1[1] > g2[2]:\n ouFile.write(line + '\\t' + D[start] + '\\t' + D[end] + '\\n')\n inFile.close()\n ouFile.close()\n\nFs = os.listdir('.')\nfor F in Fs: \n if F[-13:] == '.KnownJuncs10':\n DiffGenes(F)\n","sub_path":"Readthrough/KnownJuncsAnchor10/02-DiffGenes.py","file_name":"02-DiffGenes.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"145193018","text":"import requests\nfrom fastapi import FastAPI\nimport asyncio\nimport time\nimport sqlite3\nfrom typing import List, Tuple, Dict\n\napp = FastAPI()\nconn = sqlite3.connect('orders.db')\n\n\ndef get_region_id(region: str) -> int:\n url = 'https://m.avito.ru/api/1/slocations?' \\\n 'key=af0deccbgcgidddjgnvljitntccdduijhdinfgjgfjir' \\\n '&locationId=621540&limit=10&q={}'.format(region)\n response = requests.get(url)\n id_region = response.json()['result']['locations'][1]['id']\n return id_region\n\n\nasync def get_count(key_: int, search: str, id_reg: int) -> None:\n while True:\n url = 'https://m.avito.ru/api/10/items?' 
\\\n 'key=af0deccbgcgidddjgnvljitntccdduijhdinfgjgfjir&' \\\n 'query={}&locationId={}'.format(search, id_reg)\n response = requests.get(url)\n count = response.json()['result']['count']\n print(search, \"+\", id_reg, '=', count)\n timestamp = int(time.time())\n info = (key_, count, timestamp, search, id_reg)\n cur = conn.cursor()\n cur.execute(\"INSERT INTO keys(key, count, timestamp, search_fraze, region) VALUES(?, ?, ?, ?, ?);\", info)\n conn.commit()\n await asyncio.sleep(60)\n\n\n@app.get(\"/add\")\nasync def root(search: str, region: str) -> Dict[str, int]:\n cur = conn.cursor()\n cur.execute(\"SELECT MAX(key) FROM keys;\")\n key = cur.fetchone()[0] + 1\n id_reg: int\n id_reg = get_region_id(region)\n asyncio.create_task(get_count(key, search, id_reg))\n return {'id связки (поисковая фраза + регион)': key}\n\n\n@app.get(\"/stat\")\nasync def root(pair_id: int, t1: int, t2: int) -> Dict[str, List[Tuple[int, int]]]:\n cur = conn.cursor()\n sql_select_query = \"\"\"select count, timestamp from keys where key = ? and timestamp > ? and timestamp < ?\"\"\"\n cur.execute(sql_select_query, (pair_id, t1, t2))\n records = cur.fetchall()\n return {'счётчики и соответствующие им временные метки': records}\n","sub_path":"avito.py","file_name":"avito.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"357797576","text":"# /users/mrcarver/PFHT800Studies/V1 (CMSSW_7_4_0_pre7_HLT3)\n\nimport FWCore.ParameterSet.Config as cms\n\n\nHLTConfigVersion = cms.PSet(\n tableName = cms.string('/users/mrcarver/PFHT800Studies/V1')\n)\n\nhltTriggerType = cms.EDFilter( \"HLTTriggerTypeFilter\",\n SelectedTriggerType = cms.int32( 1 )\n)\nhltGtDigis = cms.EDProducer( \"L1GlobalTriggerRawToDigi\",\n DaqGtFedId = cms.untracked.int32( 813 ),\n Verbosity = cms.untracked.int32( 0 ),\n UnpackBxInEvent = cms.int32( 5 ),\n ActiveBoardsMask = cms.uint32( 0xffff ),\n DaqGtInputTag = cms.InputTag( \"rawDataCollector\" )\n)\nhltCaloStage1Digis = cms.EDProducer( \"L1TRawToDigi\",\n lenSlinkTrailer = cms.untracked.int32( 8 ),\n lenAMC13Header = cms.untracked.int32( 8 ),\n lenAMC13Trailer = cms.untracked.int32( 8 ),\n Setup = cms.string( \"stage1::CaloSetup\" ),\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n lenSlinkHeader = cms.untracked.int32( 8 ),\n FWId = cms.untracked.int32( 2 ),\n lenAMCHeader = cms.untracked.int32( 8 ),\n lenAMCTrailer = cms.untracked.int32( 0 ),\n FedId = cms.int32( 1352 )\n)\nhltCaloStage1LegacyFormatDigis = cms.EDProducer( \"L1TCaloUpgradeToGCTConverter\",\n InputHFCountsCollection = cms.InputTag( 'hltCaloStage1Digis','HFBitCounts' ),\n InputHFSumsCollection = cms.InputTag( 'hltCaloStage1Digis','HFRingSums' ),\n InputRlxTauCollection = cms.InputTag( 'hltCaloStage1Digis','rlxTaus' ),\n InputIsoTauCollection = cms.InputTag( 'hltCaloStage1Digis','isoTaus' ),\n InputCollection = cms.InputTag( \"hltCaloStage1Digis\" )\n)\nhltL1GtObjectMap = cms.EDProducer( \"L1GlobalTrigger\",\n TechnicalTriggersUnprescaled = cms.bool( True ),\n ProduceL1GtObjectMapRecord = cms.bool( True ),\n AlgorithmTriggersUnmasked = cms.bool( False ),\n EmulateBxInEvent = cms.int32( 1 ),\n AlgorithmTriggersUnprescaled = cms.bool( True ),\n ProduceL1GtDaqRecord = cms.bool( False ),\n ReadTechnicalTriggerRecords = cms.bool( True ),\n RecordLength = cms.vint32( 3, 0 ),\n TechnicalTriggersUnmasked = cms.bool( False ),\n ProduceL1GtEvmRecord = cms.bool( False ),\n GmtInputTag = cms.InputTag( \"hltGtDigis\" ),\n 
TechnicalTriggersVetoUnmasked = cms.bool( True ),\n AlternativeNrBxBoardEvm = cms.uint32( 0 ),\n TechnicalTriggersInputTags = cms.VInputTag( 'simBscDigis' ),\n CastorInputTag = cms.InputTag( \"castorL1Digis\" ),\n GctInputTag = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" ),\n AlternativeNrBxBoardDaq = cms.uint32( 0 ),\n WritePsbL1GtDaqRecord = cms.bool( False ),\n BstLengthBytes = cms.int32( -1 )\n)\nhltL1extraParticles = cms.EDProducer( \"L1ExtraParticlesProd\",\n tauJetSource = cms.InputTag( 'hltCaloStage1LegacyFormatDigis','tauJets' ),\n etHadSource = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" ),\n isoTauJetSource = cms.InputTag( 'hltCaloStage1LegacyFormatDigis','isoTauJets' ),\n etTotalSource = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" ),\n centralBxOnly = cms.bool( True ),\n centralJetSource = cms.InputTag( 'hltCaloStage1LegacyFormatDigis','cenJets' ),\n etMissSource = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" ),\n hfRingEtSumsSource = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" ),\n produceMuonParticles = cms.bool( True ),\n forwardJetSource = cms.InputTag( 'hltCaloStage1LegacyFormatDigis','forJets' ),\n ignoreHtMiss = cms.bool( False ),\n htMissSource = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" ),\n produceCaloParticles = cms.bool( True ),\n muonSource = cms.InputTag( \"hltGtDigis\" ),\n isolatedEmSource = cms.InputTag( 'hltCaloStage1LegacyFormatDigis','isoEm' ),\n nonIsolatedEmSource = cms.InputTag( 'hltCaloStage1LegacyFormatDigis','nonIsoEm' ),\n hfRingBitCountsSource = cms.InputTag( \"hltCaloStage1LegacyFormatDigis\" )\n)\nhltScalersRawToDigi = cms.EDProducer( \"ScalersRawToDigi\",\n scalersInputTag = cms.InputTag( \"rawDataCollector\" )\n)\nhltOnlineBeamSpot = cms.EDProducer( \"BeamSpotOnlineProducer\",\n maxZ = cms.double( 40.0 ),\n src = cms.InputTag( \"hltScalersRawToDigi\" ),\n gtEvmLabel = cms.InputTag( \"\" ),\n changeToCMSCoordinates = cms.bool( False ),\n setSigmaZ = cms.double( 0.0 ),\n maxRadius = cms.double( 2.0 )\n)\nhltL1sL1HTT150ORHTT175 = cms.EDFilter( \"HLTLevel1GTSeed\",\n L1SeedsLogicalExpression = cms.string( \"L1_HTT150 OR L1_HTT175\" ),\n saveTags = cms.bool( True ),\n L1MuonCollectionTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1UseL1TriggerObjectMaps = cms.bool( True ),\n L1UseAliasesForSeeding = cms.bool( True ),\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n L1CollectionsTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1NrBxInEvent = cms.int32( 3 ),\n L1GtObjectMapTag = cms.InputTag( \"hltL1GtObjectMap\" ),\n L1TechTriggerSeeding = cms.bool( False )\n)\nhltPrePFHT800 = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltEcalDigis = cms.EDProducer( \"EcalRawToDigi\",\n orderedDCCIdList = cms.vint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 ),\n FedLabel = cms.InputTag( \"listfeds\" ),\n eventPut = cms.bool( True ),\n srpUnpacking = cms.bool( True ),\n syncCheck = cms.bool( True ),\n headerUnpacking = cms.bool( True ),\n feUnpacking = cms.bool( True ),\n orderedFedList = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),\n tccUnpacking = 
cms.bool( True ),\n numbTriggerTSamples = cms.int32( 1 ),\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n numbXtalTSamples = cms.int32( 10 ),\n feIdCheck = cms.bool( True ),\n FEDs = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),\n silentMode = cms.untracked.bool( True ),\n DoRegional = cms.bool( False ),\n forceToKeepFRData = cms.bool( False ),\n memUnpacking = cms.bool( True )\n)\nhltEcalUncalibRecHit = cms.EDProducer( \"EcalUncalibRecHitProducer\",\n EEdigiCollection = cms.InputTag( 'hltEcalDigis','eeDigis' ),\n EBdigiCollection = cms.InputTag( 'hltEcalDigis','ebDigis' ),\n EEhitCollection = cms.string( \"EcalUncalibRecHitsEE\" ),\n EBhitCollection = cms.string( \"EcalUncalibRecHitsEB\" ),\n algo = cms.string( \"EcalUncalibRecHitWorkerMultiFit\" ),\n algoPSet = cms.PSet( \n outOfTimeThresholdGain61pEB = cms.double( 5.0 ),\n EBtimeFitParameters = cms.vdouble( -2.015452, 3.130702, -12.3473, 41.88921, -82.83944, 91.01147, -50.35761, 11.05621 ),\n activeBXs = cms.vint32( -5, -4, -3, -2, -1, 0, 1, 2, 3, 4 ),\n amplitudeThresholdEE = cms.double( 10.0 ),\n EBtimeConstantTerm = cms.double( 0.6 ),\n EEtimeFitLimits_Lower = cms.double( 0.2 ),\n outOfTimeThresholdGain61pEE = cms.double( 1000.0 ),\n ebSpikeThreshold = cms.double( 1.042 ),\n EBtimeNconst = cms.double( 28.5 ),\n ampErrorCalculation = cms.bool( False ),\n kPoorRecoFlagEB = cms.bool( True ),\n EBtimeFitLimits_Lower = cms.double( 0.2 ),\n kPoorRecoFlagEE = cms.bool( False ),\n chi2ThreshEB_ = cms.double( 65.0 ),\n EEtimeFitParameters = cms.vdouble( -2.390548, 3.553628, -17.62341, 67.67538, -133.213, 140.7432, -75.41106, 16.20277 ),\n useLumiInfoRunHeader = cms.bool( False ),\n outOfTimeThresholdGain12mEE = cms.double( 1000.0 ),\n outOfTimeThresholdGain12mEB = cms.double( 5.0 ),\n EEtimeFitLimits_Upper = cms.double( 1.4 ),\n prefitMaxChiSqEB = cms.double( 100.0 ),\n EEamplitudeFitParameters = cms.vdouble( 1.89, 1.4 ),\n prefitMaxChiSqEE = cms.double( 10.0 ),\n EBamplitudeFitParameters = cms.vdouble( 1.138, 1.652 ),\n EBtimeFitLimits_Upper = cms.double( 1.4 ),\n timealgo = cms.string( \"None\" ),\n amplitudeThresholdEB = cms.double( 10.0 ),\n outOfTimeThresholdGain12pEE = cms.double( 1000.0 ),\n outOfTimeThresholdGain12pEB = cms.double( 5.0 ),\n EEtimeNconst = cms.double( 31.8 ),\n outOfTimeThresholdGain61mEB = cms.double( 5.0 ),\n outOfTimeThresholdGain61mEE = cms.double( 1000.0 ),\n EEtimeConstantTerm = cms.double( 1.0 ),\n chi2ThreshEE_ = cms.double( 50.0 ),\n doPrefitEE = cms.bool( True ),\n doPrefitEB = cms.bool( True )\n )\n)\nhltEcalDetIdToBeRecovered = cms.EDProducer( \"EcalDetIdToBeRecoveredProducer\",\n ebIntegrityChIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityChIdErrors' ),\n ebDetIdToBeRecovered = cms.string( \"ebDetId\" ),\n integrityTTIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityTTIdErrors' ),\n eeIntegrityGainErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainErrors' ),\n ebFEToBeRecovered = cms.string( \"ebFE\" ),\n ebIntegrityGainErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainErrors' ),\n eeDetIdToBeRecovered = cms.string( \"eeDetId\" ),\n eeIntegrityGainSwitchErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainSwitchErrors' ),\n eeIntegrityChIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityChIdErrors' ),\n ebIntegrityGainSwitchErrors = 
cms.InputTag( 'hltEcalDigis','EcalIntegrityGainSwitchErrors' ),\n ebSrFlagCollection = cms.InputTag( \"hltEcalDigis\" ),\n eeSrFlagCollection = cms.InputTag( \"hltEcalDigis\" ),\n integrityBlockSizeErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityBlockSizeErrors' ),\n eeFEToBeRecovered = cms.string( \"eeFE\" )\n)\nhltEcalRecHit = cms.EDProducer( \"EcalRecHitProducer\",\n recoverEEVFE = cms.bool( False ),\n EErechitCollection = cms.string( \"EcalRecHitsEE\" ),\n recoverEBIsolatedChannels = cms.bool( False ),\n recoverEBVFE = cms.bool( False ),\n laserCorrection = cms.bool( True ),\n EBLaserMIN = cms.double( 0.5 ),\n killDeadChannels = cms.bool( True ),\n dbStatusToBeExcludedEB = cms.vint32( 14, 78, 142 ),\n EEuncalibRecHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEE' ),\n EBLaserMAX = cms.double( 3.0 ),\n EELaserMIN = cms.double( 0.5 ),\n ebFEToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','ebFE' ),\n EELaserMAX = cms.double( 8.0 ),\n recoverEEIsolatedChannels = cms.bool( False ),\n eeDetIdToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','eeDetId' ),\n recoverEBFE = cms.bool( True ),\n algo = cms.string( \"EcalRecHitWorkerSimple\" ),\n ebDetIdToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','ebDetId' ),\n singleChannelRecoveryThreshold = cms.double( 8.0 ),\n ChannelStatusToBeExcluded = cms.vstring( ),\n EBrechitCollection = cms.string( \"EcalRecHitsEB\" ),\n singleChannelRecoveryMethod = cms.string( \"NeuralNetworks\" ),\n recoverEEFE = cms.bool( True ),\n triggerPrimitiveDigiCollection = cms.InputTag( 'hltEcalDigis','EcalTriggerPrimitives' ),\n dbStatusToBeExcludedEE = cms.vint32( 14, 78, 142 ),\n flagsMapDBReco = cms.PSet( \n kGood = cms.vstring( 'kOk',\n 'kDAC',\n 'kNoLaser',\n 'kNoisy' ),\n kNeighboursRecovered = cms.vstring( 'kFixedG0',\n 'kNonRespondingIsolated',\n 'kDeadVFE' ),\n kDead = cms.vstring( 'kNoDataNoTP' ),\n kNoisy = cms.vstring( 'kNNoisy',\n 'kFixedG6',\n 'kFixedG1' ),\n kTowerRecovered = cms.vstring( 'kDeadFE' )\n ),\n EBuncalibRecHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEB' ),\n algoRecover = cms.string( \"EcalRecHitWorkerRecover\" ),\n eeFEToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','eeFE' ),\n cleaningConfig = cms.PSet( \n e6e2thresh = cms.double( 0.04 ),\n tightenCrack_e6e2_double = cms.double( 3.0 ),\n e4e1Threshold_endcap = cms.double( 0.3 ),\n tightenCrack_e4e1_single = cms.double( 3.0 ),\n tightenCrack_e1_double = cms.double( 2.0 ),\n cThreshold_barrel = cms.double( 4.0 ),\n e4e1Threshold_barrel = cms.double( 0.08 ),\n tightenCrack_e1_single = cms.double( 2.0 ),\n e4e1_b_barrel = cms.double( -0.024 ),\n e4e1_a_barrel = cms.double( 0.04 ),\n ignoreOutOfTimeThresh = cms.double( 1.0E9 ),\n cThreshold_endcap = cms.double( 15.0 ),\n e4e1_b_endcap = cms.double( -0.0125 ),\n e4e1_a_endcap = cms.double( 0.02 ),\n cThreshold_double = cms.double( 10.0 )\n ),\n logWarningEtThreshold_EB_FE = cms.double( 50.0 ),\n logWarningEtThreshold_EE_FE = cms.double( 50.0 )\n)\nhltHcalDigis = cms.EDProducer( \"HcalRawToDigi\",\n ExpectedOrbitMessageTime = cms.untracked.int32( -1 ),\n FilterDataQuality = cms.bool( True ),\n silent = cms.untracked.bool( True ),\n HcalFirstFED = cms.untracked.int32( 700 ),\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n ComplainEmptyData = cms.untracked.bool( False ),\n UnpackCalib = cms.untracked.bool( True ),\n FEDs = cms.untracked.vint32( ),\n UnpackerMode = cms.untracked.int32( 0 ),\n UnpackTTP = cms.untracked.bool( False ),\n lastSample = 
cms.int32( 9 ),\n UnpackZDC = cms.untracked.bool( True ),\n firstSample = cms.int32( 0 )\n)\nhltHbhereco = cms.EDProducer( \"HcalHitReconstructor\",\n digiTimeFromDB = cms.bool( True ),\n mcOOTCorrectionName = cms.string( \"\" ),\n S9S1stat = cms.PSet( ),\n saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),\n tsFromDB = cms.bool( True ),\n samplesToAdd = cms.int32( 4 ),\n mcOOTCorrectionCategory = cms.string( \"MC\" ),\n dataOOTCorrectionName = cms.string( \"\" ),\n puCorrMethod = cms.int32( 0 ),\n correctionPhaseNS = cms.double( 13.0 ),\n HFInWindowStat = cms.PSet( ),\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n setHSCPFlags = cms.bool( False ),\n firstAuxTS = cms.int32( 4 ),\n digistat = cms.PSet( ),\n hfTimingTrustParameters = cms.PSet( ),\n PETstat = cms.PSet( ),\n setSaturationFlags = cms.bool( False ),\n setNegativeFlags = cms.bool( False ),\n useLeakCorrection = cms.bool( False ),\n setTimingTrustFlags = cms.bool( False ),\n S8S1stat = cms.PSet( ),\n correctForPhaseContainment = cms.bool( True ),\n correctForTimeslew = cms.bool( True ),\n setNoiseFlags = cms.bool( False ),\n correctTiming = cms.bool( False ),\n setPulseShapeFlags = cms.bool( False ),\n Subdetector = cms.string( \"HBHE\" ),\n dataOOTCorrectionCategory = cms.string( \"Data\" ),\n dropZSmarkedPassed = cms.bool( True ),\n recoParamsFromDB = cms.bool( True ),\n firstSample = cms.int32( 4 ),\n setTimingShapedCutsFlags = cms.bool( False ),\n pulseJitter = cms.double( 1.0 ),\n chargeMax = cms.double( 6.0 ),\n negativeParameters = cms.PSet( \n TS4TS5ChargeThreshold = cms.double( 70.0 ),\n Cut = cms.vdouble( -50.0, -100.0, -100.0, -100.0, -100.0, -100.0 ),\n Last = cms.int32( 6 ),\n MinimumChargeThreshold = cms.double( 20.0 ),\n Threshold = cms.vdouble( 100.0, 120.0, 160.0, 200.0, 300.0, 500.0 ),\n First = cms.int32( 4 )\n ),\n timeMin = cms.double( -15.0 ),\n ts4chi2 = cms.double( 15.0 ),\n ts345chi2 = cms.double( 100.0 ),\n applyTimeSlew = cms.bool( True ),\n applyTimeConstraint = cms.bool( True ),\n applyPulseJitter = cms.bool( False ),\n timingshapedcutsParameters = cms.PSet( \n ignorelowest = cms.bool( True ),\n win_offset = cms.double( 0.0 ),\n ignorehighest = cms.bool( False ),\n win_gain = cms.double( 1.0 ),\n tfilterEnvelope = cms.vdouble( 4.0, 12.04, 13.0, 10.56, 23.5, 8.82, 37.0, 7.38, 56.0, 6.3, 81.0, 5.64, 114.5, 5.44, 175.5, 5.38, 350.5, 5.14 )\n ),\n ts3chi2 = cms.double( 5.0 ),\n ts4Min = cms.double( 5.0 ),\n pulseShapeParameters = cms.PSet( ),\n noise = cms.double( 1.0 ),\n applyPedConstraint = cms.bool( True ),\n applyUnconstrainedFit = cms.bool( False ),\n ts4Max = cms.double( 500.0 ),\n meanTime = cms.double( -2.5 ),\n flagParameters = cms.PSet( \n nominalPedestal = cms.double( 3.0 ),\n hitMultiplicityThreshold = cms.int32( 17 ),\n hitEnergyMinimum = cms.double( 1.0 ),\n pulseShapeParameterSets = cms.VPSet( \n cms.PSet( pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )\n )\n ),\n fitTimes = cms.int32( 1 ),\n timeMax = cms.double( 10.0 ),\n timeSigma = cms.double( 5.0 ),\n pedSigma = cms.double( 0.5 ),\n meanPed = cms.double( 0.0 ),\n hscpParameters = cms.PSet( \n slopeMax = cms.double( -0.6 ),\n r1Max = cms.double( 1.0 ),\n r1Min = cms.double( 0.15 ),\n TimingEnergyThreshold = 
cms.double( 30.0 ),\n slopeMin = cms.double( -1.5 ),\n outerMin = cms.double( 0.0 ),\n outerMax = cms.double( 0.1 ),\n fracLeaderMin = cms.double( 0.4 ),\n r2Min = cms.double( 0.1 ),\n r2Max = cms.double( 0.5 ),\n fracLeaderMax = cms.double( 0.7 )\n )\n)\nhltHfreco = cms.EDProducer( \"HcalHitReconstructor\",\n digiTimeFromDB = cms.bool( True ),\n mcOOTCorrectionName = cms.string( \"\" ),\n S9S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 24 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n isS8S1 = cms.bool( False ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),\n tsFromDB = cms.bool( True ),\n samplesToAdd = cms.int32( 2 ),\n mcOOTCorrectionCategory = cms.string( \"MC\" ),\n dataOOTCorrectionName = cms.string( \"\" ),\n puCorrMethod = cms.int32( 0 ),\n correctionPhaseNS = cms.double( 13.0 ),\n HFInWindowStat = cms.PSet( \n hflongEthresh = cms.double( 40.0 ),\n hflongMinWindowTime = cms.vdouble( -10.0 ),\n hfshortEthresh = cms.double( 40.0 ),\n hflongMaxWindowTime = cms.vdouble( 10.0 ),\n hfshortMaxWindowTime = cms.vdouble( 10.0 ),\n hfshortMinWindowTime = cms.vdouble( -12.0 )\n ),\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n setHSCPFlags = cms.bool( False ),\n firstAuxTS = cms.int32( 1 ),\n digistat = cms.PSet( \n HFdigiflagFirstSample = cms.int32( 1 ),\n HFdigiflagMinEthreshold = cms.double( 40.0 ),\n HFdigiflagSamplesToAdd = cms.int32( 3 ),\n HFdigiflagExpectedPeak = cms.int32( 2 ),\n HFdigiflagCoef = cms.vdouble( 0.93, -0.012667, -0.38275 )\n ),\n hfTimingTrustParameters = cms.PSet( \n hfTimingTrustLevel2 = cms.int32( 4 ),\n hfTimingTrustLevel1 = cms.int32( 1 )\n ),\n PETstat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_R_29 = cms.vdouble( 0.8 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 0 ),\n short_R = cms.vdouble( 0.8 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n long_R_29 = cms.vdouble( 0.8 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_R = cms.vdouble( 0.98 ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n setSaturationFlags = cms.bool( False ),\n setNegativeFlags = cms.bool( False ),\n useLeakCorrection = cms.bool( False ),\n setTimingTrustFlags = cms.bool( False ),\n S8S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 
100.0 ),\n flagsToSkip = cms.int32( 16 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n isS8S1 = cms.bool( True ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n correctForPhaseContainment = cms.bool( False ),\n correctForTimeslew = cms.bool( False ),\n setNoiseFlags = cms.bool( True ),\n correctTiming = cms.bool( False ),\n setPulseShapeFlags = cms.bool( False ),\n Subdetector = cms.string( \"HF\" ),\n dataOOTCorrectionCategory = cms.string( \"Data\" ),\n dropZSmarkedPassed = cms.bool( True ),\n recoParamsFromDB = cms.bool( True ),\n firstSample = cms.int32( 2 ),\n setTimingShapedCutsFlags = cms.bool( False ),\n pulseJitter = cms.double( 1.0 ),\n chargeMax = cms.double( 6.0 ),\n negativeParameters = cms.PSet( \n TS4TS5ChargeThreshold = cms.double( 70.0 ),\n Cut = cms.vdouble( -50.0, -100.0, -100.0, -100.0, -100.0, -100.0 ),\n Last = cms.int32( 6 ),\n MinimumChargeThreshold = cms.double( 20.0 ),\n Threshold = cms.vdouble( 100.0, 120.0, 160.0, 200.0, 300.0, 500.0 ),\n First = cms.int32( 4 )\n ),\n timeMin = cms.double( -15.0 ),\n ts4chi2 = cms.double( 15.0 ),\n ts345chi2 = cms.double( 100.0 ),\n applyTimeSlew = cms.bool( True ),\n applyTimeConstraint = cms.bool( True ),\n applyPulseJitter = cms.bool( False ),\n timingshapedcutsParameters = cms.PSet( ),\n ts3chi2 = cms.double( 5.0 ),\n ts4Min = cms.double( 5.0 ),\n pulseShapeParameters = cms.PSet( ),\n noise = cms.double( 1.0 ),\n applyPedConstraint = cms.bool( True ),\n applyUnconstrainedFit = cms.bool( False ),\n ts4Max = cms.double( 500.0 ),\n meanTime = cms.double( -2.5 ),\n flagParameters = cms.PSet( ),\n fitTimes = cms.int32( 1 ),\n timeMax = cms.double( 10.0 ),\n timeSigma = cms.double( 5.0 ),\n pedSigma = cms.double( 0.5 ),\n meanPed = cms.double( 0.0 ),\n hscpParameters = cms.PSet( )\n)\nhltHoreco = cms.EDProducer( \"HcalHitReconstructor\",\n digiTimeFromDB = cms.bool( True ),\n mcOOTCorrectionName = cms.string( \"\" ),\n S9S1stat = cms.PSet( ),\n saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),\n tsFromDB = cms.bool( True ),\n samplesToAdd = cms.int32( 4 ),\n mcOOTCorrectionCategory = cms.string( \"MC\" ),\n dataOOTCorrectionName = cms.string( \"\" ),\n puCorrMethod = cms.int32( 0 ),\n correctionPhaseNS = cms.double( 13.0 ),\n HFInWindowStat = cms.PSet( ),\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n setHSCPFlags = cms.bool( False ),\n firstAuxTS = cms.int32( 4 ),\n digistat = cms.PSet( ),\n hfTimingTrustParameters = cms.PSet( ),\n PETstat = cms.PSet( ),\n setSaturationFlags = cms.bool( False ),\n setNegativeFlags = cms.bool( False ),\n useLeakCorrection = cms.bool( False ),\n setTimingTrustFlags = cms.bool( False ),\n S8S1stat = cms.PSet( ),\n correctForPhaseContainment = cms.bool( True ),\n correctForTimeslew = cms.bool( True ),\n setNoiseFlags = cms.bool( False ),\n correctTiming = cms.bool( False ),\n setPulseShapeFlags = cms.bool( False ),\n Subdetector = cms.string( \"HO\" ),\n dataOOTCorrectionCategory = cms.string( \"Data\" ),\n dropZSmarkedPassed = cms.bool( True ),\n recoParamsFromDB = cms.bool( True ),\n firstSample = cms.int32( 4 ),\n setTimingShapedCutsFlags = cms.bool( False ),\n pulseJitter = cms.double( 1.0 ),\n 
chargeMax = cms.double( 6.0 ),\n negativeParameters = cms.PSet( \n TS4TS5ChargeThreshold = cms.double( 70.0 ),\n Cut = cms.vdouble( -50.0, -100.0, -100.0, -100.0, -100.0, -100.0 ),\n Last = cms.int32( 6 ),\n MinimumChargeThreshold = cms.double( 20.0 ),\n Threshold = cms.vdouble( 100.0, 120.0, 160.0, 200.0, 300.0, 500.0 ),\n First = cms.int32( 4 )\n ),\n timeMin = cms.double( -15.0 ),\n ts4chi2 = cms.double( 15.0 ),\n ts345chi2 = cms.double( 100.0 ),\n applyTimeSlew = cms.bool( True ),\n applyTimeConstraint = cms.bool( True ),\n applyPulseJitter = cms.bool( False ),\n timingshapedcutsParameters = cms.PSet( ),\n ts3chi2 = cms.double( 5.0 ),\n ts4Min = cms.double( 5.0 ),\n pulseShapeParameters = cms.PSet( ),\n noise = cms.double( 1.0 ),\n applyPedConstraint = cms.bool( True ),\n applyUnconstrainedFit = cms.bool( False ),\n ts4Max = cms.double( 500.0 ),\n meanTime = cms.double( -2.5 ),\n flagParameters = cms.PSet( ),\n fitTimes = cms.int32( 1 ),\n timeMax = cms.double( 10.0 ),\n timeSigma = cms.double( 5.0 ),\n pedSigma = cms.double( 0.5 ),\n meanPed = cms.double( 0.0 ),\n hscpParameters = cms.PSet( )\n)\nhltTowerMakerForAll = cms.EDProducer( \"CaloTowersCreator\",\n EBSumThreshold = cms.double( 0.2 ),\n MomHBDepth = cms.double( 0.2 ),\n UseEtEBTreshold = cms.bool( False ),\n hfInput = cms.InputTag( \"hltHfreco\" ),\n AllowMissingInputs = cms.bool( False ),\n MomEEDepth = cms.double( 0.0 ),\n EESumThreshold = cms.double( 0.45 ),\n HBGrid = cms.vdouble( ),\n HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),\n HBThreshold = cms.double( 0.7 ),\n EcalSeveritiesToBeUsedInBadTowers = cms.vstring( ),\n UseEcalRecoveredHits = cms.bool( False ),\n MomConstrMethod = cms.int32( 1 ),\n MomHEDepth = cms.double( 0.4 ),\n HcalThreshold = cms.double( -1000.0 ),\n HF2Weights = cms.vdouble( ),\n HOWeights = cms.vdouble( ),\n EEGrid = cms.vdouble( ),\n UseSymEBTreshold = cms.bool( False ),\n EEWeights = cms.vdouble( ),\n EEWeight = cms.double( 1.0 ),\n UseHO = cms.bool( False ),\n HBWeights = cms.vdouble( ),\n HF1Weight = cms.double( 1.0 ),\n HF2Grid = cms.vdouble( ),\n HEDWeights = cms.vdouble( ),\n HEDGrid = cms.vdouble( ),\n EBWeight = cms.double( 1.0 ),\n HF1Grid = cms.vdouble( ),\n EBWeights = cms.vdouble( ),\n HOWeight = cms.double( 1.0E-99 ),\n HESWeight = cms.double( 1.0 ),\n HESThreshold = cms.double( 0.8 ),\n hbheInput = cms.InputTag( \"hltHbhereco\" ),\n HF2Weight = cms.double( 1.0 ),\n HF2Threshold = cms.double( 0.85 ),\n HcalAcceptSeverityLevel = cms.uint32( 9 ),\n EEThreshold = cms.double( 0.3 ),\n HOThresholdPlus1 = cms.double( 3.5 ),\n HOThresholdPlus2 = cms.double( 3.5 ),\n HF1Weights = cms.vdouble( ),\n hoInput = cms.InputTag( \"hltHoreco\" ),\n HF1Threshold = cms.double( 0.5 ),\n HOThresholdMinus1 = cms.double( 3.5 ),\n HESGrid = cms.vdouble( ),\n EcutTower = cms.double( -1000.0 ),\n UseRejectedRecoveredEcalHits = cms.bool( False ),\n UseEtEETreshold = cms.bool( False ),\n HESWeights = cms.vdouble( ),\n EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',\n 'kWeird',\n 'kBad' ),\n HEDWeight = cms.double( 1.0 ),\n UseSymEETreshold = cms.bool( False ),\n HEDThreshold = cms.double( 0.8 ),\n EBThreshold = cms.double( 0.07 ),\n UseRejectedHitsOnly = cms.bool( False ),\n UseHcalRecoveredHits = cms.bool( False ),\n HOThresholdMinus2 = cms.double( 3.5 ),\n HOThreshold0 = cms.double( 3.5 ),\n ecalInputs = cms.VInputTag( 'hltEcalRecHit:EcalRecHitsEB','hltEcalRecHit:EcalRecHitsEE' ),\n UseRejectedRecoveredHcalHits = cms.bool( False ),\n MomEBDepth = cms.double( 0.3 ),\n HBWeight = 
cms.double( 1.0 ),\n HOGrid = cms.vdouble( ),\n EBGrid = cms.vdouble( )\n)\nhltAK4CaloJets = cms.EDProducer( \"FastjetJetProducer\",\n Active_Area_Repeats = cms.int32( 5 ),\n doAreaFastjet = cms.bool( False ),\n voronoiRfact = cms.double( 0.9 ),\n maxBadHcalCells = cms.uint32( 9999999 ),\n doAreaDiskApprox = cms.bool( True ),\n maxRecoveredEcalCells = cms.uint32( 9999999 ),\n jetType = cms.string( \"CaloJet\" ),\n minSeed = cms.uint32( 14327 ),\n Ghost_EtaMax = cms.double( 6.0 ),\n doRhoFastjet = cms.bool( False ),\n jetAlgorithm = cms.string( \"AntiKt\" ),\n nSigmaPU = cms.double( 1.0 ),\n GhostArea = cms.double( 0.01 ),\n Rho_EtaMax = cms.double( 4.4 ),\n maxBadEcalCells = cms.uint32( 9999999 ),\n useDeterministicSeed = cms.bool( True ),\n doPVCorrection = cms.bool( False ),\n maxRecoveredHcalCells = cms.uint32( 9999999 ),\n rParam = cms.double( 0.4 ),\n maxProblematicHcalCells = cms.uint32( 9999999 ),\n doOutputJets = cms.bool( True ),\n src = cms.InputTag( \"hltTowerMakerForAll\" ),\n inputEtMin = cms.double( 0.3 ),\n puPtMin = cms.double( 10.0 ),\n srcPVs = cms.InputTag( \"NotUsed\" ),\n jetPtMin = cms.double( 1.0 ),\n radiusPU = cms.double( 0.4 ),\n maxProblematicEcalCells = cms.uint32( 9999999 ),\n doPUOffsetCorr = cms.bool( False ),\n inputEMin = cms.double( 0.0 ),\n useMassDropTagger = cms.bool( False ),\n muMin = cms.double( -1.0 ),\n subtractorName = cms.string( \"\" ),\n muCut = cms.double( -1.0 ),\n subjetPtMin = cms.double( -1.0 ),\n useTrimming = cms.bool( False ),\n muMax = cms.double( -1.0 ),\n yMin = cms.double( -1.0 ),\n useFiltering = cms.bool( False ),\n rFilt = cms.double( -1.0 ),\n yMax = cms.double( -1.0 ),\n zcut = cms.double( -1.0 ),\n MinVtxNdof = cms.int32( 5 ),\n MaxVtxZ = cms.double( 15.0 ),\n UseOnlyVertexTracks = cms.bool( False ),\n dRMin = cms.double( -1.0 ),\n nFilt = cms.int32( -1 ),\n usePruning = cms.bool( False ),\n maxDepth = cms.int32( -1 ),\n yCut = cms.double( -1.0 ),\n DzTrVtxMax = cms.double( 0.0 ),\n UseOnlyOnePV = cms.bool( False ),\n rcut_factor = cms.double( -1.0 ),\n sumRecHits = cms.bool( False ),\n trimPtFracMin = cms.double( -1.0 ),\n dRMax = cms.double( -1.0 ),\n DxyTrVtxMax = cms.double( 0.0 ),\n useCMSBoostedTauSeedingAlgorithm = cms.bool( False )\n)\nhltAK4CaloJetsIDPassed = cms.EDProducer( \"HLTCaloJetIDProducer\",\n min_N90 = cms.int32( -2 ),\n min_N90hits = cms.int32( 2 ),\n min_EMF = cms.double( 1.0E-6 ),\n jetsInput = cms.InputTag( \"hltAK4CaloJets\" ),\n JetIDParams = cms.PSet( \n useRecHits = cms.bool( True ),\n hbheRecHitsColl = cms.InputTag( \"hltHbhereco\" ),\n hoRecHitsColl = cms.InputTag( \"hltHoreco\" ),\n hfRecHitsColl = cms.InputTag( \"hltHfreco\" ),\n ebRecHitsColl = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),\n eeRecHitsColl = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' )\n ),\n max_EMF = cms.double( 999.0 )\n)\nhltFixedGridRhoFastjetAllCalo = cms.EDProducer( \"FixedGridRhoProducerFastjet\",\n gridSpacing = cms.double( 0.55 ),\n maxRapidity = cms.double( 5.0 ),\n pfCandidatesTag = cms.InputTag( \"hltTowerMakerForAll\" )\n)\nhltAK4CaloJetsCorrected = cms.EDProducer( \"CaloJetCorrectionProducer\",\n src = cms.InputTag( \"hltAK4CaloJets\" ),\n correctors = cms.vstring( 'hltESPAK4CaloCorrection' )\n)\nhltAK4CaloJetsCorrectedIDPassed = cms.EDProducer( \"CaloJetCorrectionProducer\",\n src = cms.InputTag( \"hltAK4CaloJetsIDPassed\" ),\n correctors = cms.vstring( 'hltESPAK4CaloCorrection' )\n)\nhltHtMht = cms.EDProducer( \"HLTHtMhtProducer\",\n usePt = cms.bool( False ),\n minPtJetHt = cms.double( 40.0 ),\n 
maxEtaJetMht = cms.double( 5.0 ),\n minNJetMht = cms.int32( 0 ),\n jetsLabel = cms.InputTag( \"hltAK4CaloJetsCorrected\" ),\n maxEtaJetHt = cms.double( 3.0 ),\n minPtJetMht = cms.double( 30.0 ),\n minNJetHt = cms.int32( 0 ),\n pfCandidatesLabel = cms.InputTag( \"\" ),\n excludePFMuons = cms.bool( False )\n)\nhltHt700 = cms.EDFilter( \"HLTHtMhtFilter\",\n saveTags = cms.bool( False ),\n mhtLabels = cms.VInputTag( 'hltHtMht' ),\n meffSlope = cms.vdouble( 1.0 ),\n minMeff = cms.vdouble( 0.0 ),\n minMht = cms.vdouble( 0.0 ),\n htLabels = cms.VInputTag( 'hltHtMht' ),\n minHt = cms.vdouble( 700.0 )\n)\nhltTowerMakerForPF = cms.EDProducer( \"CaloTowersCreator\",\n EBSumThreshold = cms.double( 0.2 ),\n MomHBDepth = cms.double( 0.2 ),\n UseEtEBTreshold = cms.bool( False ),\n hfInput = cms.InputTag( \"hltHfreco\" ),\n AllowMissingInputs = cms.bool( False ),\n MomEEDepth = cms.double( 0.0 ),\n EESumThreshold = cms.double( 0.45 ),\n HBGrid = cms.vdouble( ),\n HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),\n HBThreshold = cms.double( 0.4 ),\n EcalSeveritiesToBeUsedInBadTowers = cms.vstring( ),\n UseEcalRecoveredHits = cms.bool( False ),\n MomConstrMethod = cms.int32( 1 ),\n MomHEDepth = cms.double( 0.4 ),\n HcalThreshold = cms.double( -1000.0 ),\n HF2Weights = cms.vdouble( ),\n HOWeights = cms.vdouble( ),\n EEGrid = cms.vdouble( ),\n UseSymEBTreshold = cms.bool( False ),\n EEWeights = cms.vdouble( ),\n EEWeight = cms.double( 1.0 ),\n UseHO = cms.bool( False ),\n HBWeights = cms.vdouble( ),\n HF1Weight = cms.double( 1.0 ),\n HF2Grid = cms.vdouble( ),\n HEDWeights = cms.vdouble( ),\n HEDGrid = cms.vdouble( ),\n EBWeight = cms.double( 1.0 ),\n HF1Grid = cms.vdouble( ),\n EBWeights = cms.vdouble( ),\n HOWeight = cms.double( 1.0 ),\n HESWeight = cms.double( 1.0 ),\n HESThreshold = cms.double( 0.4 ),\n hbheInput = cms.InputTag( \"hltHbhereco\" ),\n HF2Weight = cms.double( 1.0 ),\n HF2Threshold = cms.double( 1.8 ),\n HcalAcceptSeverityLevel = cms.uint32( 11 ),\n EEThreshold = cms.double( 0.3 ),\n HOThresholdPlus1 = cms.double( 1.1 ),\n HOThresholdPlus2 = cms.double( 1.1 ),\n HF1Weights = cms.vdouble( ),\n hoInput = cms.InputTag( \"hltHoreco\" ),\n HF1Threshold = cms.double( 1.2 ),\n HOThresholdMinus1 = cms.double( 1.1 ),\n HESGrid = cms.vdouble( ),\n EcutTower = cms.double( -1000.0 ),\n UseRejectedRecoveredEcalHits = cms.bool( False ),\n UseEtEETreshold = cms.bool( False ),\n HESWeights = cms.vdouble( ),\n EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',\n 'kWeird',\n 'kBad' ),\n HEDWeight = cms.double( 1.0 ),\n UseSymEETreshold = cms.bool( False ),\n HEDThreshold = cms.double( 0.4 ),\n EBThreshold = cms.double( 0.07 ),\n UseRejectedHitsOnly = cms.bool( False ),\n UseHcalRecoveredHits = cms.bool( True ),\n HOThresholdMinus2 = cms.double( 1.1 ),\n HOThreshold0 = cms.double( 1.1 ),\n ecalInputs = cms.VInputTag( 'hltEcalRecHit:EcalRecHitsEB','hltEcalRecHit:EcalRecHitsEE' ),\n UseRejectedRecoveredHcalHits = cms.bool( False ),\n MomEBDepth = cms.double( 0.3 ),\n HBWeight = cms.double( 1.0 ),\n HOGrid = cms.vdouble( ),\n EBGrid = cms.vdouble( )\n)\nhltAK4CaloJetsPF = cms.EDProducer( \"FastjetJetProducer\",\n Active_Area_Repeats = cms.int32( 5 ),\n doAreaFastjet = cms.bool( False ),\n voronoiRfact = cms.double( -9.0 ),\n maxBadHcalCells = cms.uint32( 9999999 ),\n doAreaDiskApprox = cms.bool( False ),\n maxRecoveredEcalCells = cms.uint32( 9999999 ),\n jetType = cms.string( \"CaloJet\" ),\n minSeed = cms.uint32( 0 ),\n Ghost_EtaMax = cms.double( 6.0 ),\n doRhoFastjet = cms.bool( False ),\n 
jetAlgorithm = cms.string( \"AntiKt\" ),\n nSigmaPU = cms.double( 1.0 ),\n GhostArea = cms.double( 0.01 ),\n Rho_EtaMax = cms.double( 4.4 ),\n maxBadEcalCells = cms.uint32( 9999999 ),\n useDeterministicSeed = cms.bool( True ),\n doPVCorrection = cms.bool( False ),\n maxRecoveredHcalCells = cms.uint32( 9999999 ),\n rParam = cms.double( 0.4 ),\n maxProblematicHcalCells = cms.uint32( 9999999 ),\n doOutputJets = cms.bool( True ),\n src = cms.InputTag( \"hltTowerMakerForPF\" ),\n inputEtMin = cms.double( 0.3 ),\n puPtMin = cms.double( 10.0 ),\n srcPVs = cms.InputTag( \"NotUsed\" ),\n jetPtMin = cms.double( 1.0 ),\n radiusPU = cms.double( 0.4 ),\n maxProblematicEcalCells = cms.uint32( 9999999 ),\n doPUOffsetCorr = cms.bool( False ),\n inputEMin = cms.double( 0.0 ),\n useMassDropTagger = cms.bool( False ),\n muMin = cms.double( -1.0 ),\n subtractorName = cms.string( \"\" ),\n muCut = cms.double( -1.0 ),\n subjetPtMin = cms.double( -1.0 ),\n useTrimming = cms.bool( False ),\n muMax = cms.double( -1.0 ),\n yMin = cms.double( -1.0 ),\n useFiltering = cms.bool( False ),\n rFilt = cms.double( -1.0 ),\n yMax = cms.double( -1.0 ),\n zcut = cms.double( -1.0 ),\n MinVtxNdof = cms.int32( 5 ),\n MaxVtxZ = cms.double( 15.0 ),\n UseOnlyVertexTracks = cms.bool( False ),\n dRMin = cms.double( -1.0 ),\n nFilt = cms.int32( -1 ),\n usePruning = cms.bool( False ),\n maxDepth = cms.int32( -1 ),\n yCut = cms.double( -1.0 ),\n DzTrVtxMax = cms.double( 0.0 ),\n UseOnlyOnePV = cms.bool( False ),\n rcut_factor = cms.double( -1.0 ),\n sumRecHits = cms.bool( False ),\n trimPtFracMin = cms.double( -1.0 ),\n dRMax = cms.double( -1.0 ),\n DxyTrVtxMax = cms.double( 0.0 ),\n useCMSBoostedTauSeedingAlgorithm = cms.bool( False )\n)\nhltAK4CaloJetsPFEt5 = cms.EDFilter( \"EtMinCaloJetSelector\",\n filter = cms.bool( False ),\n src = cms.InputTag( \"hltAK4CaloJetsPF\" ),\n etMin = cms.double( 5.0 )\n)\nhltMuonDTDigis = cms.EDProducer( \"DTUnpackingModule\",\n useStandardFEDid = cms.bool( True ),\n maxFEDid = cms.untracked.int32( 779 ),\n inputLabel = cms.InputTag( \"rawDataCollector\" ),\n minFEDid = cms.untracked.int32( 770 ),\n dataType = cms.string( \"DDU\" ),\n readOutParameters = cms.PSet( \n debug = cms.untracked.bool( False ),\n rosParameters = cms.PSet( \n writeSC = cms.untracked.bool( True ),\n readingDDU = cms.untracked.bool( True ),\n performDataIntegrityMonitor = cms.untracked.bool( False ),\n readDDUIDfromDDU = cms.untracked.bool( True ),\n debug = cms.untracked.bool( False ),\n localDAQ = cms.untracked.bool( False )\n ),\n localDAQ = cms.untracked.bool( False ),\n performDataIntegrityMonitor = cms.untracked.bool( False )\n ),\n dqmOnly = cms.bool( False )\n)\nhltDt1DRecHits = cms.EDProducer( \"DTRecHitProducer\",\n debug = cms.untracked.bool( False ),\n recAlgoConfig = cms.PSet( \n tTrigMode = cms.string( \"DTTTrigSyncFromDB\" ),\n minTime = cms.double( -3.0 ),\n stepTwoFromDigi = cms.bool( False ),\n doVdriftCorr = cms.bool( True ),\n debug = cms.untracked.bool( False ),\n maxTime = cms.double( 420.0 ),\n tTrigModeConfig = cms.PSet( \n vPropWire = cms.double( 24.4 ),\n doTOFCorrection = cms.bool( True ),\n tofCorrType = cms.int32( 0 ),\n wirePropCorrType = cms.int32( 0 ),\n tTrigLabel = cms.string( \"\" ),\n doWirePropCorrection = cms.bool( True ),\n doT0Correction = cms.bool( True ),\n debug = cms.untracked.bool( False )\n ),\n useUncertDB = cms.bool( True )\n ),\n dtDigiLabel = cms.InputTag( \"hltMuonDTDigis\" ),\n recAlgo = cms.string( \"DTLinearDriftFromDBAlgo\" )\n)\nhltDt4DSegments = cms.EDProducer( 
\"DTRecSegment4DProducer\",\n debug = cms.untracked.bool( False ),\n Reco4DAlgoName = cms.string( \"DTCombinatorialPatternReco4D\" ),\n recHits2DLabel = cms.InputTag( \"dt2DSegments\" ),\n recHits1DLabel = cms.InputTag( \"hltDt1DRecHits\" ),\n Reco4DAlgoConfig = cms.PSet( \n segmCleanerMode = cms.int32( 2 ),\n Reco2DAlgoName = cms.string( \"DTCombinatorialPatternReco\" ),\n recAlgoConfig = cms.PSet( \n tTrigMode = cms.string( \"DTTTrigSyncFromDB\" ),\n minTime = cms.double( -3.0 ),\n stepTwoFromDigi = cms.bool( False ),\n doVdriftCorr = cms.bool( True ),\n debug = cms.untracked.bool( False ),\n maxTime = cms.double( 420.0 ),\n tTrigModeConfig = cms.PSet( \n vPropWire = cms.double( 24.4 ),\n doTOFCorrection = cms.bool( True ),\n tofCorrType = cms.int32( 0 ),\n wirePropCorrType = cms.int32( 0 ),\n tTrigLabel = cms.string( \"\" ),\n doWirePropCorrection = cms.bool( True ),\n doT0Correction = cms.bool( True ),\n debug = cms.untracked.bool( False )\n ),\n useUncertDB = cms.bool( True )\n ),\n nSharedHitsMax = cms.int32( 2 ),\n hit_afterT0_resolution = cms.double( 0.03 ),\n Reco2DAlgoConfig = cms.PSet( \n segmCleanerMode = cms.int32( 2 ),\n recAlgoConfig = cms.PSet( \n tTrigMode = cms.string( \"DTTTrigSyncFromDB\" ),\n minTime = cms.double( -3.0 ),\n stepTwoFromDigi = cms.bool( False ),\n doVdriftCorr = cms.bool( True ),\n debug = cms.untracked.bool( False ),\n maxTime = cms.double( 420.0 ),\n tTrigModeConfig = cms.PSet( \n vPropWire = cms.double( 24.4 ),\n doTOFCorrection = cms.bool( True ),\n tofCorrType = cms.int32( 0 ),\n wirePropCorrType = cms.int32( 0 ),\n tTrigLabel = cms.string( \"\" ),\n doWirePropCorrection = cms.bool( True ),\n doT0Correction = cms.bool( True ),\n debug = cms.untracked.bool( False )\n ),\n useUncertDB = cms.bool( True )\n ),\n nSharedHitsMax = cms.int32( 2 ),\n AlphaMaxPhi = cms.double( 1.0 ),\n hit_afterT0_resolution = cms.double( 0.03 ),\n MaxAllowedHits = cms.uint32( 50 ),\n performT0_vdriftSegCorrection = cms.bool( False ),\n AlphaMaxTheta = cms.double( 0.9 ),\n debug = cms.untracked.bool( False ),\n recAlgo = cms.string( \"DTLinearDriftFromDBAlgo\" ),\n nUnSharedHitsMin = cms.int32( 2 ),\n performT0SegCorrection = cms.bool( False ),\n perform_delta_rejecting = cms.bool( False )\n ),\n performT0_vdriftSegCorrection = cms.bool( False ),\n debug = cms.untracked.bool( False ),\n recAlgo = cms.string( \"DTLinearDriftFromDBAlgo\" ),\n nUnSharedHitsMin = cms.int32( 2 ),\n AllDTRecHits = cms.bool( True ),\n performT0SegCorrection = cms.bool( False ),\n perform_delta_rejecting = cms.bool( False )\n )\n)\nhltMuonCSCDigis = cms.EDProducer( \"CSCDCCUnpacker\",\n PrintEventNumber = cms.untracked.bool( False ),\n SuppressZeroLCT = cms.untracked.bool( True ),\n UseExaminer = cms.bool( True ),\n Debug = cms.untracked.bool( False ),\n ErrorMask = cms.uint32( 0x0 ),\n InputObjects = cms.InputTag( \"rawDataCollector\" ),\n ExaminerMask = cms.uint32( 0x1febf3f6 ),\n runDQM = cms.untracked.bool( False ),\n UnpackStatusDigis = cms.bool( False ),\n VisualFEDInspect = cms.untracked.bool( False ),\n FormatedEventDump = cms.untracked.bool( False ),\n UseFormatStatus = cms.bool( True ),\n UseSelectiveUnpacking = cms.bool( True ),\n VisualFEDShort = cms.untracked.bool( False )\n)\nhltCsc2DRecHits = cms.EDProducer( \"CSCRecHitDProducer\",\n XTasymmetry_ME1b = cms.double( 0.0 ),\n XTasymmetry_ME1a = cms.double( 0.0 ),\n ConstSyst_ME1a = cms.double( 0.022 ),\n ConstSyst_ME1b = cms.double( 0.007 ),\n XTasymmetry_ME41 = cms.double( 0.0 ),\n CSCStripxtalksOffset = cms.double( 0.03 ),\n 
CSCUseCalibrations = cms.bool( True ),\n CSCUseTimingCorrections = cms.bool( True ),\n CSCNoOfTimeBinsForDynamicPedestal = cms.int32( 2 ),\n XTasymmetry_ME22 = cms.double( 0.0 ),\n UseFivePoleFit = cms.bool( True ),\n XTasymmetry_ME21 = cms.double( 0.0 ),\n ConstSyst_ME21 = cms.double( 0.0 ),\n CSCDebug = cms.untracked.bool( False ),\n ConstSyst_ME22 = cms.double( 0.0 ),\n CSCUseGasGainCorrections = cms.bool( False ),\n XTasymmetry_ME31 = cms.double( 0.0 ),\n readBadChambers = cms.bool( True ),\n NoiseLevel_ME13 = cms.double( 8.0 ),\n NoiseLevel_ME12 = cms.double( 9.0 ),\n NoiseLevel_ME32 = cms.double( 9.0 ),\n NoiseLevel_ME31 = cms.double( 9.0 ),\n XTasymmetry_ME32 = cms.double( 0.0 ),\n ConstSyst_ME41 = cms.double( 0.0 ),\n CSCStripClusterSize = cms.untracked.int32( 3 ),\n CSCStripClusterChargeCut = cms.double( 25.0 ),\n CSCStripPeakThreshold = cms.double( 10.0 ),\n readBadChannels = cms.bool( False ),\n UseParabolaFit = cms.bool( False ),\n XTasymmetry_ME13 = cms.double( 0.0 ),\n XTasymmetry_ME12 = cms.double( 0.0 ),\n wireDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCWireDigi' ),\n ConstSyst_ME12 = cms.double( 0.0 ),\n ConstSyst_ME13 = cms.double( 0.0 ),\n ConstSyst_ME32 = cms.double( 0.0 ),\n ConstSyst_ME31 = cms.double( 0.0 ),\n UseAverageTime = cms.bool( False ),\n NoiseLevel_ME1a = cms.double( 7.0 ),\n NoiseLevel_ME1b = cms.double( 8.0 ),\n CSCWireClusterDeltaT = cms.int32( 1 ),\n CSCUseStaticPedestals = cms.bool( False ),\n stripDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCStripDigi' ),\n CSCstripWireDeltaTime = cms.int32( 8 ),\n NoiseLevel_ME21 = cms.double( 9.0 ),\n NoiseLevel_ME22 = cms.double( 9.0 ),\n NoiseLevel_ME41 = cms.double( 9.0 )\n)\nhltCscSegments = cms.EDProducer( \"CSCSegmentProducer\",\n inputObjects = cms.InputTag( \"hltCsc2DRecHits\" ),\n algo_psets = cms.VPSet( \n cms.PSet( chamber_types = cms.vstring( 'ME1/a',\n 'ME1/b',\n 'ME1/2',\n 'ME1/3',\n 'ME2/1',\n 'ME2/2',\n 'ME3/1',\n 'ME3/2',\n 'ME4/1',\n 'ME4/2' ),\n algo_name = cms.string( \"CSCSegAlgoST\" ),\n parameters_per_chamber_type = cms.vint32( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 ),\n algo_psets = cms.VPSet( \n cms.PSet( maxRatioResidualPrune = cms.double( 3.0 ),\n yweightPenalty = cms.double( 1.5 ),\n maxRecHitsInCluster = cms.int32( 20 ),\n dPhiFineMax = cms.double( 0.025 ),\n preClusteringUseChaining = cms.bool( True ),\n ForceCovariance = cms.bool( False ),\n hitDropLimit6Hits = cms.double( 0.3333 ),\n NormChi2Cut2D = cms.double( 20.0 ),\n BPMinImprovement = cms.double( 10000.0 ),\n Covariance = cms.double( 0.0 ),\n tanPhiMax = cms.double( 0.5 ),\n SeedBig = cms.double( 0.0015 ),\n onlyBestSegment = cms.bool( False ),\n dRPhiFineMax = cms.double( 8.0 ),\n SeedSmall = cms.double( 2.0E-4 ),\n curvePenalty = cms.double( 2.0 ),\n dXclusBoxMax = cms.double( 4.0 ),\n BrutePruning = cms.bool( True ),\n curvePenaltyThreshold = cms.double( 0.85 ),\n CorrectTheErrors = cms.bool( True ),\n hitDropLimit4Hits = cms.double( 0.6 ),\n useShowering = cms.bool( False ),\n CSCDebug = cms.untracked.bool( False ),\n tanThetaMax = cms.double( 1.2 ),\n NormChi2Cut3D = cms.double( 10.0 ),\n minHitsPerSegment = cms.int32( 3 ),\n ForceCovarianceAll = cms.bool( False ),\n yweightPenaltyThreshold = cms.double( 1.0 ),\n prePrunLimit = cms.double( 3.17 ),\n hitDropLimit5Hits = cms.double( 0.8 ),\n preClustering = cms.bool( True ),\n prePrun = cms.bool( True ),\n maxDPhi = cms.double( 999.0 ),\n maxDTheta = cms.double( 999.0 ),\n Pruning = cms.bool( True ),\n dYclusBoxMax = cms.double( 8.0 )\n ),\n cms.PSet( maxRatioResidualPrune = 
cms.double( 3.0 ),\n yweightPenalty = cms.double( 1.5 ),\n maxRecHitsInCluster = cms.int32( 24 ),\n dPhiFineMax = cms.double( 0.025 ),\n preClusteringUseChaining = cms.bool( True ),\n ForceCovariance = cms.bool( False ),\n hitDropLimit6Hits = cms.double( 0.3333 ),\n NormChi2Cut2D = cms.double( 20.0 ),\n BPMinImprovement = cms.double( 10000.0 ),\n Covariance = cms.double( 0.0 ),\n tanPhiMax = cms.double( 0.5 ),\n SeedBig = cms.double( 0.0015 ),\n onlyBestSegment = cms.bool( False ),\n dRPhiFineMax = cms.double( 8.0 ),\n SeedSmall = cms.double( 2.0E-4 ),\n curvePenalty = cms.double( 2.0 ),\n dXclusBoxMax = cms.double( 4.0 ),\n BrutePruning = cms.bool( True ),\n curvePenaltyThreshold = cms.double( 0.85 ),\n CorrectTheErrors = cms.bool( True ),\n hitDropLimit4Hits = cms.double( 0.6 ),\n useShowering = cms.bool( False ),\n CSCDebug = cms.untracked.bool( False ),\n tanThetaMax = cms.double( 1.2 ),\n NormChi2Cut3D = cms.double( 10.0 ),\n minHitsPerSegment = cms.int32( 3 ),\n ForceCovarianceAll = cms.bool( False ),\n yweightPenaltyThreshold = cms.double( 1.0 ),\n prePrunLimit = cms.double( 3.17 ),\n hitDropLimit5Hits = cms.double( 0.8 ),\n preClustering = cms.bool( True ),\n prePrun = cms.bool( True ),\n maxDPhi = cms.double( 999.0 ),\n maxDTheta = cms.double( 999.0 ),\n Pruning = cms.bool( True ),\n dYclusBoxMax = cms.double( 8.0 )\n )\n )\n )\n ),\n algo_type = cms.int32( 1 )\n)\nhltMuonRPCDigis = cms.EDProducer( \"RPCUnpackingModule\",\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n doSynchro = cms.bool( False )\n)\nhltRpcRecHits = cms.EDProducer( \"RPCRecHitProducer\",\n recAlgoConfig = cms.PSet( ),\n deadvecfile = cms.FileInPath( \"RecoLocalMuon/RPCRecHit/data/RPCDeadVec.dat\" ),\n rpcDigiLabel = cms.InputTag( \"hltMuonRPCDigis\" ),\n maskvecfile = cms.FileInPath( \"RecoLocalMuon/RPCRecHit/data/RPCMaskVec.dat\" ),\n recAlgo = cms.string( \"RPCRecHitStandardAlgo\" ),\n deadSource = cms.string( \"File\" ),\n maskSource = cms.string( \"File\" )\n)\nhltL2OfflineMuonSeeds = cms.EDProducer( \"MuonSeedGenerator\",\n SMB_21 = cms.vdouble( 1.043, -0.124, 0.0, 0.183, 0.0, 0.0 ),\n SMB_20 = cms.vdouble( 1.011, -0.052, 0.0, 0.188, 0.0, 0.0 ),\n SMB_22 = cms.vdouble( 1.474, -0.758, 0.0, 0.185, 0.0, 0.0 ),\n OL_2213 = cms.vdouble( 0.117, 0.0, 0.0, 0.044, 0.0, 0.0 ),\n SME_11 = cms.vdouble( 3.295, -1.527, 0.112, 0.378, 0.02, 0.0 ),\n SME_13 = cms.vdouble( -1.286, 1.711, 0.0, 0.356, 0.0, 0.0 ),\n SME_12 = cms.vdouble( 0.102, 0.599, 0.0, 0.38, 0.0, 0.0 ),\n DT_34_2_scale = cms.vdouble( -11.901897, 0.0 ),\n OL_1213_0_scale = cms.vdouble( -4.488158, 0.0 ),\n OL_1222_0_scale = cms.vdouble( -5.810449, 0.0 ),\n DT_13 = cms.vdouble( 0.315, 0.068, -0.127, 0.051, -0.002, 0.0 ),\n DT_12 = cms.vdouble( 0.183, 0.054, -0.087, 0.028, 0.002, 0.0 ),\n DT_14 = cms.vdouble( 0.359, 0.052, -0.107, 0.072, -0.004, 0.0 ),\n CSC_13_3_scale = cms.vdouble( -1.701268, 0.0 ),\n DT_24_2_scale = cms.vdouble( -6.63094, 0.0 ),\n CSC_23 = cms.vdouble( -0.081, 0.113, -0.029, 0.015, 0.008, 0.0 ),\n CSC_24 = cms.vdouble( 0.004, 0.021, -0.002, 0.053, 0.0, 0.0 ),\n OL_2222 = cms.vdouble( 0.107, 0.0, 0.0, 0.04, 0.0, 0.0 ),\n DT_14_2_scale = cms.vdouble( -4.808546, 0.0 ),\n SMB_10 = cms.vdouble( 1.387, -0.038, 0.0, 0.19, 0.0, 0.0 ),\n SMB_11 = cms.vdouble( 1.247, 0.72, -0.802, 0.229, -0.075, 0.0 ),\n SMB_12 = cms.vdouble( 2.128, -0.956, 0.0, 0.199, 0.0, 0.0 ),\n SME_21 = cms.vdouble( -0.529, 1.194, -0.358, 0.472, 0.086, 0.0 ),\n SME_22 = cms.vdouble( -1.207, 1.491, -0.251, 0.189, 0.243, 0.0 ),\n DT_13_2_scale = cms.vdouble( -4.257687, 0.0 
),\n CSC_34 = cms.vdouble( 0.062, -0.067, 0.019, 0.021, 0.003, 0.0 ),\n SME_22_0_scale = cms.vdouble( -3.457901, 0.0 ),\n DT_24_1_scale = cms.vdouble( -7.490909, 0.0 ),\n OL_1232_0_scale = cms.vdouble( -5.964634, 0.0 ),\n DT_23_1_scale = cms.vdouble( -5.320346, 0.0 ),\n SME_13_0_scale = cms.vdouble( 0.104905, 0.0 ),\n SMB_22_0_scale = cms.vdouble( 1.346681, 0.0 ),\n CSC_12_1_scale = cms.vdouble( -6.434242, 0.0 ),\n DT_34 = cms.vdouble( 0.044, 0.004, -0.013, 0.029, 0.003, 0.0 ),\n SME_32 = cms.vdouble( -0.901, 1.333, -0.47, 0.41, 0.073, 0.0 ),\n SME_31 = cms.vdouble( -1.594, 1.482, -0.317, 0.487, 0.097, 0.0 ),\n CSC_13_2_scale = cms.vdouble( -6.077936, 0.0 ),\n crackEtas = cms.vdouble( 0.2, 1.6, 1.7 ),\n SME_11_0_scale = cms.vdouble( 1.325085, 0.0 ),\n SMB_20_0_scale = cms.vdouble( 1.486168, 0.0 ),\n DT_13_1_scale = cms.vdouble( -4.520923, 0.0 ),\n CSC_24_1_scale = cms.vdouble( -6.055701, 0.0 ),\n CSC_01_1_scale = cms.vdouble( -1.915329, 0.0 ),\n DT_23 = cms.vdouble( 0.13, 0.023, -0.057, 0.028, 0.004, 0.0 ),\n DT_24 = cms.vdouble( 0.176, 0.014, -0.051, 0.051, 0.003, 0.0 ),\n SMB_12_0_scale = cms.vdouble( 2.283221, 0.0 ),\n deltaPhiSearchWindow = cms.double( 0.25 ),\n SMB_30_0_scale = cms.vdouble( -3.629838, 0.0 ),\n SME_42 = cms.vdouble( -0.003, 0.005, 0.005, 0.608, 0.076, 0.0 ),\n SME_41 = cms.vdouble( -0.003, 0.005, 0.005, 0.608, 0.076, 0.0 ),\n deltaEtaSearchWindow = cms.double( 0.2 ),\n CSC_12_2_scale = cms.vdouble( -1.63622, 0.0 ),\n DT_34_1_scale = cms.vdouble( -13.783765, 0.0 ),\n CSC_34_1_scale = cms.vdouble( -11.520507, 0.0 ),\n OL_2213_0_scale = cms.vdouble( -7.239789, 0.0 ),\n SMB_32_0_scale = cms.vdouble( -3.054156, 0.0 ),\n CSC_12_3_scale = cms.vdouble( -1.63622, 0.0 ),\n deltaEtaCrackSearchWindow = cms.double( 0.25 ),\n SME_21_0_scale = cms.vdouble( -0.040862, 0.0 ),\n OL_1232 = cms.vdouble( 0.184, 0.0, 0.0, 0.066, 0.0, 0.0 ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n SMB_10_0_scale = cms.vdouble( 2.448566, 0.0 ),\n EnableDTMeasurement = cms.bool( True ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n CSC_23_2_scale = cms.vdouble( -6.079917, 0.0 ),\n scaleDT = cms.bool( True ),\n DT_12_2_scale = cms.vdouble( -3.518165, 0.0 ),\n OL_1222 = cms.vdouble( 0.848, -0.591, 0.0, 0.062, 0.0, 0.0 ),\n CSC_23_1_scale = cms.vdouble( -19.084285, 0.0 ),\n OL_1213 = cms.vdouble( 0.96, -0.737, 0.0, 0.052, 0.0, 0.0 ),\n CSC_02 = cms.vdouble( 0.612, -0.207, 0.0, 0.067, -0.001, 0.0 ),\n CSC_03 = cms.vdouble( 0.787, -0.338, 0.029, 0.101, -0.008, 0.0 ),\n CSC_01 = cms.vdouble( 0.166, 0.0, 0.0, 0.031, 0.0, 0.0 ),\n SMB_32 = cms.vdouble( 0.67, -0.327, 0.0, 0.22, 0.0, 0.0 ),\n SMB_30 = cms.vdouble( 0.505, -0.022, 0.0, 0.215, 0.0, 0.0 ),\n SMB_31 = cms.vdouble( 0.549, -0.145, 0.0, 0.207, 0.0, 0.0 ),\n crackWindow = cms.double( 0.04 ),\n CSC_14_3_scale = cms.vdouble( -1.969563, 0.0 ),\n SMB_31_0_scale = cms.vdouble( -3.323768, 0.0 ),\n DT_12_1_scale = cms.vdouble( -3.692398, 0.0 ),\n SMB_21_0_scale = cms.vdouble( 1.58384, 0.0 ),\n DT_23_2_scale = cms.vdouble( -5.117625, 0.0 ),\n SME_12_0_scale = cms.vdouble( 2.279181, 0.0 ),\n DT_14_1_scale = cms.vdouble( -5.644816, 0.0 ),\n beamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SMB_11_0_scale = cms.vdouble( 2.56363, 0.0 ),\n EnableCSCMeasurement = cms.bool( True ),\n CSC_14 = cms.vdouble( 0.606, -0.181, -0.002, 0.111, -0.003, 0.0 ),\n OL_2222_0_scale = cms.vdouble( -7.667231, 0.0 ),\n CSC_13 = cms.vdouble( 0.901, -1.302, 0.533, 0.045, 0.005, 0.0 ),\n CSC_12 = cms.vdouble( -0.161, 0.254, -0.047, 0.042, -0.007, 0.0 
)\n)\nhltL2MuonSeeds = cms.EDProducer( \"L2MuonSeedGenerator\",\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'SteppingHelixPropagatorAny' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n InputObjects = cms.InputTag( \"hltL1extraParticles\" ),\n L1MaxEta = cms.double( 2.5 ),\n OfflineSeedLabel = cms.untracked.InputTag( \"hltL2OfflineMuonSeeds\" ),\n L1MinPt = cms.double( 0.0 ),\n L1MinQuality = cms.uint32( 1 ),\n GMTReadoutCollection = cms.InputTag( \"hltGtDigis\" ),\n UseUnassociatedL1 = cms.bool( False ),\n UseOfflineSeed = cms.untracked.bool( True ),\n Propagator = cms.string( \"SteppingHelixPropagatorAny\" )\n)\nhltL2Muons = cms.EDProducer( \"L2MuonProducer\",\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny',\n 'hltESPFastSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n InputObjects = cms.InputTag( \"hltL2MuonSeeds\" ),\n SeedTransformerParameters = cms.PSet( \n Fitter = cms.string( \"hltESPKFFittingSmootherForL2Muon\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n NMinRecHits = cms.uint32( 2 ),\n UseSubRecHits = cms.bool( False ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n RescaleError = cms.double( 100.0 )\n ),\n L2TrajBuilderParameters = cms.PSet( \n DoRefit = cms.bool( False ),\n SeedPropagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n FilterParameters = cms.PSet( \n NumberOfSigma = cms.double( 3.0 ),\n FitDirection = cms.string( \"insideOut\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n MaxChi2 = cms.double( 1000.0 ),\n MuonTrajectoryUpdatorParameters = cms.PSet( \n MaxChi2 = cms.double( 25.0 ),\n RescaleErrorFactor = cms.double( 100.0 ),\n Granularity = cms.int32( 0 ),\n ExcludeRPCFromFit = cms.bool( False ),\n UseInvalidHits = cms.bool( True ),\n RescaleError = cms.bool( False )\n ),\n EnableRPCMeasurement = cms.bool( True ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n EnableDTMeasurement = cms.bool( True ),\n RPCRecSegmentLabel = cms.InputTag( \"hltRpcRecHits\" ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n EnableCSCMeasurement = cms.bool( True )\n ),\n NavigationType = cms.string( \"Standard\" ),\n SeedTransformerParameters = cms.PSet( \n Fitter = cms.string( \"hltESPKFFittingSmootherForL2Muon\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n NMinRecHits = cms.uint32( 2 ),\n UseSubRecHits = cms.bool( False ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n RescaleError = cms.double( 100.0 )\n ),\n DoBackwardFilter = cms.bool( True ),\n SeedPosition = cms.string( \"in\" ),\n BWFilterParameters = cms.PSet( \n NumberOfSigma = cms.double( 3.0 ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n FitDirection = cms.string( \"outsideIn\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n MaxChi2 = cms.double( 100.0 ),\n MuonTrajectoryUpdatorParameters = cms.PSet( \n MaxChi2 = cms.double( 25.0 ),\n RescaleErrorFactor = cms.double( 100.0 ),\n Granularity = cms.int32( 0 ),\n ExcludeRPCFromFit = cms.bool( False ),\n UseInvalidHits = cms.bool( True ),\n RescaleError = cms.bool( False )\n ),\n EnableRPCMeasurement = cms.bool( True ),\n BWSeedType = cms.string( \"fromGenerator\" ),\n EnableDTMeasurement = cms.bool( True ),\n RPCRecSegmentLabel = 
cms.InputTag( \"hltRpcRecHits\" ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n EnableCSCMeasurement = cms.bool( True )\n ),\n DoSeedRefit = cms.bool( False )\n ),\n DoSeedRefit = cms.bool( False ),\n TrackLoaderParameters = cms.PSet( \n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n DoSmoothing = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n BeamSpotPosition = cms.vdouble( 0.0, 0.0, 0.0 ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n MuonTrajectoryBuilder = cms.string( \"Exhaustive\" )\n)\nhltL2MuonCandidates = cms.EDProducer( \"L2MuonCandidateProducer\",\n InputObjects = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )\n)\nhltSiPixelDigis = cms.EDProducer( \"SiPixelRawToDigi\",\n UseQualityInfo = cms.bool( False ),\n UsePilotBlade = cms.bool( False ),\n UsePhase1 = cms.bool( False ),\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n IncludeErrors = cms.bool( False ),\n ErrorList = cms.vint32( ),\n Regions = cms.PSet( ),\n Timing = cms.untracked.bool( False ),\n UserErrorList = cms.vint32( )\n)\nhltSiPixelClusters = cms.EDProducer( \"SiPixelClusterProducer\",\n src = cms.InputTag( \"hltSiPixelDigis\" ),\n ChannelThreshold = cms.int32( 1000 ),\n maxNumberOfClusters = cms.int32( 20000 ),\n VCaltoElectronGain = cms.int32( 65 ),\n MissCalibrate = cms.untracked.bool( True ),\n SplitClusters = cms.bool( False ),\n VCaltoElectronOffset = cms.int32( -414 ),\n payloadType = cms.string( \"HLT\" ),\n SeedThreshold = cms.int32( 1000 ),\n ClusterThreshold = cms.double( 4000.0 )\n)\nhltSiPixelClustersCache = cms.EDProducer( \"SiPixelClusterShapeCacheProducer\",\n src = cms.InputTag( \"hltSiPixelClusters\" ),\n onDemand = cms.bool( False )\n)\nhltSiPixelRecHits = cms.EDProducer( \"SiPixelRecHitConverter\",\n VerboseLevel = cms.untracked.int32( 0 ),\n src = cms.InputTag( \"hltSiPixelClusters\" ),\n CPE = cms.string( \"hltESPPixelCPEGeneric\" )\n)\nhltSiStripExcludedFEDListProducer = cms.EDProducer( \"SiStripExcludedFEDListProducer\",\n ProductLabel = cms.InputTag( \"rawDataCollector\" )\n)\nhltSiStripRawToClustersFacility = cms.EDProducer( \"SiStripClusterizerFromRaw\",\n ProductLabel = cms.InputTag( \"rawDataCollector\" ),\n DoAPVEmulatorCheck = cms.bool( False ),\n Algorithms = cms.PSet( \n SiStripFedZeroSuppressionMode = cms.uint32( 4 ),\n CommonModeNoiseSubtractionMode = cms.string( \"Median\" ),\n PedestalSubtractionFedMode = cms.bool( True ),\n TruncateInSuppressor = cms.bool( True ),\n doAPVRestore = cms.bool( False ),\n useCMMeanMap = cms.bool( False )\n ),\n Clusterizer = cms.PSet( \n ChannelThreshold = cms.double( 2.0 ),\n MaxSequentialBad = cms.uint32( 1 ),\n MaxSequentialHoles = cms.uint32( 0 ),\n Algorithm = cms.string( \"ThreeThresholdAlgorithm\" ),\n MaxAdjacentBad = cms.uint32( 0 ),\n QualityLabel = cms.string( \"\" ),\n SeedThreshold = cms.double( 3.0 ),\n ClusterThreshold = cms.double( 5.0 ),\n setDetId = cms.bool( True ),\n RemoveApvShots = cms.bool( True ),\n clusterChargeCut = cms.PSet( refToPSet_ = cms.string( \"HLTSiStripClusterChargeCutNone\" ) )\n ),\n onDemand = cms.bool( True )\n)\nhltSiStripClusters = cms.EDProducer( \"MeasurementTrackerEventProducer\",\n inactivePixelDetectorLabels = cms.VInputTag( ),\n 
stripClusterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),\n skipClusters = cms.InputTag( \"\" ),\n measurementTracker = cms.string( \"hltESPMeasurementTracker\" )\n)\nhltL3TrajSeedOIState = cms.EDProducer( \"TSGFromL2Muon\",\n TkSeedGenerator = cms.PSet( \n propagatorCompatibleName = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n option = cms.uint32( 3 ),\n maxChi2 = cms.double( 40.0 ),\n errorMatrixPset = cms.PSet( \n atIP = cms.bool( True ),\n action = cms.string( \"use\" ),\n errorMatrixValuesPSet = cms.PSet( \n pf3_V12 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V13 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V11 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V14 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V15 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n yAxis = cms.vdouble( 0.0, 1.0, 1.4, 10.0 ),\n pf3_V33 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n zAxis = cms.vdouble( -3.14159, 3.14159 ),\n pf3_V44 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n xAxis = cms.vdouble( 0.0, 13.0, 30.0, 70.0, 1000.0 ),\n pf3_V22 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V23 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V45 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V55 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V34 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V35 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V25 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V24 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n )\n )\n ),\n propagatorName = cms.string( \"hltESPSteppingHelixPropagatorAlong\" ),\n manySeeds = cms.bool( False ),\n copyMuonRecHit = cms.bool( False ),\n ComponentName = cms.string( \"TSGForRoadSearch\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSteppingHelixPropagatorOpposite',\n 
'hltESPSteppingHelixPropagatorAlong' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n MuonTrackingRegionBuilder = cms.PSet( ),\n PCut = cms.double( 2.5 ),\n TrackerSeedCleaner = cms.PSet( ),\n PtCut = cms.double( 1.0 )\n)\nhltL3TrackCandidateFromL2OIState = cms.EDProducer( \"CkfTrajectoryMaker\",\n src = cms.InputTag( \"hltL3TrajSeedOIState\" ),\n reverseTrajectories = cms.bool( True ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n useHitsSplitting = cms.bool( False ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n doSeedingRegionRebuilding = cms.bool( False ),\n trackCandidateAlso = cms.bool( True ),\n TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( \"HLTPSetMuonCkfTrajectoryBuilderSeedHit\" ) ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuonCkfTrajectoryBuilderSeedHit\" ),\n maxNSeeds = cms.uint32( 100000 )\n)\nhltL3TkTracksFromL2OIState = cms.EDProducer( \"TrackProducer\",\n src = cms.InputTag( \"hltL3TrackCandidateFromL2OIState\" ),\n SimpleMagneticField = cms.string( \"\" ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n Fitter = cms.string( \"hltESPKFFittingSmoother\" ),\n useHitsSplitting = cms.bool( False ),\n MeasurementTracker = cms.string( \"\" ),\n AlgorithmName = cms.string( \"hltIterX\" ),\n alias = cms.untracked.string( \"\" ),\n NavigationSchool = cms.string( \"\" ),\n TrajectoryInEvent = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n GeometricInnerState = cms.bool( True ),\n useSimpleMF = cms.bool( False ),\n Propagator = cms.string( \"PropagatorWithMaterial\" )\n)\nhltL3MuonsOIState = cms.EDProducer( \"L3MuonProducer\",\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',\n 'SteppingHelixPropagatorAny',\n 'hltESPSmartPropagator',\n 'hltESPSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n L3TrajBuilderParameters = cms.PSet( \n ScaleTECyFactor = cms.double( -1.0 ),\n GlbRefitterParameters = cms.PSet( \n TrackerSkipSection = cms.int32( -1 ),\n DoPredictionsOnly = cms.bool( False ),\n PropDirForCosmics = cms.bool( False ),\n HitThreshold = cms.int32( 1 ),\n MuonHitsOption = cms.int32( 1 ),\n Chi2CutRPC = cms.double( 1.0 ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n Chi2CutCSC = cms.double( 150.0 ),\n Chi2CutDT = cms.double( 10.0 ),\n RefitRPCHits = cms.bool( True ),\n SkipStation = cms.int32( -1 ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n 
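# GlbRefitterParameters, as configured here: during the global (tracker+muon)
# refit, muon hits appear to be kept or dropped per subsystem according to the
# chi2 cuts above (Chi2CutDT = 10, Chi2CutCSC = 150, Chi2CutRPC = 1), and
# RefitRPCHits = True also includes RPC hits in the refit.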
TrackerSkipSystem = cms.int32( -1 ),\n DYTthrs = cms.vint32( 30, 15 )\n ),\n ScaleTECxFactor = cms.double( -1.0 ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.05 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.05 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n RefitRPCHits = cms.bool( True ),\n PCut = cms.double( 2.5 ),\n TrackTransformer = cms.PSet( \n DoPredictionsOnly = cms.bool( False ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n RefitRPCHits = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" )\n ),\n GlobalMuonTrackMatcher = cms.PSet( \n Pt_threshold1 = cms.double( 0.0 ),\n DeltaDCut_3 = cms.double( 15.0 ),\n MinP = cms.double( 2.5 ),\n MinPt = cms.double( 1.0 ),\n Chi2Cut_1 = cms.double( 50.0 ),\n Pt_threshold2 = cms.double( 9.99999999E8 ),\n LocChi2Cut = cms.double( 0.001 ),\n Eta_threshold = cms.double( 1.2 ),\n Quality_3 = cms.double( 7.0 ),\n Quality_2 = cms.double( 15.0 ),\n Chi2Cut_2 = cms.double( 50.0 ),\n Chi2Cut_3 = cms.double( 200.0 ),\n DeltaDCut_1 = cms.double( 40.0 ),\n DeltaRCut_2 = cms.double( 0.2 ),\n DeltaRCut_3 = cms.double( 1.0 ),\n DeltaDCut_2 = cms.double( 10.0 ),\n DeltaRCut_1 = cms.double( 0.1 ),\n Propagator = cms.string( \"hltESPSmartPropagator\" ),\n Quality_1 = cms.double( 20.0 )\n ),\n PtCut = cms.double( 1.0 ),\n TrackerPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n tkTrajLabel = cms.InputTag( \"hltL3TkTracksFromL2OIState\" ),\n tkTrajBeamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n tkTrajMaxChi2 = cms.double( 9999.0 ),\n tkTrajMaxDXYBeamSpot = cms.double( 0.2 ),\n tkTrajVertex = cms.InputTag( \"pixelVertices\" ),\n tkTrajUseVertex = cms.bool( False )\n ),\n TrackLoaderParameters = cms.PSet( \n PutTkTrackIntoEvent = cms.untracked.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SmoothTkTrack = cms.untracked.bool( False ),\n MuonSeededTracksInstance = cms.untracked.string( \"L2Seeded\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n Propagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( False ),\n DoSmoothing = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n MuonCollectionLabel = cms.InputTag( 
'hltL2Muons','UpdatedAtVtx' )\n)\nhltL3TrajSeedOIHit = cms.EDProducer( \"TSGFromL2Muon\",\n TkSeedGenerator = cms.PSet( \n PSetNames = cms.vstring( 'skipTSG',\n 'iterativeTSG' ),\n L3TkCollectionA = cms.InputTag( \"hltL3MuonsOIState\" ),\n iterativeTSG = cms.PSet( \n ErrorRescaling = cms.double( 3.0 ),\n beamSpot = cms.InputTag( \"unused\" ),\n MaxChi2 = cms.double( 40.0 ),\n errorMatrixPset = cms.PSet( \n atIP = cms.bool( True ),\n action = cms.string( \"use\" ),\n errorMatrixValuesPSet = cms.PSet( \n pf3_V12 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V13 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V11 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V14 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V15 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n yAxis = cms.vdouble( 0.0, 1.0, 1.4, 10.0 ),\n pf3_V33 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n zAxis = cms.vdouble( -3.14159, 3.14159 ),\n pf3_V44 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n xAxis = cms.vdouble( 0.0, 13.0, 30.0, 70.0, 1000.0 ),\n pf3_V22 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V23 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V45 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V55 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V34 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V35 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V25 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V24 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n )\n )\n ),\n UpdateState = cms.bool( True ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n SelectState = cms.bool( False ),\n SigmaZ = cms.double( 25.0 ),\n ResetMethod = cms.string( \"matrix\" ),\n ComponentName = cms.string( \"TSGFromPropagation\" ),\n UseVertexState = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAnyOpposite\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" )\n ),\n skipTSG = cms.PSet( ),\n ComponentName = cms.string( \"DualByL2TSG\" )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'PropagatorWithMaterial',\n 
'hltESPSmartPropagatorAnyOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n MuonTrackingRegionBuilder = cms.PSet( ),\n PCut = cms.double( 2.5 ),\n TrackerSeedCleaner = cms.PSet( \n cleanerFromSharedHits = cms.bool( True ),\n ptCleaner = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n directionCleaner = cms.bool( True )\n ),\n PtCut = cms.double( 1.0 )\n)\nhltL3TrackCandidateFromL2OIHit = cms.EDProducer( \"CkfTrajectoryMaker\",\n src = cms.InputTag( \"hltL3TrajSeedOIHit\" ),\n reverseTrajectories = cms.bool( True ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n useHitsSplitting = cms.bool( False ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n doSeedingRegionRebuilding = cms.bool( False ),\n trackCandidateAlso = cms.bool( True ),\n TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( \"HLTPSetMuonCkfTrajectoryBuilder\" ) ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuonCkfTrajectoryBuilder\" ),\n maxNSeeds = cms.uint32( 100000 )\n)\nhltL3TkTracksFromL2OIHit = cms.EDProducer( \"TrackProducer\",\n src = cms.InputTag( \"hltL3TrackCandidateFromL2OIHit\" ),\n SimpleMagneticField = cms.string( \"\" ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n Fitter = cms.string( \"hltESPKFFittingSmoother\" ),\n useHitsSplitting = cms.bool( False ),\n MeasurementTracker = cms.string( \"\" ),\n AlgorithmName = cms.string( \"hltIterX\" ),\n alias = cms.untracked.string( \"\" ),\n NavigationSchool = cms.string( \"\" ),\n TrajectoryInEvent = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n GeometricInnerState = cms.bool( True ),\n useSimpleMF = cms.bool( False ),\n Propagator = cms.string( \"PropagatorWithMaterial\" )\n)\nhltL3MuonsOIHit = cms.EDProducer( \"L3MuonProducer\",\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',\n 'SteppingHelixPropagatorAny',\n 'hltESPSmartPropagator',\n 'hltESPSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n L3TrajBuilderParameters = cms.PSet( \n ScaleTECyFactor = cms.double( -1.0 ),\n GlbRefitterParameters = cms.PSet( \n TrackerSkipSection = cms.int32( -1 ),\n DoPredictionsOnly = cms.bool( False ),\n PropDirForCosmics = cms.bool( False ),\n HitThreshold = cms.int32( 1 ),\n MuonHitsOption = cms.int32( 1 ),\n Chi2CutRPC = cms.double( 1.0 ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n 
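# This OIHit branch reuses the same global-refit settings as hltL3MuonsOIState
# above; the two outside-in branches differ mainly upstream, in how the tracker
# seeds are built (state-based TSGForRoadSearch vs. hit-based
# TSGFromPropagation) and in the track collections they are fed.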
Chi2CutCSC = cms.double( 150.0 ),\n Chi2CutDT = cms.double( 10.0 ),\n RefitRPCHits = cms.bool( True ),\n SkipStation = cms.int32( -1 ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n TrackerSkipSystem = cms.int32( -1 ),\n DYTthrs = cms.vint32( 30, 15 )\n ),\n ScaleTECxFactor = cms.double( -1.0 ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.05 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.05 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n RefitRPCHits = cms.bool( True ),\n PCut = cms.double( 2.5 ),\n TrackTransformer = cms.PSet( \n DoPredictionsOnly = cms.bool( False ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n RefitRPCHits = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" )\n ),\n GlobalMuonTrackMatcher = cms.PSet( \n Pt_threshold1 = cms.double( 0.0 ),\n DeltaDCut_3 = cms.double( 15.0 ),\n MinP = cms.double( 2.5 ),\n MinPt = cms.double( 1.0 ),\n Chi2Cut_1 = cms.double( 50.0 ),\n Pt_threshold2 = cms.double( 9.99999999E8 ),\n LocChi2Cut = cms.double( 0.001 ),\n Eta_threshold = cms.double( 1.2 ),\n Quality_3 = cms.double( 7.0 ),\n Quality_2 = cms.double( 15.0 ),\n Chi2Cut_2 = cms.double( 50.0 ),\n Chi2Cut_3 = cms.double( 200.0 ),\n DeltaDCut_1 = cms.double( 40.0 ),\n DeltaRCut_2 = cms.double( 0.2 ),\n DeltaRCut_3 = cms.double( 1.0 ),\n DeltaDCut_2 = cms.double( 10.0 ),\n DeltaRCut_1 = cms.double( 0.1 ),\n Propagator = cms.string( \"hltESPSmartPropagator\" ),\n Quality_1 = cms.double( 20.0 )\n ),\n PtCut = cms.double( 1.0 ),\n TrackerPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n tkTrajLabel = cms.InputTag( \"hltL3TkTracksFromL2OIHit\" ),\n tkTrajBeamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n tkTrajMaxChi2 = cms.double( 9999.0 ),\n tkTrajMaxDXYBeamSpot = cms.double( 0.2 ),\n tkTrajVertex = cms.InputTag( \"pixelVertices\" ),\n tkTrajUseVertex = cms.bool( False )\n ),\n TrackLoaderParameters = cms.PSet( \n PutTkTrackIntoEvent = cms.untracked.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SmoothTkTrack = cms.untracked.bool( False ),\n MuonSeededTracksInstance = cms.untracked.string( \"L2Seeded\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n Propagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n 
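# Unlike the L2 track loader (hltL2Muons above, VertexConstraint = True), the
# L3 loaders in this menu smooth the final trajectory but apply no
# beam-spot/vertex constraint (VertexConstraint = False below).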
VertexConstraint = cms.bool( False ),\n DoSmoothing = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )\n)\nhltL3TkFromL2OICombination = cms.EDProducer( \"L3TrackCombiner\",\n labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit' )\n)\nhltPixelLayerTriplets = cms.EDProducer( \"SeedingLayersEDProducer\",\n layerList = cms.vstring( 'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n MTOB = cms.PSet( ),\n TEC = cms.PSet( ),\n MTID = cms.PSet( ),\n FPix = cms.PSet( \n useErrorsFromParam = cms.bool( True ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 )\n ),\n MTEC = cms.PSet( ),\n MTIB = cms.PSet( ),\n TID = cms.PSet( ),\n TOB = cms.PSet( ),\n BPix = cms.PSet( \n useErrorsFromParam = cms.bool( True ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.006 )\n ),\n TIB = cms.PSet( )\n)\nhltPixelLayerPairs = cms.EDProducer( \"SeedingLayersEDProducer\",\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg' ),\n MTOB = cms.PSet( ),\n TEC = cms.PSet( ),\n MTID = cms.PSet( ),\n FPix = cms.PSet( \n useErrorsFromParam = cms.bool( True ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 )\n ),\n MTEC = cms.PSet( ),\n MTIB = cms.PSet( ),\n TID = cms.PSet( ),\n TOB = cms.PSet( ),\n BPix = cms.PSet( \n useErrorsFromParam = cms.bool( True ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.006 )\n ),\n TIB = cms.PSet( )\n)\nhltMixedLayerPairs = cms.EDProducer( \"SeedingLayersEDProducer\",\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg',\n 'FPix2_pos+TEC1_pos',\n 'FPix2_pos+TEC2_pos',\n 'TEC1_pos+TEC2_pos',\n 'TEC2_pos+TEC3_pos',\n 'FPix2_neg+TEC1_neg',\n 'FPix2_neg+TEC2_neg',\n 'TEC1_neg+TEC2_neg',\n 'TEC2_neg+TEC3_neg' ),\n MTOB = cms.PSet( ),\n TEC = cms.PSet( \n useRingSlector = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 1 ),\n clusterChargeCut = cms.PSet( refToPSet_ = cms.string( \"HLTSiStripClusterChargeCutNone\" ) )\n ),\n MTID = cms.PSet( ),\n FPix = cms.PSet( \n useErrorsFromParam = cms.bool( True ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 )\n ),\n MTEC = cms.PSet( ),\n MTIB = cms.PSet( ),\n TID = cms.PSet( ),\n TOB = cms.PSet( ),\n BPix = cms.PSet( \n 
useErrorsFromParam = cms.bool( True ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.006 )\n ),\n TIB = cms.PSet( )\n)\nhltL3TrajSeedIOHit = cms.EDProducer( \"TSGFromL2Muon\",\n TkSeedGenerator = cms.PSet( \n PSetNames = cms.vstring( 'skipTSG',\n 'iterativeTSG' ),\n L3TkCollectionA = cms.InputTag( \"hltL3TkFromL2OICombination\" ),\n iterativeTSG = cms.PSet( \n firstTSG = cms.PSet( \n ComponentName = cms.string( \"TSGFromOrderedHits\" ),\n OrderedHitsFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"StandardHitTripletGenerator\" ),\n GeneratorPSet = cms.PSet( \n useBending = cms.bool( True ),\n useFixedPreFiltering = cms.bool( False ),\n maxElement = cms.uint32( 0 ),\n phiPreFiltering = cms.double( 0.3 ),\n extraHitRPhitolerance = cms.double( 0.06 ),\n useMultScattering = cms.bool( True ),\n ComponentName = cms.string( \"PixelTripletHLTGenerator\" ),\n extraHitRZtolerance = cms.double( 0.06 ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) )\n ),\n SeedingLayers = cms.InputTag( \"hltPixelLayerTriplets\" )\n ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n PSetNames = cms.vstring( 'firstTSG',\n 'secondTSG' ),\n ComponentName = cms.string( \"CombinedTSG\" ),\n thirdTSG = cms.PSet( \n PSetNames = cms.vstring( 'endcapTSG',\n 'barrelTSG' ),\n barrelTSG = cms.PSet( ),\n endcapTSG = cms.PSet( \n ComponentName = cms.string( \"TSGFromOrderedHits\" ),\n OrderedHitsFactoryPSet = cms.PSet( \n maxElement = cms.uint32( 0 ),\n ComponentName = cms.string( \"StandardHitPairGenerator\" ),\n useOnDemandTracker = cms.untracked.int32( 0 ),\n SeedingLayers = cms.InputTag( \"hltMixedLayerPairs\" )\n ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n etaSeparation = cms.double( 2.0 ),\n ComponentName = cms.string( \"DualByEtaTSG\" )\n ),\n secondTSG = cms.PSet( \n ComponentName = cms.string( \"TSGFromOrderedHits\" ),\n OrderedHitsFactoryPSet = cms.PSet( \n maxElement = cms.uint32( 0 ),\n ComponentName = cms.string( \"StandardHitPairGenerator\" ),\n useOnDemandTracker = cms.untracked.int32( 0 ),\n SeedingLayers = cms.InputTag( \"hltPixelLayerPairs\" )\n ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n )\n ),\n skipTSG = cms.PSet( ),\n ComponentName = cms.string( \"DualByL2TSG\" )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'PropagatorWithMaterial' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.1 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.1 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n PCut = cms.double( 2.5 ),\n 
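# Inside-out seeding, as configured in hltL3TrajSeedIOHit below: DualByL2TSG
# appears to consult hltL3TkFromL2OICombination first and only build new seeds
# for L2 muons without an outside-in track; the iterativeTSG then combines
# pixel-triplet (firstTSG), pixel-pair (secondTSG) and, beyond |eta| = 2.0,
# mixed pixel/TEC-pair (thirdTSG) generators within the eta-phi region defined
# by MuonTrackingRegionBuilder.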
TrackerSeedCleaner = cms.PSet( \n cleanerFromSharedHits = cms.bool( True ),\n ptCleaner = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n directionCleaner = cms.bool( True )\n ),\n PtCut = cms.double( 1.0 )\n)\nhltL3TrackCandidateFromL2IOHit = cms.EDProducer( \"CkfTrajectoryMaker\",\n src = cms.InputTag( \"hltL3TrajSeedIOHit\" ),\n reverseTrajectories = cms.bool( False ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n useHitsSplitting = cms.bool( False ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n doSeedingRegionRebuilding = cms.bool( False ),\n trackCandidateAlso = cms.bool( True ),\n TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( \"HLTPSetMuonCkfTrajectoryBuilder\" ) ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuonCkfTrajectoryBuilder\" ),\n maxNSeeds = cms.uint32( 100000 )\n)\nhltL3TkTracksFromL2IOHit = cms.EDProducer( \"TrackProducer\",\n src = cms.InputTag( \"hltL3TrackCandidateFromL2IOHit\" ),\n SimpleMagneticField = cms.string( \"\" ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n Fitter = cms.string( \"hltESPKFFittingSmoother\" ),\n useHitsSplitting = cms.bool( False ),\n MeasurementTracker = cms.string( \"\" ),\n AlgorithmName = cms.string( \"hltIterX\" ),\n alias = cms.untracked.string( \"\" ),\n NavigationSchool = cms.string( \"\" ),\n TrajectoryInEvent = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n GeometricInnerState = cms.bool( True ),\n useSimpleMF = cms.bool( False ),\n Propagator = cms.string( \"PropagatorWithMaterial\" )\n)\nhltL3MuonsIOHit = cms.EDProducer( \"L3MuonProducer\",\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',\n 'SteppingHelixPropagatorAny',\n 'hltESPSmartPropagator',\n 'hltESPSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n L3TrajBuilderParameters = cms.PSet( \n ScaleTECyFactor = cms.double( -1.0 ),\n GlbRefitterParameters = cms.PSet( \n TrackerSkipSection = cms.int32( -1 ),\n DoPredictionsOnly = cms.bool( False ),\n PropDirForCosmics = cms.bool( False ),\n HitThreshold = cms.int32( 1 ),\n MuonHitsOption = cms.int32( 1 ),\n Chi2CutRPC = cms.double( 1.0 ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n Chi2CutCSC = cms.double( 150.0 ),\n Chi2CutDT = cms.double( 10.0 ),\n RefitRPCHits = cms.bool( True ),\n SkipStation = cms.int32( -1 ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n TrackerSkipSystem = cms.int32( -1 ),\n DYTthrs = cms.vint32( 30, 15 )\n ),\n 
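# hltL3MuonsIOHit is the third and last L3 reconstruction branch; its output
# is merged with the two outside-in branches by hltL3TkTracksMergeStep1,
# hltL3TkTracksFromL2, hltL3Muons and hltL3MuonsLinksCombination further
# below, from which hltL3MuonCandidates is built.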
ScaleTECxFactor = cms.double( -1.0 ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.05 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.05 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n RefitRPCHits = cms.bool( True ),\n PCut = cms.double( 2.5 ),\n TrackTransformer = cms.PSet( \n DoPredictionsOnly = cms.bool( False ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n RefitRPCHits = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" )\n ),\n GlobalMuonTrackMatcher = cms.PSet( \n Pt_threshold1 = cms.double( 0.0 ),\n DeltaDCut_3 = cms.double( 15.0 ),\n MinP = cms.double( 2.5 ),\n MinPt = cms.double( 1.0 ),\n Chi2Cut_1 = cms.double( 50.0 ),\n Pt_threshold2 = cms.double( 9.99999999E8 ),\n LocChi2Cut = cms.double( 0.001 ),\n Eta_threshold = cms.double( 1.2 ),\n Quality_3 = cms.double( 7.0 ),\n Quality_2 = cms.double( 15.0 ),\n Chi2Cut_2 = cms.double( 50.0 ),\n Chi2Cut_3 = cms.double( 200.0 ),\n DeltaDCut_1 = cms.double( 40.0 ),\n DeltaRCut_2 = cms.double( 0.2 ),\n DeltaRCut_3 = cms.double( 1.0 ),\n DeltaDCut_2 = cms.double( 10.0 ),\n DeltaRCut_1 = cms.double( 0.1 ),\n Propagator = cms.string( \"hltESPSmartPropagator\" ),\n Quality_1 = cms.double( 20.0 )\n ),\n PtCut = cms.double( 1.0 ),\n TrackerPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n tkTrajLabel = cms.InputTag( \"hltL3TkTracksFromL2IOHit\" ),\n tkTrajBeamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n tkTrajMaxChi2 = cms.double( 9999.0 ),\n tkTrajMaxDXYBeamSpot = cms.double( 0.2 ),\n tkTrajVertex = cms.InputTag( \"pixelVertices\" ),\n tkTrajUseVertex = cms.bool( False )\n ),\n TrackLoaderParameters = cms.PSet( \n PutTkTrackIntoEvent = cms.untracked.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SmoothTkTrack = cms.untracked.bool( False ),\n MuonSeededTracksInstance = cms.untracked.string( \"L2Seeded\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n Propagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( False ),\n DoSmoothing = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )\n)\nhltL3TrajectorySeed = cms.EDProducer( 
\"L3MuonTrajectorySeedCombiner\",\n labels = cms.VInputTag( 'hltL3TrajSeedIOHit','hltL3TrajSeedOIState','hltL3TrajSeedOIHit' )\n)\nhltL3TrackCandidateFromL2 = cms.EDProducer( \"L3TrackCandCombiner\",\n labels = cms.VInputTag( 'hltL3TrackCandidateFromL2IOHit','hltL3TrackCandidateFromL2OIHit','hltL3TrackCandidateFromL2OIState' )\n)\nhltL3TkTracksMergeStep1 = cms.EDProducer( \"TrackListMerger\",\n ShareFrac = cms.double( 0.19 ),\n writeOnlyTrkQuals = cms.bool( False ),\n MinPT = cms.double( 0.05 ),\n allowFirstHitShare = cms.bool( True ),\n copyExtras = cms.untracked.bool( True ),\n Epsilon = cms.double( -0.001 ),\n selectedTrackQuals = cms.VInputTag( 'hltL3TkTracksFromL2OIState','hltL3TkTracksFromL2OIHit' ),\n indivShareFrac = cms.vdouble( 1.0, 1.0 ),\n MaxNormalizedChisq = cms.double( 1000.0 ),\n copyMVA = cms.bool( False ),\n FoundHitBonus = cms.double( 100.0 ),\n setsToMerge = cms.VPSet( \n cms.PSet( pQual = cms.bool( False ),\n tLists = cms.vint32( 0, 1 )\n )\n ),\n MinFound = cms.int32( 3 ),\n hasSelector = cms.vint32( 0, 0 ),\n TrackProducers = cms.VInputTag( 'hltL3TkTracksFromL2OIState','hltL3TkTracksFromL2OIHit' ),\n LostHitPenalty = cms.double( 0.0 ),\n newQuality = cms.string( \"confirmed\" )\n)\nhltL3TkTracksFromL2 = cms.EDProducer( \"TrackListMerger\",\n ShareFrac = cms.double( 0.19 ),\n writeOnlyTrkQuals = cms.bool( False ),\n MinPT = cms.double( 0.05 ),\n allowFirstHitShare = cms.bool( True ),\n copyExtras = cms.untracked.bool( True ),\n Epsilon = cms.double( -0.001 ),\n selectedTrackQuals = cms.VInputTag( 'hltL3TkTracksMergeStep1','hltL3TkTracksFromL2IOHit' ),\n indivShareFrac = cms.vdouble( 1.0, 1.0 ),\n MaxNormalizedChisq = cms.double( 1000.0 ),\n copyMVA = cms.bool( False ),\n FoundHitBonus = cms.double( 100.0 ),\n setsToMerge = cms.VPSet( \n cms.PSet( pQual = cms.bool( False ),\n tLists = cms.vint32( 0, 1 )\n )\n ),\n MinFound = cms.int32( 3 ),\n hasSelector = cms.vint32( 0, 0 ),\n TrackProducers = cms.VInputTag( 'hltL3TkTracksMergeStep1','hltL3TkTracksFromL2IOHit' ),\n LostHitPenalty = cms.double( 0.0 ),\n newQuality = cms.string( \"confirmed\" )\n)\nhltL3MuonsLinksCombination = cms.EDProducer( \"L3TrackLinksCombiner\",\n labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit','hltL3MuonsIOHit' )\n)\nhltL3Muons = cms.EDProducer( \"L3TrackCombiner\",\n labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit','hltL3MuonsIOHit' )\n)\nhltL3MuonCandidates = cms.EDProducer( \"L3MuonCandidateProducer\",\n InputLinksObjects = cms.InputTag( \"hltL3MuonsLinksCombination\" ),\n InputObjects = cms.InputTag( \"hltL3Muons\" ),\n MuonPtOption = cms.string( \"Tracker\" )\n)\nhltPixelTracks = cms.EDProducer( \"PixelTrackProducer\",\n useFilterWithES = cms.bool( False ),\n FilterPSet = cms.PSet( \n chi2 = cms.double( 1000.0 ),\n nSigmaTipMaxTolerance = cms.double( 0.0 ),\n ComponentName = cms.string( \"PixelTrackFilterByKinematics\" ),\n nSigmaInvPtTolerance = cms.double( 0.0 ),\n ptMin = cms.double( 0.1 ),\n tipMax = cms.double( 1.0 )\n ),\n passLabel = cms.string( \"Pixel triplet primary tracks with vertex constraint\" ),\n FitterPSet = cms.PSet( \n ComponentName = cms.string( \"PixelFitterByHelixProjections\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n fixImpactParameter = cms.double( 0.0 )\n ),\n RegionFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"GlobalRegionProducerFromBeamSpot\" ),\n RegionPSet = cms.PSet( \n precise = cms.bool( True ),\n originRadius = cms.double( 0.2 ),\n ptMin = cms.double( 0.9 ),\n originHalfLength = cms.double( 24.0 
),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n )\n ),\n CleanerPSet = cms.PSet( ComponentName = cms.string( \"PixelTrackCleanerBySharedHits\" ) ),\n OrderedHitsFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"StandardHitTripletGenerator\" ),\n GeneratorPSet = cms.PSet( \n useBending = cms.bool( True ),\n useFixedPreFiltering = cms.bool( False ),\n maxElement = cms.uint32( 100000 ),\n phiPreFiltering = cms.double( 0.3 ),\n extraHitRPhitolerance = cms.double( 0.06 ),\n useMultScattering = cms.bool( True ),\n SeedComparitorPSet = cms.PSet( \n ComponentName = cms.string( \"LowPtClusterShapeSeedComparitor\" ),\n clusterShapeCacheSrc = cms.InputTag( \"hltSiPixelClustersCache\" )\n ),\n extraHitRZtolerance = cms.double( 0.06 ),\n ComponentName = cms.string( \"PixelTripletHLTGenerator\" )\n ),\n SeedingLayers = cms.InputTag( \"hltPixelLayerTriplets\" )\n )\n)\nhltPixelVertices = cms.EDProducer( \"PixelVertexProducer\",\n WtAverage = cms.bool( True ),\n Method2 = cms.bool( True ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n PVcomparer = cms.PSet( refToPSet_ = cms.string( \"HLTPSetPvClusterComparerForIT\" ) ),\n Verbosity = cms.int32( 0 ),\n UseError = cms.bool( True ),\n TrackCollection = cms.InputTag( \"hltPixelTracks\" ),\n PtMin = cms.double( 1.0 ),\n NTrkMin = cms.int32( 2 ),\n ZOffset = cms.double( 5.0 ),\n Finder = cms.string( \"DivisiveVertexFinder\" ),\n ZSeparation = cms.double( 0.05 )\n)\nhltTrimmedPixelVertices = cms.EDProducer( \"PixelVertexCollectionTrimmer\",\n minSumPt2 = cms.double( 0.0 ),\n PVcomparer = cms.PSet( refToPSet_ = cms.string( \"HLTPSetPvClusterComparerForIT\" ) ),\n maxVtx = cms.uint32( 100 ),\n fractionSumPt2 = cms.double( 0.3 ),\n src = cms.InputTag( \"hltPixelVertices\" )\n)\nhltIter0PFLowPixelSeedsFromPixelTracks = cms.EDProducer( \"SeedGeneratorFromProtoTracksEDProducer\",\n useEventsWithNoVertex = cms.bool( True ),\n originHalfLength = cms.double( 0.3 ),\n useProtoTrackKinematics = cms.bool( False ),\n usePV = cms.bool( False ),\n InputVertexCollection = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n InputCollection = cms.InputTag( \"hltPixelTracks\" ),\n originRadius = cms.double( 0.1 )\n)\nhltIter0PFlowCkfTrackCandidates = cms.EDProducer( \"CkfTrackCandidateMaker\",\n src = cms.InputTag( \"hltIter0PFLowPixelSeedsFromPixelTracks\" ),\n maxSeedsBeforeCleaning = cms.uint32( 1000 ),\n SimpleMagneticField = cms.string( \"ParabolicMf\" ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterialParabolicMf\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialParabolicMfOpposite\" )\n ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n useHitsSplitting = cms.bool( False ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n doSeedingRegionRebuilding = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 ),\n TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( \"HLTIter0PSetTrajectoryBuilderIT\" ) ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n TrajectoryBuilder = cms.string( \"\" )\n)\nhltIter0PFlowCtfWithMaterialTracks = cms.EDProducer( \"TrackProducer\",\n src = cms.InputTag( \"hltIter0PFlowCkfTrackCandidates\" ),\n SimpleMagneticField = cms.string( \"ParabolicMf\" 
),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltSiStripClusters\" ),\n Fitter = cms.string( \"hltESPFittingSmootherIT\" ),\n useHitsSplitting = cms.bool( False ),\n MeasurementTracker = cms.string( \"\" ),\n AlgorithmName = cms.string( \"hltIter0\" ),\n alias = cms.untracked.string( \"ctfWithMaterialTracks\" ),\n NavigationSchool = cms.string( \"\" ),\n TrajectoryInEvent = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n GeometricInnerState = cms.bool( True ),\n useSimpleMF = cms.bool( True ),\n Propagator = cms.string( \"hltESPRungeKuttaTrackerPropagator\" )\n)\nhltIter0PFlowTrackSelectionHighPurity = cms.EDProducer( \"AnalyticalTrackSelector\",\n max_d0 = cms.double( 100.0 ),\n minNumber3DLayers = cms.uint32( 0 ),\n max_lostHitFraction = cms.double( 1.0 ),\n applyAbsCutsIfNoPV = cms.bool( False ),\n qualityBit = cms.string( \"highPurity\" ),\n minNumberLayers = cms.uint32( 3 ),\n chi2n_par = cms.double( 0.7 ),\n useVtxError = cms.bool( False ),\n nSigmaZ = cms.double( 3.0 ),\n dz_par2 = cms.vdouble( 0.4, 4.0 ),\n applyAdaptedPVCuts = cms.bool( True ),\n min_eta = cms.double( -9999.0 ),\n dz_par1 = cms.vdouble( 0.35, 4.0 ),\n copyTrajectories = cms.untracked.bool( True ),\n vtxNumber = cms.int32( -1 ),\n max_d0NoPV = cms.double( 100.0 ),\n keepAllTracks = cms.bool( False ),\n maxNumberLostLayers = cms.uint32( 1 ),\n beamspot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n max_relpterr = cms.double( 9999.0 ),\n copyExtras = cms.untracked.bool( True ),\n max_z0NoPV = cms.double( 100.0 ),\n vertexCut = cms.string( \"tracksSize>=3\" ),\n max_z0 = cms.double( 100.0 ),\n useVertices = cms.bool( True ),\n min_nhits = cms.uint32( 0 ),\n src = cms.InputTag( \"hltIter0PFlowCtfWithMaterialTracks\" ),\n max_minMissHitOutOrIn = cms.int32( 99 ),\n chi2n_no1Dmod_par = cms.double( 9999.0 ),\n vertices = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n max_eta = cms.double( 9999.0 ),\n d0_par2 = cms.vdouble( 0.4, 4.0 ),\n d0_par1 = cms.vdouble( 0.3, 4.0 ),\n res_par = cms.vdouble( 0.003, 0.001 ),\n minHitsToBypassChecks = cms.uint32( 20 )\n)\nhltTrackIter0RefsForJets4Iter1 = cms.EDProducer( \"ChargedRefCandidateProducer\",\n src = cms.InputTag( \"hltIter0PFlowTrackSelectionHighPurity\" ),\n particleType = cms.string( \"pi+\" )\n)\nhltAK4Iter0TrackJets4Iter1 = cms.EDProducer( \"FastjetJetProducer\",\n Active_Area_Repeats = cms.int32( 5 ),\n doAreaFastjet = cms.bool( False ),\n voronoiRfact = cms.double( 0.9 ),\n maxBadHcalCells = cms.uint32( 9999999 ),\n doAreaDiskApprox = cms.bool( False ),\n maxRecoveredEcalCells = cms.uint32( 9999999 ),\n jetType = cms.string( \"TrackJet\" ),\n minSeed = cms.uint32( 14327 ),\n Ghost_EtaMax = cms.double( 6.0 ),\n doRhoFastjet = cms.bool( False ),\n jetAlgorithm = cms.string( \"AntiKt\" ),\n nSigmaPU = cms.double( 1.0 ),\n GhostArea = cms.double( 0.01 ),\n Rho_EtaMax = cms.double( 4.4 ),\n maxBadEcalCells = cms.uint32( 9999999 ),\n useDeterministicSeed = cms.bool( True ),\n doPVCorrection = cms.bool( False ),\n maxRecoveredHcalCells = cms.uint32( 9999999 ),\n rParam = cms.double( 0.4 ),\n maxProblematicHcalCells = cms.uint32( 9999999 ),\n doOutputJets = cms.bool( True ),\n src = cms.InputTag( \"hltTrackIter0RefsForJets4Iter1\" ),\n inputEtMin = cms.double( 0.1 ),\n puPtMin = cms.double( 0.0 ),\n srcPVs = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n jetPtMin = cms.double( 1.0 ),\n radiusPU = cms.double( 0.4 ),\n maxProblematicEcalCells = cms.uint32( 
9999999 ),\n doPUOffsetCorr = cms.bool( False ),\n inputEMin = cms.double( 0.0 ),\n useMassDropTagger = cms.bool( False ),\n muMin = cms.double( -1.0 ),\n subtractorName = cms.string( \"\" ),\n muCut = cms.double( -1.0 ),\n subjetPtMin = cms.double( -1.0 ),\n useTrimming = cms.bool( False ),\n muMax = cms.double( -1.0 ),\n yMin = cms.double( -1.0 ),\n useFiltering = cms.bool( False ),\n rFilt = cms.double( -1.0 ),\n yMax = cms.double( -1.0 ),\n zcut = cms.double( -1.0 ),\n MinVtxNdof = cms.int32( 0 ),\n MaxVtxZ = cms.double( 30.0 ),\n UseOnlyVertexTracks = cms.bool( False ),\n dRMin = cms.double( -1.0 ),\n nFilt = cms.int32( -1 ),\n usePruning = cms.bool( False ),\n maxDepth = cms.int32( -1 ),\n yCut = cms.double( -1.0 ),\n DzTrVtxMax = cms.double( 0.5 ),\n UseOnlyOnePV = cms.bool( True ),\n rcut_factor = cms.double( -1.0 ),\n sumRecHits = cms.bool( False ),\n trimPtFracMin = cms.double( -1.0 ),\n dRMax = cms.double( -1.0 ),\n DxyTrVtxMax = cms.double( 0.2 ),\n useCMSBoostedTauSeedingAlgorithm = cms.bool( False )\n)\nhltIter0TrackAndTauJets4Iter1 = cms.EDProducer( \"TauJetSelectorForHLTTrackSeeding\",\n fractionMinCaloInTauCone = cms.double( 0.7 ),\n fractionMaxChargedPUInCaloCone = cms.double( 0.3 ),\n tauConeSize = cms.double( 0.2 ),\n ptTrkMaxInCaloCone = cms.double( 1.0 ),\n isolationConeSize = cms.double( 0.5 ),\n inputTrackJetTag = cms.InputTag( \"hltAK4Iter0TrackJets4Iter1\" ),\n nTrkMaxInCaloCone = cms.int32( 0 ),\n inputCaloJetTag = cms.InputTag( \"hltAK4CaloJetsPFEt5\" ),\n etaMinCaloJet = cms.double( -2.7 ),\n etaMaxCaloJet = cms.double( 2.7 ),\n ptMinCaloJet = cms.double( 5.0 ),\n inputTrackTag = cms.InputTag( \"hltIter0PFlowTrackSelectionHighPurity\" )\n)\nhltIter1ClustersRefRemoval = cms.EDProducer( \"TrackClusterRemover\",\n minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),\n maxChi2 = cms.double( 9.0 ),\n trajectories = cms.InputTag( \"hltIter0PFlowTrackSelectionHighPurity\" ),\n oldClusterRemovalInfo = cms.InputTag( \"\" ),\n stripClusters = cms.InputTag( \"hltSiStripRawToClustersFacility\" ),\n overrideTrkQuals = cms.InputTag( \"\" ),\n pixelClusters = cms.InputTag( \"hltSiPixelClusters\" ),\n TrackQuality = cms.string( \"highPurity\" )\n)\nhltIter1MaskedMeasurementTrackerEvent = cms.EDProducer( \"MaskedMeasurementTrackerEventProducer\",\n clustersToSkip = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n OnDemand = cms.bool( False ),\n src = cms.InputTag( \"hltSiStripClusters\" )\n)\nhltIter1PixelLayerTriplets = cms.EDProducer( \"SeedingLayersEDProducer\",\n layerList = cms.vstring( 'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n MTOB = cms.PSet( ),\n TEC = cms.PSet( ),\n MTID = cms.PSet( ),\n FPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0051 )\n ),\n MTEC = cms.PSet( ),\n MTIB = cms.PSet( ),\n TID = cms.PSet( ),\n TOB = cms.PSet( ),\n BPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.006 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0027 )\n ),\n TIB = cms.PSet( )\n)\nhltIter1PFlowPixelSeeds = cms.EDProducer( 
\"SeedGeneratorFromRegionHitsEDProducer\",\n RegionFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"CandidateSeededTrackingRegionsProducer\" ),\n RegionPSet = cms.PSet( \n precise = cms.bool( True ),\n originRadius = cms.double( 0.05 ),\n searchOpt = cms.bool( True ),\n ptMin = cms.double( 0.5 ),\n measurementTrackerName = cms.string( \"hltIter1MaskedMeasurementTrackerEvent\" ),\n mode = cms.string( \"VerticesFixed\" ),\n maxNRegions = cms.int32( 100 ),\n maxNVertices = cms.int32( 10 ),\n deltaPhi = cms.double( 1.0 ),\n deltaEta = cms.double( 1.0 ),\n zErrorBeamSpot = cms.double( 15.0 ),\n nSigmaZBeamSpot = cms.double( 3.0 ),\n zErrorVetex = cms.double( 0.1 ),\n vertexCollection = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n input = cms.InputTag( \"hltIter0TrackAndTauJets4Iter1\" )\n )\n ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) ),\n ClusterCheckPSet = cms.PSet( \n PixelClusterCollectionLabel = cms.InputTag( \"hltSiPixelClusters\" ),\n MaxNumberOfCosmicClusters = cms.uint32( 50000 ),\n doClusterCheck = cms.bool( False ),\n ClusterCollectionLabel = cms.InputTag( \"hltSiStripClusters\" ),\n MaxNumberOfPixelClusters = cms.uint32( 10000 )\n ),\n OrderedHitsFactoryPSet = cms.PSet( \n maxElement = cms.uint32( 0 ),\n ComponentName = cms.string( \"StandardHitTripletGenerator\" ),\n GeneratorPSet = cms.PSet( \n useBending = cms.bool( True ),\n useFixedPreFiltering = cms.bool( False ),\n maxElement = cms.uint32( 100000 ),\n phiPreFiltering = cms.double( 0.3 ),\n extraHitRPhitolerance = cms.double( 0.032 ),\n useMultScattering = cms.bool( True ),\n ComponentName = cms.string( \"PixelTripletHLTGenerator\" ),\n extraHitRZtolerance = cms.double( 0.037 ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) )\n ),\n SeedingLayers = cms.InputTag( \"hltIter1PixelLayerTriplets\" )\n ),\n SeedCreatorPSet = cms.PSet( \n ComponentName = cms.string( \"SeedFromConsecutiveHitsTripletOnlyCreator\" ),\n propagator = cms.string( \"PropagatorWithMaterialParabolicMf\" ),\n SeedMomentumForBOFF = cms.double( 5.0 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n TTRHBuilder = cms.string( \"(unused)\" )\n)\nhltIter1PFlowCkfTrackCandidates = cms.EDProducer( \"CkfTrackCandidateMaker\",\n src = cms.InputTag( \"hltIter1PFlowPixelSeeds\" ),\n maxSeedsBeforeCleaning = cms.uint32( 1000 ),\n SimpleMagneticField = cms.string( \"ParabolicMf\" ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterialParabolicMf\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialParabolicMfOpposite\" )\n ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltIter1MaskedMeasurementTrackerEvent\" ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n useHitsSplitting = cms.bool( False ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n doSeedingRegionRebuilding = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 ),\n TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( \"HLTIter1PSetTrajectoryBuilderIT\" ) ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n TrajectoryBuilder = cms.string( \"\" )\n)\nhltIter1PFlowCtfWithMaterialTracks = cms.EDProducer( \"TrackProducer\",\n src = cms.InputTag( \"hltIter1PFlowCkfTrackCandidates\" ),\n SimpleMagneticField = cms.string( 
\"ParabolicMf\" ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltIter1MaskedMeasurementTrackerEvent\" ),\n Fitter = cms.string( \"hltESPFittingSmootherIT\" ),\n useHitsSplitting = cms.bool( False ),\n MeasurementTracker = cms.string( \"\" ),\n AlgorithmName = cms.string( \"hltIter1\" ),\n alias = cms.untracked.string( \"ctfWithMaterialTracks\" ),\n NavigationSchool = cms.string( \"\" ),\n TrajectoryInEvent = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n GeometricInnerState = cms.bool( True ),\n useSimpleMF = cms.bool( True ),\n Propagator = cms.string( \"hltESPRungeKuttaTrackerPropagator\" )\n)\nhltIter1PFlowTrackSelectionHighPurityLoose = cms.EDProducer( \"AnalyticalTrackSelector\",\n max_d0 = cms.double( 100.0 ),\n minNumber3DLayers = cms.uint32( 0 ),\n max_lostHitFraction = cms.double( 1.0 ),\n applyAbsCutsIfNoPV = cms.bool( False ),\n qualityBit = cms.string( \"highPurity\" ),\n minNumberLayers = cms.uint32( 3 ),\n chi2n_par = cms.double( 0.7 ),\n useVtxError = cms.bool( False ),\n nSigmaZ = cms.double( 3.0 ),\n dz_par2 = cms.vdouble( 0.9, 3.0 ),\n applyAdaptedPVCuts = cms.bool( True ),\n min_eta = cms.double( -9999.0 ),\n dz_par1 = cms.vdouble( 0.8, 3.0 ),\n copyTrajectories = cms.untracked.bool( True ),\n vtxNumber = cms.int32( -1 ),\n max_d0NoPV = cms.double( 100.0 ),\n keepAllTracks = cms.bool( False ),\n maxNumberLostLayers = cms.uint32( 1 ),\n beamspot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n max_relpterr = cms.double( 9999.0 ),\n copyExtras = cms.untracked.bool( True ),\n max_z0NoPV = cms.double( 100.0 ),\n vertexCut = cms.string( \"tracksSize>=3\" ),\n max_z0 = cms.double( 100.0 ),\n useVertices = cms.bool( True ),\n min_nhits = cms.uint32( 0 ),\n src = cms.InputTag( \"hltIter1PFlowCtfWithMaterialTracks\" ),\n max_minMissHitOutOrIn = cms.int32( 99 ),\n chi2n_no1Dmod_par = cms.double( 9999.0 ),\n vertices = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n max_eta = cms.double( 9999.0 ),\n d0_par2 = cms.vdouble( 0.9, 3.0 ),\n d0_par1 = cms.vdouble( 0.85, 3.0 ),\n res_par = cms.vdouble( 0.003, 0.001 ),\n minHitsToBypassChecks = cms.uint32( 20 )\n)\nhltIter1PFlowTrackSelectionHighPurityTight = cms.EDProducer( \"AnalyticalTrackSelector\",\n max_d0 = cms.double( 100.0 ),\n minNumber3DLayers = cms.uint32( 0 ),\n max_lostHitFraction = cms.double( 1.0 ),\n applyAbsCutsIfNoPV = cms.bool( False ),\n qualityBit = cms.string( \"highPurity\" ),\n minNumberLayers = cms.uint32( 5 ),\n chi2n_par = cms.double( 0.4 ),\n useVtxError = cms.bool( False ),\n nSigmaZ = cms.double( 3.0 ),\n dz_par2 = cms.vdouble( 1.0, 4.0 ),\n applyAdaptedPVCuts = cms.bool( True ),\n min_eta = cms.double( -9999.0 ),\n dz_par1 = cms.vdouble( 1.0, 4.0 ),\n copyTrajectories = cms.untracked.bool( True ),\n vtxNumber = cms.int32( -1 ),\n max_d0NoPV = cms.double( 100.0 ),\n keepAllTracks = cms.bool( False ),\n maxNumberLostLayers = cms.uint32( 1 ),\n beamspot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n max_relpterr = cms.double( 9999.0 ),\n copyExtras = cms.untracked.bool( True ),\n max_z0NoPV = cms.double( 100.0 ),\n vertexCut = cms.string( \"tracksSize>=3\" ),\n max_z0 = cms.double( 100.0 ),\n useVertices = cms.bool( True ),\n min_nhits = cms.uint32( 0 ),\n src = cms.InputTag( \"hltIter1PFlowCtfWithMaterialTracks\" ),\n max_minMissHitOutOrIn = cms.int32( 99 ),\n chi2n_no1Dmod_par = cms.double( 9999.0 ),\n vertices = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n max_eta = cms.double( 9999.0 
),\n d0_par2 = cms.vdouble( 1.0, 4.0 ),\n d0_par1 = cms.vdouble( 1.0, 4.0 ),\n res_par = cms.vdouble( 0.003, 0.001 ),\n minHitsToBypassChecks = cms.uint32( 20 )\n)\nhltIter1PFlowTrackSelectionHighPurity = cms.EDProducer( \"TrackListMerger\",\n ShareFrac = cms.double( 0.19 ),\n writeOnlyTrkQuals = cms.bool( False ),\n MinPT = cms.double( 0.05 ),\n allowFirstHitShare = cms.bool( True ),\n copyExtras = cms.untracked.bool( True ),\n Epsilon = cms.double( -0.001 ),\n selectedTrackQuals = cms.VInputTag( 'hltIter1PFlowTrackSelectionHighPurityLoose','hltIter1PFlowTrackSelectionHighPurityTight' ),\n indivShareFrac = cms.vdouble( 1.0, 1.0 ),\n MaxNormalizedChisq = cms.double( 1000.0 ),\n copyMVA = cms.bool( False ),\n FoundHitBonus = cms.double( 5.0 ),\n setsToMerge = cms.VPSet( \n cms.PSet( pQual = cms.bool( False ),\n tLists = cms.vint32( 0, 1 )\n )\n ),\n MinFound = cms.int32( 3 ),\n hasSelector = cms.vint32( 0, 0 ),\n TrackProducers = cms.VInputTag( 'hltIter1PFlowTrackSelectionHighPurityLoose','hltIter1PFlowTrackSelectionHighPurityTight' ),\n LostHitPenalty = cms.double( 20.0 ),\n newQuality = cms.string( \"confirmed\" )\n)\nhltIter1Merged = cms.EDProducer( \"TrackListMerger\",\n ShareFrac = cms.double( 0.19 ),\n writeOnlyTrkQuals = cms.bool( False ),\n MinPT = cms.double( 0.05 ),\n allowFirstHitShare = cms.bool( True ),\n copyExtras = cms.untracked.bool( True ),\n Epsilon = cms.double( -0.001 ),\n selectedTrackQuals = cms.VInputTag( 'hltIter0PFlowTrackSelectionHighPurity','hltIter1PFlowTrackSelectionHighPurity' ),\n indivShareFrac = cms.vdouble( 1.0, 1.0 ),\n MaxNormalizedChisq = cms.double( 1000.0 ),\n copyMVA = cms.bool( False ),\n FoundHitBonus = cms.double( 5.0 ),\n setsToMerge = cms.VPSet( \n cms.PSet( pQual = cms.bool( False ),\n tLists = cms.vint32( 0, 1 )\n )\n ),\n MinFound = cms.int32( 3 ),\n hasSelector = cms.vint32( 0, 0 ),\n TrackProducers = cms.VInputTag( 'hltIter0PFlowTrackSelectionHighPurity','hltIter1PFlowTrackSelectionHighPurity' ),\n LostHitPenalty = cms.double( 20.0 ),\n newQuality = cms.string( \"confirmed\" )\n)\nhltIter1TrackRefsForJets4Iter2 = cms.EDProducer( \"ChargedRefCandidateProducer\",\n src = cms.InputTag( \"hltIter1Merged\" ),\n particleType = cms.string( \"pi+\" )\n)\nhltAK4Iter1TrackJets4Iter2 = cms.EDProducer( \"FastjetJetProducer\",\n Active_Area_Repeats = cms.int32( 5 ),\n doAreaFastjet = cms.bool( False ),\n voronoiRfact = cms.double( 0.9 ),\n maxBadHcalCells = cms.uint32( 9999999 ),\n doAreaDiskApprox = cms.bool( False ),\n maxRecoveredEcalCells = cms.uint32( 9999999 ),\n jetType = cms.string( \"TrackJet\" ),\n minSeed = cms.uint32( 14327 ),\n Ghost_EtaMax = cms.double( 6.0 ),\n doRhoFastjet = cms.bool( False ),\n jetAlgorithm = cms.string( \"AntiKt\" ),\n nSigmaPU = cms.double( 1.0 ),\n GhostArea = cms.double( 0.01 ),\n Rho_EtaMax = cms.double( 4.4 ),\n maxBadEcalCells = cms.uint32( 9999999 ),\n useDeterministicSeed = cms.bool( True ),\n doPVCorrection = cms.bool( False ),\n maxRecoveredHcalCells = cms.uint32( 9999999 ),\n rParam = cms.double( 0.4 ),\n maxProblematicHcalCells = cms.uint32( 9999999 ),\n doOutputJets = cms.bool( True ),\n src = cms.InputTag( \"hltIter1TrackRefsForJets4Iter2\" ),\n inputEtMin = cms.double( 0.1 ),\n puPtMin = cms.double( 0.0 ),\n srcPVs = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n jetPtMin = cms.double( 7.5 ),\n radiusPU = cms.double( 0.4 ),\n maxProblematicEcalCells = cms.uint32( 9999999 ),\n doPUOffsetCorr = cms.bool( False ),\n inputEMin = cms.double( 0.0 ),\n useMassDropTagger = cms.bool( False ),\n muMin = 
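# --- Editor's illustrative aside (plain Python, not part of the HLT dump) ---
# The TrackListMerger instances above (hltIter1PFlowTrackSelectionHighPurity,
# hltIter1Merged) treat two tracks as duplicates when their shared-hit
# fraction exceeds ShareFrac (0.19) and keep the better one, scoring tracks
# with FoundHitBonus per valid hit minus LostHitPenalty per lost hit. A toy
# version of that decision (the shared-fraction convention and all track data
# are hypothetical):
SHARE_FRAC, FOUND_BONUS, LOST_PENALTY = 0.19, 5.0, 20.0

def score(track):
    return FOUND_BONUS * len(track["hits"]) - LOST_PENALTY * track["lost"]

def shared_fraction(a, b):
    return len(a["hits"] & b["hits"]) / min(len(a["hits"]), len(b["hits"]))

t1 = {"hits": {1, 2, 3, 4, 5}, "lost": 0}
t2 = {"hits": {3, 4, 5, 6}, "lost": 1}
merged = [max((t1, t2), key=score)] if shared_fraction(t1, t2) > SHARE_FRAC \
    else [t1, t2]
assert merged == [t1]   # t1 wins: more found hits, no lost hits
# -----------------------------------------------------------------------------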
cms.double( -1.0 ),\n subtractorName = cms.string( \"\" ),\n muCut = cms.double( -1.0 ),\n subjetPtMin = cms.double( -1.0 ),\n useTrimming = cms.bool( False ),\n muMax = cms.double( -1.0 ),\n yMin = cms.double( -1.0 ),\n useFiltering = cms.bool( False ),\n rFilt = cms.double( -1.0 ),\n yMax = cms.double( -1.0 ),\n zcut = cms.double( -1.0 ),\n MinVtxNdof = cms.int32( 0 ),\n MaxVtxZ = cms.double( 30.0 ),\n UseOnlyVertexTracks = cms.bool( False ),\n dRMin = cms.double( -1.0 ),\n nFilt = cms.int32( -1 ),\n usePruning = cms.bool( False ),\n maxDepth = cms.int32( -1 ),\n yCut = cms.double( -1.0 ),\n DzTrVtxMax = cms.double( 0.5 ),\n UseOnlyOnePV = cms.bool( True ),\n rcut_factor = cms.double( -1.0 ),\n sumRecHits = cms.bool( False ),\n trimPtFracMin = cms.double( -1.0 ),\n dRMax = cms.double( -1.0 ),\n DxyTrVtxMax = cms.double( 0.2 ),\n useCMSBoostedTauSeedingAlgorithm = cms.bool( False )\n)\nhltIter1TrackAndTauJets4Iter2 = cms.EDProducer( \"TauJetSelectorForHLTTrackSeeding\",\n fractionMinCaloInTauCone = cms.double( 0.7 ),\n fractionMaxChargedPUInCaloCone = cms.double( 0.3 ),\n tauConeSize = cms.double( 0.2 ),\n ptTrkMaxInCaloCone = cms.double( 1.4 ),\n isolationConeSize = cms.double( 0.5 ),\n inputTrackJetTag = cms.InputTag( \"hltAK4Iter1TrackJets4Iter2\" ),\n nTrkMaxInCaloCone = cms.int32( 0 ),\n inputCaloJetTag = cms.InputTag( \"hltAK4CaloJetsPFEt5\" ),\n etaMinCaloJet = cms.double( -2.7 ),\n etaMaxCaloJet = cms.double( 2.7 ),\n ptMinCaloJet = cms.double( 5.0 ),\n inputTrackTag = cms.InputTag( \"hltIter1Merged\" )\n)\nhltIter2ClustersRefRemoval = cms.EDProducer( \"TrackClusterRemover\",\n minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),\n maxChi2 = cms.double( 16.0 ),\n trajectories = cms.InputTag( \"hltIter1PFlowTrackSelectionHighPurity\" ),\n oldClusterRemovalInfo = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n stripClusters = cms.InputTag( \"hltSiStripRawToClustersFacility\" ),\n overrideTrkQuals = cms.InputTag( \"\" ),\n pixelClusters = cms.InputTag( \"hltSiPixelClusters\" ),\n TrackQuality = cms.string( \"highPurity\" )\n)\nhltIter2MaskedMeasurementTrackerEvent = cms.EDProducer( \"MaskedMeasurementTrackerEventProducer\",\n clustersToSkip = cms.InputTag( \"hltIter2ClustersRefRemoval\" ),\n OnDemand = cms.bool( False ),\n src = cms.InputTag( \"hltSiStripClusters\" )\n)\nhltIter2PixelLayerPairs = cms.EDProducer( \"SeedingLayersEDProducer\",\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg' ),\n MTOB = cms.PSet( ),\n TEC = cms.PSet( ),\n MTID = cms.PSet( ),\n FPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter2ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0051 )\n ),\n MTEC = cms.PSet( ),\n MTIB = cms.PSet( ),\n TID = cms.PSet( ),\n TOB = cms.PSet( ),\n BPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.006 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter2ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0027 )\n ),\n TIB = cms.PSet( )\n)\nhltIter2PFlowPixelSeeds = cms.EDProducer( 
\"SeedGeneratorFromRegionHitsEDProducer\",\n RegionFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"CandidateSeededTrackingRegionsProducer\" ),\n RegionPSet = cms.PSet( \n precise = cms.bool( True ),\n originRadius = cms.double( 0.025 ),\n searchOpt = cms.bool( True ),\n originZPos = cms.double( 0.0 ),\n ptMin = cms.double( 1.2 ),\n measurementTrackerName = cms.string( \"hltIter2MaskedMeasurementTrackerEvent\" ),\n mode = cms.string( \"VerticesFixed\" ),\n maxNRegions = cms.int32( 100 ),\n maxNVertices = cms.int32( 10 ),\n deltaPhi = cms.double( 0.8 ),\n deltaEta = cms.double( 0.8 ),\n zErrorBeamSpot = cms.double( 15.0 ),\n nSigmaZBeamSpot = cms.double( 3.0 ),\n zErrorVetex = cms.double( 0.05 ),\n vertexCollection = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n input = cms.InputTag( \"hltIter1TrackAndTauJets4Iter2\" )\n )\n ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) ),\n ClusterCheckPSet = cms.PSet( \n PixelClusterCollectionLabel = cms.InputTag( \"hltSiPixelClusters\" ),\n MaxNumberOfCosmicClusters = cms.uint32( 50000 ),\n doClusterCheck = cms.bool( False ),\n ClusterCollectionLabel = cms.InputTag( \"hltSiStripClusters\" ),\n MaxNumberOfPixelClusters = cms.uint32( 10000 )\n ),\n OrderedHitsFactoryPSet = cms.PSet( \n maxElement = cms.uint32( 0 ),\n ComponentName = cms.string( \"StandardHitPairGenerator\" ),\n GeneratorPSet = cms.PSet( \n maxElement = cms.uint32( 100000 ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) )\n ),\n SeedingLayers = cms.InputTag( \"hltIter2PixelLayerPairs\" )\n ),\n SeedCreatorPSet = cms.PSet( \n ComponentName = cms.string( \"SeedFromConsecutiveHitsCreator\" ),\n propagator = cms.string( \"PropagatorWithMaterialParabolicMf\" ),\n SeedMomentumForBOFF = cms.double( 5.0 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n TTRHBuilder = cms.string( \"(unused)\" )\n)\nhltIter2PFlowCkfTrackCandidates = cms.EDProducer( \"CkfTrackCandidateMaker\",\n src = cms.InputTag( \"hltIter2PFlowPixelSeeds\" ),\n maxSeedsBeforeCleaning = cms.uint32( 1000 ),\n SimpleMagneticField = cms.string( \"ParabolicMf\" ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterialParabolicMf\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialParabolicMfOpposite\" )\n ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltIter2MaskedMeasurementTrackerEvent\" ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n useHitsSplitting = cms.bool( False ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n doSeedingRegionRebuilding = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 ),\n TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( \"HLTIter2PSetTrajectoryBuilderIT\" ) ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n TrajectoryBuilder = cms.string( \"\" )\n)\nhltIter2PFlowCtfWithMaterialTracks = cms.EDProducer( \"TrackProducer\",\n src = cms.InputTag( \"hltIter2PFlowCkfTrackCandidates\" ),\n SimpleMagneticField = cms.string( \"ParabolicMf\" ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MeasurementTrackerEvent = cms.InputTag( \"hltIter2MaskedMeasurementTrackerEvent\" ),\n Fitter = cms.string( \"hltESPFittingSmootherIT\" ),\n useHitsSplitting = cms.bool( False ),\n 
MeasurementTracker = cms.string( \"\" ),\n AlgorithmName = cms.string( \"hltIter2\" ),\n alias = cms.untracked.string( \"ctfWithMaterialTracks\" ),\n NavigationSchool = cms.string( \"\" ),\n TrajectoryInEvent = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n GeometricInnerState = cms.bool( True ),\n useSimpleMF = cms.bool( True ),\n Propagator = cms.string( \"hltESPRungeKuttaTrackerPropagator\" )\n)\nhltIter2PFlowTrackSelectionHighPurity = cms.EDProducer( \"AnalyticalTrackSelector\",\n max_d0 = cms.double( 100.0 ),\n minNumber3DLayers = cms.uint32( 0 ),\n max_lostHitFraction = cms.double( 1.0 ),\n applyAbsCutsIfNoPV = cms.bool( False ),\n qualityBit = cms.string( \"highPurity\" ),\n minNumberLayers = cms.uint32( 3 ),\n chi2n_par = cms.double( 0.7 ),\n useVtxError = cms.bool( False ),\n nSigmaZ = cms.double( 3.0 ),\n dz_par2 = cms.vdouble( 0.4, 4.0 ),\n applyAdaptedPVCuts = cms.bool( True ),\n min_eta = cms.double( -9999.0 ),\n dz_par1 = cms.vdouble( 0.35, 4.0 ),\n copyTrajectories = cms.untracked.bool( True ),\n vtxNumber = cms.int32( -1 ),\n max_d0NoPV = cms.double( 100.0 ),\n keepAllTracks = cms.bool( False ),\n maxNumberLostLayers = cms.uint32( 1 ),\n beamspot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n max_relpterr = cms.double( 9999.0 ),\n copyExtras = cms.untracked.bool( True ),\n max_z0NoPV = cms.double( 100.0 ),\n vertexCut = cms.string( \"tracksSize>=3\" ),\n max_z0 = cms.double( 100.0 ),\n useVertices = cms.bool( True ),\n min_nhits = cms.uint32( 0 ),\n src = cms.InputTag( \"hltIter2PFlowCtfWithMaterialTracks\" ),\n max_minMissHitOutOrIn = cms.int32( 99 ),\n chi2n_no1Dmod_par = cms.double( 9999.0 ),\n vertices = cms.InputTag( \"hltTrimmedPixelVertices\" ),\n max_eta = cms.double( 9999.0 ),\n d0_par2 = cms.vdouble( 0.4, 4.0 ),\n d0_par1 = cms.vdouble( 0.3, 4.0 ),\n res_par = cms.vdouble( 0.003, 0.001 ),\n minHitsToBypassChecks = cms.uint32( 20 )\n)\nhltIter2Merged = cms.EDProducer( \"TrackListMerger\",\n ShareFrac = cms.double( 0.19 ),\n writeOnlyTrkQuals = cms.bool( False ),\n MinPT = cms.double( 0.05 ),\n allowFirstHitShare = cms.bool( True ),\n copyExtras = cms.untracked.bool( True ),\n Epsilon = cms.double( -0.001 ),\n selectedTrackQuals = cms.VInputTag( 'hltIter1Merged','hltIter2PFlowTrackSelectionHighPurity' ),\n indivShareFrac = cms.vdouble( 1.0, 1.0 ),\n MaxNormalizedChisq = cms.double( 1000.0 ),\n copyMVA = cms.bool( False ),\n FoundHitBonus = cms.double( 5.0 ),\n setsToMerge = cms.VPSet( \n cms.PSet( pQual = cms.bool( False ),\n tLists = cms.vint32( 0, 1 )\n )\n ),\n MinFound = cms.int32( 3 ),\n hasSelector = cms.vint32( 0, 0 ),\n TrackProducers = cms.VInputTag( 'hltIter1Merged','hltIter2PFlowTrackSelectionHighPurity' ),\n LostHitPenalty = cms.double( 20.0 ),\n newQuality = cms.string( \"confirmed\" )\n)\nhltPFMuonMerging = cms.EDProducer( \"TrackListMerger\",\n ShareFrac = cms.double( 0.19 ),\n writeOnlyTrkQuals = cms.bool( False ),\n MinPT = cms.double( 0.05 ),\n allowFirstHitShare = cms.bool( True ),\n copyExtras = cms.untracked.bool( True ),\n Epsilon = cms.double( -0.001 ),\n selectedTrackQuals = cms.VInputTag( 'hltL3TkTracksFromL2','hltIter2Merged' ),\n indivShareFrac = cms.vdouble( 1.0, 1.0 ),\n MaxNormalizedChisq = cms.double( 1000.0 ),\n copyMVA = cms.bool( False ),\n FoundHitBonus = cms.double( 5.0 ),\n setsToMerge = cms.VPSet( \n cms.PSet( pQual = cms.bool( False ),\n tLists = cms.vint32( 0, 1 )\n )\n ),\n MinFound = cms.int32( 3 ),\n hasSelector = cms.vint32( 0, 0 ),\n TrackProducers = cms.VInputTag( 
'hltL3TkTracksFromL2','hltIter2Merged' ),\n LostHitPenalty = cms.double( 20.0 ),\n newQuality = cms.string( \"confirmed\" )\n)\nhltMuonLinks = cms.EDProducer( \"MuonLinksProducerForHLT\",\n pMin = cms.double( 2.5 ),\n InclusiveTrackerTrackCollection = cms.InputTag( \"hltPFMuonMerging\" ),\n shareHitFraction = cms.double( 0.8 ),\n LinkCollection = cms.InputTag( \"hltL3MuonsLinksCombination\" ),\n ptMin = cms.double( 2.5 )\n)\nhltMuons = cms.EDProducer( \"MuonIdProducer\",\n TrackExtractorPSet = cms.PSet( \n Diff_z = cms.double( 0.2 ),\n inputTrackCollection = cms.InputTag( \"hltPFMuonMerging\" ),\n BeamSpotLabel = cms.InputTag( \"hltOnlineBeamSpot\" ),\n ComponentName = cms.string( \"TrackExtractor\" ),\n DR_Max = cms.double( 1.0 ),\n Diff_r = cms.double( 0.1 ),\n Chi2Prob_Min = cms.double( -1.0 ),\n DR_Veto = cms.double( 0.01 ),\n NHits_Min = cms.uint32( 0 ),\n Chi2Ndof_Max = cms.double( 1.0E64 ),\n Pt_Min = cms.double( -1.0 ),\n DepositLabel = cms.untracked.string( \"\" ),\n BeamlineOption = cms.string( \"BeamSpotFromEvent\" )\n ),\n maxAbsEta = cms.double( 3.0 ),\n fillGlobalTrackRefits = cms.bool( False ),\n arbitrationCleanerOptions = cms.PSet( \n Clustering = cms.bool( True ),\n ME1a = cms.bool( True ),\n ClusterDPhi = cms.double( 0.6 ),\n OverlapDTheta = cms.double( 0.02 ),\n Overlap = cms.bool( True ),\n OverlapDPhi = cms.double( 0.0786 ),\n ClusterDTheta = cms.double( 0.02 )\n ),\n globalTrackQualityInputTag = cms.InputTag( \"glbTrackQual\" ),\n addExtraSoftMuons = cms.bool( False ),\n debugWithTruthMatching = cms.bool( False ),\n CaloExtractorPSet = cms.PSet( \n PrintTimeReport = cms.untracked.bool( False ),\n DR_Max = cms.double( 1.0 ),\n DepositInstanceLabels = cms.vstring( 'ecal',\n 'hcal',\n 'ho' ),\n Noise_HE = cms.double( 0.2 ),\n NoiseTow_EB = cms.double( 0.04 ),\n NoiseTow_EE = cms.double( 0.15 ),\n Threshold_H = cms.double( 0.5 ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),\n RPCLayers = cms.bool( False ),\n UseMuonNavigation = cms.untracked.bool( False )\n ),\n Threshold_E = cms.double( 0.2 ),\n PropagatorName = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n DepositLabel = cms.untracked.string( \"Cal\" ),\n UseRecHitsFlag = cms.bool( False ),\n TrackAssociatorParameters = cms.PSet( \n muonMaxDistanceSigmaX = cms.double( 0.0 ),\n muonMaxDistanceSigmaY = cms.double( 0.0 ),\n CSCSegmentCollectionLabel = cms.InputTag( \"hltCscSegments\" ),\n dRHcal = cms.double( 1.0 ),\n dRPreshowerPreselection = cms.double( 0.2 ),\n CaloTowerCollectionLabel = cms.InputTag( \"hltTowerMakerForPF\" ),\n useEcal = cms.bool( False ),\n dREcal = cms.double( 1.0 ),\n dREcalPreselection = cms.double( 1.0 ),\n HORecHitCollectionLabel = cms.InputTag( \"hltHoreco\" ),\n dRMuon = cms.double( 9999.0 ),\n propagateAllDirections = cms.bool( True ),\n muonMaxDistanceX = cms.double( 5.0 ),\n muonMaxDistanceY = cms.double( 5.0 ),\n useHO = cms.bool( False ),\n trajectoryUncertaintyTolerance = cms.double( -1.0 ),\n usePreshower = cms.bool( False ),\n DTRecSegment4DCollectionLabel = cms.InputTag( \"hltDt4DSegments\" ),\n EERecHitCollectionLabel = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),\n dRHcalPreselection = cms.double( 1.0 ),\n useMuon = cms.bool( False ),\n useCalo = cms.bool( True ),\n accountForTrajectoryChangeCalo = cms.bool( False ),\n EBRecHitCollectionLabel = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),\n dRMuonPreselection = cms.double( 0.2 ),\n truthMatch = cms.bool( False ),\n HBHERecHitCollectionLabel = 
cms.InputTag( \"hltHbhereco\" ),\n useHcal = cms.bool( False )\n ),\n Threshold_HO = cms.double( 0.5 ),\n Noise_EE = cms.double( 0.1 ),\n Noise_EB = cms.double( 0.025 ),\n DR_Veto_H = cms.double( 0.1 ),\n CenterConeOnCalIntersection = cms.bool( False ),\n ComponentName = cms.string( \"CaloExtractorByAssociator\" ),\n Noise_HB = cms.double( 0.2 ),\n DR_Veto_E = cms.double( 0.07 ),\n DR_Veto_HO = cms.double( 0.1 ),\n Noise_HO = cms.double( 0.2 )\n ),\n runArbitrationCleaner = cms.bool( False ),\n fillEnergy = cms.bool( True ),\n TrackerKinkFinderParameters = cms.PSet( \n usePosition = cms.bool( False ),\n diagonalOnly = cms.bool( False )\n ),\n TimingFillerParameters = cms.PSet( \n UseDT = cms.bool( True ),\n ErrorDT = cms.double( 6.0 ),\n EcalEnergyCut = cms.double( 0.4 ),\n ErrorEB = cms.double( 2.085 ),\n ErrorCSC = cms.double( 7.4 ),\n CSCTimingParameters = cms.PSet( \n CSCsegments = cms.InputTag( \"hltCscSegments\" ),\n CSCTimeOffset = cms.double( 0.0 ),\n CSCStripTimeOffset = cms.double( 0.0 ),\n MatchParameters = cms.PSet( \n CSCsegments = cms.InputTag( \"hltCscSegments\" ),\n DTsegments = cms.InputTag( \"hltDt4DSegments\" ),\n DTradius = cms.double( 0.01 ),\n TightMatchDT = cms.bool( False ),\n TightMatchCSC = cms.bool( True )\n ),\n debug = cms.bool( False ),\n UseStripTime = cms.bool( True ),\n CSCStripError = cms.double( 7.0 ),\n CSCWireError = cms.double( 8.6 ),\n CSCWireTimeOffset = cms.double( 0.0 ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),\n RPCLayers = cms.bool( True )\n ),\n PruneCut = cms.double( 100.0 ),\n UseWireTime = cms.bool( True )\n ),\n DTTimingParameters = cms.PSet( \n HitError = cms.double( 6.0 ),\n DoWireCorr = cms.bool( False ),\n MatchParameters = cms.PSet( \n CSCsegments = cms.InputTag( \"hltCscSegments\" ),\n DTsegments = cms.InputTag( \"hltDt4DSegments\" ),\n DTradius = cms.double( 0.01 ),\n TightMatchDT = cms.bool( False ),\n TightMatchCSC = cms.bool( True )\n ),\n debug = cms.bool( False ),\n DTsegments = cms.InputTag( \"hltDt4DSegments\" ),\n PruneCut = cms.double( 10000.0 ),\n RequireBothProjections = cms.bool( False ),\n HitsMin = cms.int32( 5 ),\n DTTimeOffset = cms.double( 2.7 ),\n DropTheta = cms.bool( True ),\n UseSegmentT0 = cms.bool( False ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),\n RPCLayers = cms.bool( True )\n )\n ),\n ErrorEE = cms.double( 6.95 ),\n UseCSC = cms.bool( True ),\n UseECAL = cms.bool( True )\n ),\n inputCollectionTypes = cms.vstring( 'inner tracks',\n 'links',\n 'outer tracks' ),\n minCaloCompatibility = cms.double( 0.6 ),\n ecalDepositName = cms.string( \"ecal\" ),\n minP = cms.double( 10.0 ),\n fillIsolation = cms.bool( True ),\n jetDepositName = cms.string( \"jets\" ),\n hoDepositName = cms.string( \"ho\" ),\n writeIsoDeposits = cms.bool( False ),\n maxAbsPullX = cms.double( 4.0 ),\n maxAbsPullY = cms.double( 9999.0 ),\n minPt = cms.double( 10.0 ),\n TrackAssociatorParameters = cms.PSet( \n muonMaxDistanceSigmaX = cms.double( 0.0 ),\n muonMaxDistanceSigmaY = cms.double( 0.0 ),\n CSCSegmentCollectionLabel = cms.InputTag( \"hltCscSegments\" ),\n dRHcal = cms.double( 9999.0 ),\n dRPreshowerPreselection = cms.double( 0.2 ),\n CaloTowerCollectionLabel = cms.InputTag( \"hltTowerMakerForPF\" ),\n useEcal = cms.bool( True ),\n dREcal = cms.double( 9999.0 ),\n dREcalPreselection = cms.double( 0.05 ),\n HORecHitCollectionLabel = cms.InputTag( \"hltHoreco\" ),\n dRMuon = cms.double( 9999.0 
),\n propagateAllDirections = cms.bool( True ),\n muonMaxDistanceX = cms.double( 5.0 ),\n muonMaxDistanceY = cms.double( 5.0 ),\n useHO = cms.bool( True ),\n trajectoryUncertaintyTolerance = cms.double( -1.0 ),\n usePreshower = cms.bool( False ),\n DTRecSegment4DCollectionLabel = cms.InputTag( \"hltDt4DSegments\" ),\n EERecHitCollectionLabel = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),\n dRHcalPreselection = cms.double( 0.2 ),\n useMuon = cms.bool( True ),\n useCalo = cms.bool( False ),\n accountForTrajectoryChangeCalo = cms.bool( False ),\n EBRecHitCollectionLabel = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),\n dRMuonPreselection = cms.double( 0.2 ),\n truthMatch = cms.bool( False ),\n HBHERecHitCollectionLabel = cms.InputTag( \"hltHbhereco\" ),\n useHcal = cms.bool( True )\n ),\n JetExtractorPSet = cms.PSet( \n PrintTimeReport = cms.untracked.bool( False ),\n ExcludeMuonVeto = cms.bool( True ),\n TrackAssociatorParameters = cms.PSet( \n muonMaxDistanceSigmaX = cms.double( 0.0 ),\n muonMaxDistanceSigmaY = cms.double( 0.0 ),\n CSCSegmentCollectionLabel = cms.InputTag( \"hltCscSegments\" ),\n dRHcal = cms.double( 0.5 ),\n dRPreshowerPreselection = cms.double( 0.2 ),\n CaloTowerCollectionLabel = cms.InputTag( \"hltTowerMakerForPF\" ),\n useEcal = cms.bool( False ),\n dREcal = cms.double( 0.5 ),\n dREcalPreselection = cms.double( 0.5 ),\n HORecHitCollectionLabel = cms.InputTag( \"hltHoreco\" ),\n dRMuon = cms.double( 9999.0 ),\n propagateAllDirections = cms.bool( True ),\n muonMaxDistanceX = cms.double( 5.0 ),\n muonMaxDistanceY = cms.double( 5.0 ),\n useHO = cms.bool( False ),\n trajectoryUncertaintyTolerance = cms.double( -1.0 ),\n usePreshower = cms.bool( False ),\n DTRecSegment4DCollectionLabel = cms.InputTag( \"hltDt4DSegments\" ),\n EERecHitCollectionLabel = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),\n dRHcalPreselection = cms.double( 0.5 ),\n useMuon = cms.bool( False ),\n useCalo = cms.bool( True ),\n accountForTrajectoryChangeCalo = cms.bool( False ),\n EBRecHitCollectionLabel = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),\n dRMuonPreselection = cms.double( 0.2 ),\n truthMatch = cms.bool( False ),\n HBHERecHitCollectionLabel = cms.InputTag( \"hltHbhereco\" ),\n useHcal = cms.bool( False )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),\n RPCLayers = cms.bool( False ),\n UseMuonNavigation = cms.untracked.bool( False )\n ),\n ComponentName = cms.string( \"JetExtractor\" ),\n DR_Max = cms.double( 1.0 ),\n PropagatorName = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n JetCollectionLabel = cms.InputTag( \"hltAK4CaloJetsPFEt5\" ),\n DR_Veto = cms.double( 0.1 ),\n Threshold = cms.double( 5.0 )\n ),\n fillGlobalTrackQuality = cms.bool( False ),\n minPCaloMuon = cms.double( 1.0E9 ),\n maxAbsDy = cms.double( 9999.0 ),\n fillCaloCompatibility = cms.bool( True ),\n fillMatching = cms.bool( True ),\n MuonCaloCompatibility = cms.PSet( \n allSiPMHO = cms.bool( False ),\n PionTemplateFileName = cms.FileInPath( \"RecoMuon/MuonIdentification/data/MuID_templates_pions_lowPt_3_1_norm.root\" ),\n MuonTemplateFileName = cms.FileInPath( \"RecoMuon/MuonIdentification/data/MuID_templates_muons_lowPt_3_1_norm.root\" ),\n delta_eta = cms.double( 0.02 ),\n delta_phi = cms.double( 0.02 )\n ),\n fillTrackerKink = cms.bool( False ),\n hcalDepositName = cms.string( \"hcal\" ),\n sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double( 2.0 ),\n inputCollectionLabels = cms.VInputTag( 
'hltPFMuonMerging','hltMuonLinks','hltL2Muons' ),\n trackDepositName = cms.string( \"tracker\" ),\n maxAbsDx = cms.double( 3.0 ),\n ptThresholdToFillCandidateP4WithGlobalFit = cms.double( 200.0 ),\n minNumberOfMatches = cms.int32( 1 )\n)\nhltEcalPreshowerDigis = cms.EDProducer( \"ESRawToDigi\",\n sourceTag = cms.InputTag( \"rawDataCollector\" ),\n debugMode = cms.untracked.bool( False ),\n InstanceES = cms.string( \"\" ),\n ESdigiCollection = cms.string( \"\" ),\n LookupTable = cms.FileInPath( \"EventFilter/ESDigiToRaw/data/ES_lookup_table.dat\" )\n)\nhltEcalPreshowerRecHit = cms.EDProducer( \"ESRecHitProducer\",\n ESRecoAlgo = cms.int32( 0 ),\n ESrechitCollection = cms.string( \"EcalRecHitsES\" ),\n algo = cms.string( \"ESRecHitWorker\" ),\n ESdigiCollection = cms.InputTag( \"hltEcalPreshowerDigis\" )\n)\nhltParticleFlowRecHitECALUnseeded = cms.EDProducer( \"PFRecHitProducer\",\n producers = cms.VPSet( \n cms.PSet( src = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),\n qualityTests = cms.VPSet( \n cms.PSet( threshold = cms.double( 0.08 ),\n name = cms.string( \"PFRecHitQTestThreshold\" )\n ),\n cms.PSet( timingCleaning = cms.bool( True ),\n topologicalCleaning = cms.bool( True ),\n cleaningThreshold = cms.double( 2.0 ),\n skipTTRecoveredHits = cms.bool( True ),\n name = cms.string( \"PFRecHitQTestECAL\" )\n )\n ),\n name = cms.string( \"PFEBRecHitCreator\" )\n ),\n cms.PSet( src = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),\n qualityTests = cms.VPSet( \n cms.PSet( threshold = cms.double( 0.3 ),\n name = cms.string( \"PFRecHitQTestThreshold\" )\n ),\n cms.PSet( timingCleaning = cms.bool( True ),\n topologicalCleaning = cms.bool( True ),\n cleaningThreshold = cms.double( 2.0 ),\n skipTTRecoveredHits = cms.bool( True ),\n name = cms.string( \"PFRecHitQTestECAL\" )\n )\n ),\n name = cms.string( \"PFEERecHitCreator\" )\n )\n ),\n navigator = cms.PSet( \n barrel = cms.PSet( ),\n endcap = cms.PSet( ),\n name = cms.string( \"PFRecHitECALNavigator\" )\n )\n)\nhltParticleFlowRecHitHCAL = cms.EDProducer( \"PFCTRecHitProducer\",\n ECAL_Compensate = cms.bool( False ),\n ECAL_Dead_Code = cms.uint32( 10 ),\n MinLongTiming_Cut = cms.double( -5.0 ),\n ECAL_Compensation = cms.double( 0.5 ),\n MaxLongTiming_Cut = cms.double( 5.0 ),\n weight_HFhad = cms.double( 1.0 ),\n ApplyPulseDPG = cms.bool( False ),\n navigator = cms.PSet( name = cms.string( \"PFRecHitCaloTowerNavigator\" ) ),\n ECAL_Threshold = cms.double( 10.0 ),\n ApplyTimeDPG = cms.bool( False ),\n caloTowers = cms.InputTag( \"hltTowerMakerForPF\" ),\n hcalRecHitsHBHE = cms.InputTag( \"hltHbhereco\" ),\n LongFibre_Fraction = cms.double( 0.1 ),\n MaxShortTiming_Cut = cms.double( 5.0 ),\n HcalMaxAllowedHFLongShortSev = cms.int32( 9 ),\n thresh_Barrel = cms.double( 0.4 ),\n navigation_HF = cms.bool( True ),\n HcalMaxAllowedHFInTimeWindowSev = cms.int32( 9 ),\n HF_Calib_29 = cms.double( 1.07 ),\n LongFibre_Cut = cms.double( 120.0 ),\n EM_Depth = cms.double( 22.0 ),\n weight_HFem = cms.double( 1.0 ),\n LongShortFibre_Cut = cms.double( 1.0E9 ),\n MinShortTiming_Cut = cms.double( -5.0 ),\n HCAL_Calib = cms.bool( True ),\n thresh_HF = cms.double( 0.4 ),\n HcalMaxAllowedHFDigiTimeSev = cms.int32( 9 ),\n thresh_Endcap = cms.double( 0.4 ),\n HcalMaxAllowedChannelStatusSev = cms.int32( 9 ),\n hcalRecHitsHF = cms.InputTag( \"hltHfreco\" ),\n ShortFibre_Cut = cms.double( 60.0 ),\n ApplyLongShortDPG = cms.bool( True ),\n HF_Calib = cms.bool( True ),\n HAD_Depth = cms.double( 47.0 ),\n ShortFibre_Fraction = cms.double( 0.01 ),\n HCAL_Calib_29 = cms.double( 
1.35 )\n)\nhltParticleFlowRecHitPSUnseeded = cms.EDProducer( \"PFRecHitProducer\",\n producers = cms.VPSet( \n cms.PSet( src = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),\n qualityTests = cms.VPSet( \n cms.PSet( threshold = cms.double( 7.0E-6 ),\n name = cms.string( \"PFRecHitQTestThreshold\" )\n )\n ),\n name = cms.string( \"PFPSRecHitCreator\" )\n )\n ),\n navigator = cms.PSet( name = cms.string( \"PFRecHitPreshowerNavigator\" ) )\n)\nhltParticleFlowClusterECALUncorrectedUnseeded = cms.EDProducer( \"PFClusterProducer\",\n pfClusterBuilder = cms.PSet( \n positionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.08 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( 9 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n ),\n minFracTot = cms.double( 1.0E-20 ),\n positionCalcForConvergence = cms.PSet( \n minFractionInCalc = cms.double( 0.0 ),\n W0 = cms.double( 4.2 ),\n minAllowedNormalization = cms.double( 0.0 ),\n T0_EB = cms.double( 7.4 ),\n X0 = cms.double( 0.89 ),\n T0_ES = cms.double( 1.2 ),\n T0_EE = cms.double( 3.1 ),\n algoName = cms.string( \"ECAL2DPositionCalcWithDepthCorr\" )\n ),\n maxIterations = cms.uint32( 50 ),\n stoppingTolerance = cms.double( 1.0E-8 ),\n minFractionToKeep = cms.double( 1.0E-7 ),\n excludeOtherSeeds = cms.bool( True ),\n showerSigma = cms.double( 1.5 ),\n recHitEnergyNorms = cms.VPSet( \n cms.PSet( detector = cms.string( \"ECAL_BARREL\" ),\n recHitEnergyNorm = cms.double( 0.08 )\n ),\n cms.PSet( detector = cms.string( \"ECAL_ENDCAP\" ),\n recHitEnergyNorm = cms.double( 0.3 )\n )\n ),\n algoName = cms.string( \"Basic2DGenericPFlowClusterizer\" ),\n allCellsPositionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.08 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( -1 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n )\n ),\n positionReCalc = cms.PSet( \n minFractionInCalc = cms.double( 0.0 ),\n W0 = cms.double( 4.2 ),\n minAllowedNormalization = cms.double( 0.0 ),\n T0_EB = cms.double( 7.4 ),\n X0 = cms.double( 0.89 ),\n T0_ES = cms.double( 1.2 ),\n T0_EE = cms.double( 3.1 ),\n algoName = cms.string( \"ECAL2DPositionCalcWithDepthCorr\" )\n ),\n initialClusteringStep = cms.PSet( \n thresholdsByDetector = cms.VPSet( \n cms.PSet( gatheringThreshold = cms.double( 0.08 ),\n detector = cms.string( \"ECAL_BARREL\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n ),\n cms.PSet( gatheringThreshold = cms.double( 0.3 ),\n detector = cms.string( \"ECAL_ENDCAP\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n )\n ),\n useCornerCells = cms.bool( True ),\n algoName = cms.string( \"Basic2DGenericTopoClusterizer\" )\n ),\n energyCorrector = cms.PSet( ),\n recHitCleaners = cms.VPSet( \n cms.PSet( cleaningByDetector = cms.VPSet( \n cms.PSet( doubleSpikeS6S2 = cms.double( 0.04 ),\n fractionThresholdModifier = cms.double( 3.0 ),\n doubleSpikeThresh = cms.double( 10.0 ),\n minS4S1_b = cms.double( -0.024 ),\n singleSpikeThresh = cms.double( 4.0 ),\n detector = cms.string( \"ECAL_BARREL\" ),\n minS4S1_a = cms.double( 0.04 ),\n energyThresholdModifier = cms.double( 2.0 )\n ),\n cms.PSet( doubleSpikeS6S2 = cms.double( -1.0 ),\n fractionThresholdModifier = cms.double( 3.0 ),\n doubleSpikeThresh = cms.double( 1.0E9 ),\n minS4S1_b = cms.double( -0.0125 ),\n singleSpikeThresh = cms.double( 15.0 ),\n detector = cms.string( \"ECAL_ENDCAP\" ),\n minS4S1_a = cms.double( 0.02 
),\n energyThresholdModifier = cms.double( 2.0 )\n )\n),\n algoName = cms.string( \"SpikeAndDoubleSpikeCleaner\" )\n )\n ),\n seedFinder = cms.PSet( \n nNeighbours = cms.int32( 8 ),\n thresholdsByDetector = cms.VPSet( \n cms.PSet( seedingThreshold = cms.double( 0.6 ),\n seedingThresholdPt = cms.double( 0.15 ),\n detector = cms.string( \"ECAL_ENDCAP\" )\n ),\n cms.PSet( seedingThreshold = cms.double( 0.23 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"ECAL_BARREL\" )\n )\n ),\n algoName = cms.string( \"LocalMaximumSeedFinder\" )\n ),\n recHitsSource = cms.InputTag( \"hltParticleFlowRecHitECALUnseeded\" )\n)\nhltParticleFlowClusterPSUnseeded = cms.EDProducer( \"PFClusterProducer\",\n pfClusterBuilder = cms.PSet( \n minFracTot = cms.double( 1.0E-20 ),\n positionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 6.0E-5 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( -1 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n ),\n maxIterations = cms.uint32( 50 ),\n stoppingTolerance = cms.double( 1.0E-8 ),\n minFractionToKeep = cms.double( 1.0E-7 ),\n excludeOtherSeeds = cms.bool( True ),\n showerSigma = cms.double( 0.3 ),\n recHitEnergyNorms = cms.VPSet( \n cms.PSet( detector = cms.string( \"PS1\" ),\n recHitEnergyNorm = cms.double( 6.0E-5 )\n ),\n cms.PSet( detector = cms.string( \"PS2\" ),\n recHitEnergyNorm = cms.double( 6.0E-5 )\n )\n ),\n algoName = cms.string( \"Basic2DGenericPFlowClusterizer\" )\n ),\n positionReCalc = cms.PSet( ),\n initialClusteringStep = cms.PSet( \n thresholdsByDetector = cms.VPSet( \n cms.PSet( gatheringThreshold = cms.double( 6.0E-5 ),\n detector = cms.string( \"PS1\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n ),\n cms.PSet( gatheringThreshold = cms.double( 6.0E-5 ),\n detector = cms.string( \"PS2\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n )\n ),\n useCornerCells = cms.bool( False ),\n algoName = cms.string( \"Basic2DGenericTopoClusterizer\" )\n ),\n energyCorrector = cms.PSet( ),\n recHitCleaners = cms.VPSet( \n ),\n seedFinder = cms.PSet( \n nNeighbours = cms.int32( 4 ),\n thresholdsByDetector = cms.VPSet( \n cms.PSet( seedingThreshold = cms.double( 1.2E-4 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"PS1\" )\n ),\n cms.PSet( seedingThreshold = cms.double( 1.2E-4 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"PS2\" )\n )\n ),\n algoName = cms.string( \"LocalMaximumSeedFinder\" )\n ),\n recHitsSource = cms.InputTag( \"hltParticleFlowRecHitPSUnseeded\" )\n)\nhltParticleFlowClusterECALUnseeded = cms.EDProducer( \"CorrectedECALPFClusterProducer\",\n inputPS = cms.InputTag( \"hltParticleFlowClusterPSUnseeded\" ),\n minimumPSEnergy = cms.double( 0.0 ),\n energyCorrector = cms.PSet( \n applyCrackCorrections = cms.bool( False ),\n algoName = cms.string( \"PFClusterEMEnergyCorrector\" )\n ),\n inputECAL = cms.InputTag( \"hltParticleFlowClusterECALUncorrectedUnseeded\" )\n)\nhltParticleFlowClusterHCAL = cms.EDProducer( \"PFClusterProducer\",\n pfClusterBuilder = cms.PSet( \n positionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.8 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( 5 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n ),\n minFracTot = cms.double( 1.0E-20 ),\n maxIterations = cms.uint32( 50 ),\n stoppingTolerance = cms.double( 1.0E-8 ),\n minFractionToKeep = cms.double( 
1.0E-7 ),\n excludeOtherSeeds = cms.bool( True ),\n showerSigma = cms.double( 10.0 ),\n recHitEnergyNorms = cms.VPSet( \n cms.PSet( detector = cms.string( \"HCAL_BARREL1\" ),\n recHitEnergyNorm = cms.double( 0.8 )\n ),\n cms.PSet( detector = cms.string( \"HCAL_ENDCAP\" ),\n recHitEnergyNorm = cms.double( 0.8 )\n )\n ),\n algoName = cms.string( \"Basic2DGenericPFlowClusterizer\" ),\n allCellsPositionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.8 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( -1 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n )\n ),\n positionReCalc = cms.PSet( ),\n initialClusteringStep = cms.PSet( \n thresholdsByDetector = cms.VPSet( \n cms.PSet( gatheringThreshold = cms.double( 0.8 ),\n detector = cms.string( \"HCAL_BARREL1\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n ),\n cms.PSet( gatheringThreshold = cms.double( 0.8 ),\n detector = cms.string( \"HCAL_ENDCAP\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n )\n ),\n useCornerCells = cms.bool( True ),\n algoName = cms.string( \"Basic2DGenericTopoClusterizer\" )\n ),\n energyCorrector = cms.PSet( ),\n recHitCleaners = cms.VPSet( \n cms.PSet( algoName = cms.string( \"RBXAndHPDCleaner\" ) )\n ),\n seedFinder = cms.PSet( \n nNeighbours = cms.int32( 4 ),\n thresholdsByDetector = cms.VPSet( \n cms.PSet( seedingThreshold = cms.double( 0.8 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"HCAL_BARREL1\" )\n ),\n cms.PSet( seedingThreshold = cms.double( 1.1 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"HCAL_ENDCAP\" )\n )\n ),\n algoName = cms.string( \"LocalMaximumSeedFinder\" )\n ),\n recHitsSource = cms.InputTag( \"hltParticleFlowRecHitHCAL\" )\n)\nhltParticleFlowClusterHFEM = cms.EDProducer( \"PFClusterProducer\",\n pfClusterBuilder = cms.PSet( \n positionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.8 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( 5 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n ),\n minFracTot = cms.double( 1.0E-20 ),\n maxIterations = cms.uint32( 50 ),\n stoppingTolerance = cms.double( 1.0E-8 ),\n minFractionToKeep = cms.double( 1.0E-7 ),\n excludeOtherSeeds = cms.bool( True ),\n showerSigma = cms.double( 10.0 ),\n recHitEnergyNorms = cms.VPSet( \n cms.PSet( detector = cms.string( \"HF_EM\" ),\n recHitEnergyNorm = cms.double( 0.8 )\n )\n ),\n algoName = cms.string( \"Basic2DGenericPFlowClusterizer\" ),\n allCellsPositionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.8 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( -1 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n )\n ),\n positionReCalc = cms.PSet( ),\n initialClusteringStep = cms.PSet( \n thresholdsByDetector = cms.VPSet( \n cms.PSet( gatheringThreshold = cms.double( 0.8 ),\n detector = cms.string( \"HF_EM\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n )\n ),\n useCornerCells = cms.bool( False ),\n algoName = cms.string( \"Basic2DGenericTopoClusterizer\" )\n ),\n energyCorrector = cms.PSet( ),\n recHitCleaners = cms.VPSet( \n cms.PSet( cleaningByDetector = cms.VPSet( \n cms.PSet( doubleSpikeS6S2 = cms.double( -1.0 ),\n fractionThresholdModifier = cms.double( 1.0 ),\n doubleSpikeThresh = cms.double( 1.0E9 ),\n minS4S1_b = cms.double( -0.19 ),\n singleSpikeThresh = 
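# --- Editor's illustrative aside (plain Python, not part of the HLT dump) ---
# Every PFClusterProducer above follows the same two-stage pattern: a
# LocalMaximumSeedFinder keeps cells above seedingThreshold that are local
# maxima among nNeighbours, then Basic2DGenericTopoClusterizer grows a
# topo-cluster from each seed over contiguous cells above gatheringThreshold.
# A 1-D toy version (cell energies and thresholds hypothetical):
energies = [0.1, 0.9, 2.5, 1.0, 0.2, 0.05, 1.3, 0.7]
SEED_THR, GATHER_THR = 1.2, 0.8

def is_seed(i):
    if energies[i] < SEED_THR:
        return False
    nbrs = [energies[j] for j in (i - 1, i + 1) if 0 <= j < len(energies)]
    return all(energies[i] > e for e in nbrs)

def grow(i):
    lo = hi = i   # walk outwards while cells stay above the gathering threshold
    while lo - 1 >= 0 and energies[lo - 1] >= GATHER_THR:
        lo -= 1
    while hi + 1 < len(energies) and energies[hi + 1] >= GATHER_THR:
        hi += 1
    return list(range(lo, hi + 1))

clusters = [grow(i) for i in range(len(energies)) if is_seed(i)]
assert clusters == [[1, 2, 3], [6]]   # two topo-clusters, seeded at cells 2 and 6
# -----------------------------------------------------------------------------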
cms.double( 80.0 ),\n detector = cms.string( \"HF_EM\" ),\n minS4S1_a = cms.double( 0.11 ),\n energyThresholdModifier = cms.double( 1.0 )\n )\n),\n algoName = cms.string( \"SpikeAndDoubleSpikeCleaner\" )\n )\n ),\n seedFinder = cms.PSet( \n nNeighbours = cms.int32( 0 ),\n thresholdsByDetector = cms.VPSet( \n cms.PSet( seedingThreshold = cms.double( 1.4 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"HF_EM\" )\n )\n ),\n algoName = cms.string( \"LocalMaximumSeedFinder\" )\n ),\n recHitsSource = cms.InputTag( 'hltParticleFlowRecHitHCAL','HFEM' )\n)\nhltParticleFlowClusterHFHAD = cms.EDProducer( \"PFClusterProducer\",\n pfClusterBuilder = cms.PSet( \n positionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.8 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( 5 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n ),\n minFracTot = cms.double( 1.0E-20 ),\n maxIterations = cms.uint32( 50 ),\n stoppingTolerance = cms.double( 1.0E-8 ),\n minFractionToKeep = cms.double( 1.0E-7 ),\n excludeOtherSeeds = cms.bool( True ),\n showerSigma = cms.double( 10.0 ),\n recHitEnergyNorms = cms.VPSet( \n cms.PSet( detector = cms.string( \"HF_HAD\" ),\n recHitEnergyNorm = cms.double( 0.8 )\n )\n ),\n algoName = cms.string( \"Basic2DGenericPFlowClusterizer\" ),\n allCellsPositionCalc = cms.PSet( \n minFractionInCalc = cms.double( 1.0E-9 ),\n logWeightDenominator = cms.double( 0.8 ),\n minAllowedNormalization = cms.double( 1.0E-9 ),\n posCalcNCrystals = cms.int32( -1 ),\n algoName = cms.string( \"Basic2DGenericPFlowPositionCalc\" )\n )\n ),\n positionReCalc = cms.PSet( ),\n initialClusteringStep = cms.PSet( \n thresholdsByDetector = cms.VPSet( \n cms.PSet( gatheringThreshold = cms.double( 0.8 ),\n detector = cms.string( \"HF_HAD\" ),\n gatheringThresholdPt = cms.double( 0.0 )\n )\n ),\n useCornerCells = cms.bool( False ),\n algoName = cms.string( \"Basic2DGenericTopoClusterizer\" )\n ),\n energyCorrector = cms.PSet( ),\n recHitCleaners = cms.VPSet( \n cms.PSet( cleaningByDetector = cms.VPSet( \n cms.PSet( doubleSpikeS6S2 = cms.double( -1.0 ),\n fractionThresholdModifier = cms.double( 1.0 ),\n doubleSpikeThresh = cms.double( 1.0E9 ),\n minS4S1_b = cms.double( -0.08 ),\n singleSpikeThresh = cms.double( 120.0 ),\n detector = cms.string( \"HF_HAD\" ),\n minS4S1_a = cms.double( 0.045 ),\n energyThresholdModifier = cms.double( 1.0 )\n )\n),\n algoName = cms.string( \"SpikeAndDoubleSpikeCleaner\" )\n )\n ),\n seedFinder = cms.PSet( \n nNeighbours = cms.int32( 0 ),\n thresholdsByDetector = cms.VPSet( \n cms.PSet( seedingThreshold = cms.double( 1.4 ),\n seedingThresholdPt = cms.double( 0.0 ),\n detector = cms.string( \"HF_HAD\" )\n )\n ),\n algoName = cms.string( \"LocalMaximumSeedFinder\" )\n ),\n recHitsSource = cms.InputTag( 'hltParticleFlowRecHitHCAL','HFHAD' )\n)\nhltLightPFTracks = cms.EDProducer( \"LightPFTrackProducer\",\n TrackQuality = cms.string( \"none\" ),\n UseQuality = cms.bool( False ),\n TkColList = cms.VInputTag( 'hltPFMuonMerging' )\n)\nhltParticleFlowBlock = cms.EDProducer( \"PFBlockProducer\",\n debug = cms.untracked.bool( False ),\n linkDefinitions = cms.VPSet( \n cms.PSet( useKDTree = cms.bool( True ),\n linkType = cms.string( \"PS1:ECAL\" ),\n linkerName = cms.string( \"PreshowerAndECALLinker\" )\n ),\n cms.PSet( useKDTree = cms.bool( True ),\n linkType = cms.string( \"PS2:ECAL\" ),\n linkerName = cms.string( \"PreshowerAndECALLinker\" )\n ),\n cms.PSet( useKDTree = cms.bool( 
True ),\n linkType = cms.string( \"TRACK:ECAL\" ),\n linkerName = cms.string( \"TrackAndECALLinker\" )\n ),\n cms.PSet( useKDTree = cms.bool( True ),\n linkType = cms.string( \"TRACK:HCAL\" ),\n linkerName = cms.string( \"TrackAndHCALLinker\" )\n ),\n cms.PSet( useKDTree = cms.bool( False ),\n linkType = cms.string( \"ECAL:HCAL\" ),\n linkerName = cms.string( \"ECALAndHCALLinker\" )\n ),\n cms.PSet( useKDTree = cms.bool( False ),\n linkType = cms.string( \"HFEM:HFHAD\" ),\n linkerName = cms.string( \"HFEMAndHFHADLinker\" )\n )\n ),\n elementImporters = cms.VPSet( \n cms.PSet( importerName = cms.string( \"GeneralTracksImporter\" ),\n useIterativeTracking = cms.bool( False ),\n source = cms.InputTag( \"hltLightPFTracks\" ),\n NHitCuts_byTrackAlgo = cms.vuint32( 3, 3, 3, 3, 3 ),\n muonSrc = cms.InputTag( \"hltMuons\" ),\n DPtOverPtCuts_byTrackAlgo = cms.vdouble( 0.5, 0.5, 0.5, 0.5, 0.5 )\n ),\n cms.PSet( importerName = cms.string( \"ECALClusterImporter\" ),\n source = cms.InputTag( \"hltParticleFlowClusterECALUnseeded\" ),\n BCtoPFCMap = cms.InputTag( \"\" )\n ),\n cms.PSet( importerName = cms.string( \"GenericClusterImporter\" ),\n source = cms.InputTag( \"hltParticleFlowClusterHCAL\" )\n ),\n cms.PSet( importerName = cms.string( \"GenericClusterImporter\" ),\n source = cms.InputTag( \"hltParticleFlowClusterHFEM\" )\n ),\n cms.PSet( importerName = cms.string( \"GenericClusterImporter\" ),\n source = cms.InputTag( \"hltParticleFlowClusterHFHAD\" )\n ),\n cms.PSet( importerName = cms.string( \"GenericClusterImporter\" ),\n source = cms.InputTag( \"hltParticleFlowClusterPSUnseeded\" )\n )\n ),\n verbose = cms.untracked.bool( False )\n)\nhltParticleFlow = cms.EDProducer( \"PFProducer\",\n photon_SigmaiEtaiEta_endcap = cms.double( 0.034 ),\n minPtForPostCleaning = cms.double( 20.0 ),\n pf_nsigma_ECAL = cms.double( 0.0 ),\n GedPhotonValueMap = cms.InputTag( 'tmpGedPhotons','valMapPFEgammaCandToPhoton' ),\n sumPtTrackIsoForPhoton = cms.double( -1.0 ),\n metFactorForFakes = cms.double( 4.0 ),\n muon_HO = cms.vdouble( 0.9, 0.9 ),\n electron_missinghits = cms.uint32( 1 ),\n metSignificanceForCleaning = cms.double( 3.0 ),\n usePFPhotons = cms.bool( False ),\n dptRel_DispVtx = cms.double( 10.0 ),\n nTrackIsoForEgammaSC = cms.uint32( 2 ),\n pf_nsigma_HCAL = cms.double( 1.0 ),\n cosmicRejectionDistance = cms.double( 1.0 ),\n useEGammaFilters = cms.bool( False ),\n useEGammaElectrons = cms.bool( False ),\n nsigma_TRACK = cms.double( 1.0 ),\n useEGammaSupercluster = cms.bool( False ),\n sumPtTrackIsoForEgammaSC_barrel = cms.double( 4.0 ),\n eventFractionForCleaning = cms.double( 0.8 ),\n usePFDecays = cms.bool( False ),\n rejectTracks_Step45 = cms.bool( False ),\n eventFractionForRejection = cms.double( 0.8 ),\n photon_MinEt = cms.double( 10.0 ),\n usePFNuclearInteractions = cms.bool( False ),\n maxSignificance = cms.double( 2.5 ),\n electron_iso_mva_endcap = cms.double( -0.1075 ),\n debug = cms.untracked.bool( False ),\n pf_convID_mvaWeightFile = cms.string( \"RecoParticleFlow/PFProducer/data/MVAnalysis_BDT.weights_pfConversionAug0411.txt\" ),\n calibHF_eta_step = cms.vdouble( 0.0, 2.9, 3.0, 3.2, 4.2, 4.4, 4.6, 4.8, 5.2, 5.4 ),\n ptErrorScale = cms.double( 8.0 ),\n minSignificance = cms.double( 2.5 ),\n minMomentumForPunchThrough = cms.double( 100.0 ),\n pf_conv_mvaCut = cms.double( 0.0 ),\n useCalibrationsFromDB = cms.bool( True ),\n usePFElectrons = cms.bool( False ),\n electron_iso_combIso_endcap = cms.double( 10.0 ),\n photon_combIso = cms.double( 10.0 ),\n electron_iso_mva_barrel = cms.double( -0.1875 
),\n postHFCleaning = cms.bool( False ),\n factors_45 = cms.vdouble( 10.0, 100.0 ),\n cleanedHF = cms.VInputTag( 'hltParticleFlowRecHitHCAL:Cleaned','hltParticleFlowClusterHFHAD:Cleaned','hltParticleFlowClusterHFEM:Cleaned' ),\n coneEcalIsoForEgammaSC = cms.double( 0.3 ),\n minSignificanceReduction = cms.double( 1.4 ),\n photon_SigmaiEtaiEta_barrel = cms.double( 0.0125 ),\n calibHF_b_HADonly = cms.vdouble( 1.27541, 0.85361, 0.86333, 0.89091, 0.94348, 0.94348, 0.9437, 1.0034, 1.0444, 1.0444 ),\n minPixelHits = cms.int32( 1 ),\n maxDPtOPt = cms.double( 1.0 ),\n useHO = cms.bool( False ),\n pf_electron_output_col = cms.string( \"electrons\" ),\n electron_noniso_mvaCut = cms.double( -0.1 ),\n GedElectronValueMap = cms.InputTag( \"gedGsfElectronsTmp\" ),\n useVerticesForNeutral = cms.bool( True ),\n pf_Res_mvaWeightFile = cms.string( \"RecoParticleFlow/PFProducer/data/TMVARegression_BDTG_PFRes.root\" ),\n PFEGammaCandidates = cms.InputTag( \"particleFlowEGamma\" ),\n sumPtTrackIsoSlopeForPhoton = cms.double( -1.0 ),\n coneTrackIsoForEgammaSC = cms.double( 0.3 ),\n minDeltaMet = cms.double( 0.4 ),\n pt_Error = cms.double( 1.0 ),\n useProtectionsForJetMET = cms.bool( True ),\n metFactorForRejection = cms.double( 4.0 ),\n sumPtTrackIsoForEgammaSC_endcap = cms.double( 4.0 ),\n calibHF_use = cms.bool( False ),\n verbose = cms.untracked.bool( False ),\n usePFConversions = cms.bool( False ),\n trackQuality = cms.string( \"highPurity\" ),\n calibPFSCEle_endcap = cms.vdouble( 1.153, -16.5975, 5.668, -0.1772, 16.22, 7.326, 0.0483, -4.068, 9.406 ),\n metFactorForCleaning = cms.double( 4.0 ),\n eventFactorForCosmics = cms.double( 10.0 ),\n egammaElectrons = cms.InputTag( \"\" ),\n minEnergyForPunchThrough = cms.double( 100.0 ),\n minTrackerHits = cms.int32( 8 ),\n iCfgCandConnector = cms.PSet( \n bCalibSecondary = cms.bool( False ),\n bCalibPrimary = cms.bool( False ),\n bCorrect = cms.bool( False ),\n nuclCalibFactors = cms.vdouble( 0.8, 0.15, 0.5, 0.5, 0.05 )\n ),\n rejectTracks_Bad = cms.bool( False ),\n pf_electronID_crackCorrection = cms.bool( False ),\n pf_locC_mvaWeightFile = cms.string( \"RecoParticleFlow/PFProducer/data/TMVARegression_BDTG_PFClusterCorr.root\" ),\n calibHF_a_EMonly = cms.vdouble( 0.96945, 0.96701, 0.76309, 0.82268, 0.87583, 0.89718, 0.98674, 1.4681, 1.458, 1.458 ),\n muons = cms.InputTag( \"hltMuons\" ),\n metFactorForHighEta = cms.double( 25.0 ),\n minHFCleaningPt = cms.double( 5.0 ),\n muon_HCAL = cms.vdouble( 3.0, 3.0 ),\n pf_electron_mvaCut = cms.double( -0.1 ),\n ptFactorForHighEta = cms.double( 2.0 ),\n maxDeltaPhiPt = cms.double( 7.0 ),\n pf_electronID_mvaWeightFile = cms.string( \"RecoParticleFlow/PFProducer/data/MVAnalysis_BDT.weights_PfElectrons23Jan_IntToFloat.txt\" ),\n sumEtEcalIsoForEgammaSC_endcap = cms.double( 2.0 ),\n calibHF_b_EMHAD = cms.vdouble( 1.27541, 0.85361, 0.86333, 0.89091, 0.94348, 0.94348, 0.9437, 1.0034, 1.0444, 1.0444 ),\n pf_GlobC_mvaWeightFile = cms.string( \"RecoParticleFlow/PFProducer/data/TMVARegression_BDTG_PFGlobalCorr.root\" ),\n photon_HoE = cms.double( 0.1 ),\n sumEtEcalIsoForEgammaSC_barrel = cms.double( 1.0 ),\n calibPFSCEle_Fbrem_endcap = cms.vdouble( 0.9, 6.5, -0.0692932, 0.101776, 0.995338, -0.00236548, 0.874998, 1.653, -0.0750184, 0.147, 0.923165, 4.74665E-4, 1.10782 ),\n punchThroughFactor = cms.double( 3.0 ),\n algoType = cms.uint32( 0 ),\n electron_iso_combIso_barrel = cms.double( 10.0 ),\n postMuonCleaning = cms.bool( True ),\n calibPFSCEle_barrel = cms.vdouble( 1.004, -1.536, 22.88, -1.467, 0.3555, 0.6227, 14.65, 2051.0, 25.0, 
0.9932, -0.5444, 0.0, 0.5438, 0.7109, 7.645, 0.2904, 0.0 ),\n electron_protectionsForJetMET = cms.PSet( \n maxE = cms.double( 50.0 ),\n maxTrackPOverEele = cms.double( 1.0 ),\n maxEcalEOverP_2 = cms.double( 0.2 ),\n maxHcalEOverEcalE = cms.double( 0.1 ),\n maxEcalEOverP_1 = cms.double( 0.5 ),\n maxHcalEOverP = cms.double( 1.0 ),\n maxEcalEOverPRes = cms.double( 0.2 ),\n maxHcalE = cms.double( 10.0 ),\n maxEeleOverPout = cms.double( 0.2 ),\n maxNtracks = cms.double( 3.0 ),\n maxEleHcalEOverEcalE = cms.double( 0.1 ),\n maxDPhiIN = cms.double( 0.1 ),\n maxEeleOverPoutRes = cms.double( 0.5 )\n ),\n electron_iso_pt = cms.double( 10.0 ),\n isolatedElectronID_mvaWeightFile = cms.string( \"RecoEgamma/ElectronIdentification/data/TMVA_BDTSimpleCat_17Feb2011.weights.xml\" ),\n vertexCollection = cms.InputTag( \"hltPixelVertices\" ),\n X0_Map = cms.string( \"RecoParticleFlow/PFProducer/data/allX0histos.root\" ),\n calibPFSCEle_Fbrem_barrel = cms.vdouble( 0.6, 6.0, -0.0255975, 0.0576727, 0.975442, -5.46394E-4, 1.26147, 25.0, -0.02025, 0.04537, 0.9728, -8.962E-4, 1.172 ),\n blocks = cms.InputTag( \"hltParticleFlowBlock\" ),\n punchThroughMETFactor = cms.double( 4.0 ),\n metSignificanceForRejection = cms.double( 4.0 ),\n photon_protectionsForJetMET = cms.PSet( \n sumPtTrackIsoSlope = cms.double( 0.001 ),\n sumPtTrackIso = cms.double( 2.0 )\n ),\n usePhotonReg = cms.bool( False ),\n dzPV = cms.double( 0.2 ),\n calibHF_a_EMHAD = cms.vdouble( 1.42215, 1.00496, 0.68961, 0.81656, 0.98504, 0.98504, 1.00802, 1.0593, 1.4576, 1.4576 ),\n useRegressionFromDB = cms.bool( False ),\n muon_ECAL = cms.vdouble( 0.5, 0.5 ),\n usePFSCEleCalib = cms.bool( True )\n)\nhltAK4PFJets = cms.EDProducer( \"FastjetJetProducer\",\n Active_Area_Repeats = cms.int32( 5 ),\n doAreaFastjet = cms.bool( False ),\n voronoiRfact = cms.double( -9.0 ),\n maxBadHcalCells = cms.uint32( 9999999 ),\n doAreaDiskApprox = cms.bool( True ),\n maxRecoveredEcalCells = cms.uint32( 9999999 ),\n jetType = cms.string( \"PFJet\" ),\n minSeed = cms.uint32( 0 ),\n Ghost_EtaMax = cms.double( 6.0 ),\n doRhoFastjet = cms.bool( False ),\n jetAlgorithm = cms.string( \"AntiKt\" ),\n nSigmaPU = cms.double( 1.0 ),\n GhostArea = cms.double( 0.01 ),\n Rho_EtaMax = cms.double( 4.4 ),\n maxBadEcalCells = cms.uint32( 9999999 ),\n useDeterministicSeed = cms.bool( True ),\n doPVCorrection = cms.bool( False ),\n maxRecoveredHcalCells = cms.uint32( 9999999 ),\n rParam = cms.double( 0.4 ),\n maxProblematicHcalCells = cms.uint32( 9999999 ),\n doOutputJets = cms.bool( True ),\n src = cms.InputTag( \"hltParticleFlow\" ),\n inputEtMin = cms.double( 0.0 ),\n puPtMin = cms.double( 10.0 ),\n srcPVs = cms.InputTag( \"hltPixelVertices\" ),\n jetPtMin = cms.double( 0.0 ),\n radiusPU = cms.double( 0.4 ),\n maxProblematicEcalCells = cms.uint32( 9999999 ),\n doPUOffsetCorr = cms.bool( False ),\n inputEMin = cms.double( 0.0 ),\n useMassDropTagger = cms.bool( False ),\n muMin = cms.double( -1.0 ),\n subtractorName = cms.string( \"\" ),\n muCut = cms.double( -1.0 ),\n subjetPtMin = cms.double( -1.0 ),\n useTrimming = cms.bool( False ),\n muMax = cms.double( -1.0 ),\n yMin = cms.double( -1.0 ),\n useFiltering = cms.bool( False ),\n rFilt = cms.double( -1.0 ),\n yMax = cms.double( -1.0 ),\n zcut = cms.double( -1.0 ),\n MinVtxNdof = cms.int32( 0 ),\n MaxVtxZ = cms.double( 15.0 ),\n UseOnlyVertexTracks = cms.bool( False ),\n dRMin = cms.double( -1.0 ),\n nFilt = cms.int32( -1 ),\n usePruning = cms.bool( False ),\n maxDepth = cms.int32( -1 ),\n yCut = cms.double( -1.0 ),\n DzTrVtxMax = cms.double( 
0.0 ),\n UseOnlyOnePV = cms.bool( False ),\n rcut_factor = cms.double( -1.0 ),\n sumRecHits = cms.bool( False ),\n trimPtFracMin = cms.double( -1.0 ),\n dRMax = cms.double( -1.0 ),\n DxyTrVtxMax = cms.double( 0.0 ),\n useCMSBoostedTauSeedingAlgorithm = cms.bool( False )\n)\nhltFixedGridRhoFastjetAll = cms.EDProducer( \"FixedGridRhoProducerFastjet\",\n gridSpacing = cms.double( 0.55 ),\n maxRapidity = cms.double( 5.0 ),\n pfCandidatesTag = cms.InputTag( \"hltParticleFlow\" )\n)\nhltAK4PFJetsCorrected = cms.EDProducer( \"PFJetCorrectionProducer\",\n src = cms.InputTag( \"hltAK4PFJets\" ),\n correctors = cms.vstring( 'hltESPAK4PFCorrection' )\n)\nhltPFHT = cms.EDProducer( \"HLTHtMhtProducer\",\n usePt = cms.bool( True ),\n minPtJetHt = cms.double( 40.0 ),\n maxEtaJetMht = cms.double( 999.0 ),\n minNJetMht = cms.int32( 0 ),\n jetsLabel = cms.InputTag( \"hltAK4PFJetsCorrected\" ),\n maxEtaJetHt = cms.double( 3.0 ),\n minPtJetMht = cms.double( 0.0 ),\n minNJetHt = cms.int32( 0 ),\n pfCandidatesLabel = cms.InputTag( \"hltParticleFlow\" ),\n excludePFMuons = cms.bool( False )\n)\nhltPFHT800 = cms.EDFilter( \"HLTHtMhtFilter\",\n saveTags = cms.bool( True ),\n mhtLabels = cms.VInputTag( 'hltPFHT' ),\n meffSlope = cms.vdouble( 1.0 ),\n minMeff = cms.vdouble( 0.0 ),\n minMht = cms.vdouble( 0.0 ),\n htLabels = cms.VInputTag( 'hltPFHT' ),\n minHt = cms.vdouble( 800.0 )\n)\nhltBoolEnd = cms.EDFilter( \"HLTBool\",\n result = cms.bool( True )\n)\n\nHLTL1UnpackerSequence = cms.Sequence( hltGtDigis + hltCaloStage1Digis + hltCaloStage1LegacyFormatDigis + hltL1GtObjectMap + hltL1extraParticles )\nHLTBeamSpot = cms.Sequence( hltScalersRawToDigi + hltOnlineBeamSpot )\nHLTBeginSequence = cms.Sequence( hltTriggerType + HLTL1UnpackerSequence + HLTBeamSpot )\nHLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence = cms.Sequence( hltEcalDigis + hltEcalUncalibRecHit + hltEcalDetIdToBeRecovered + hltEcalRecHit )\nHLTDoLocalHcalSequence = cms.Sequence( hltHcalDigis + hltHbhereco + hltHfreco + hltHoreco )\nHLTDoCaloSequence = cms.Sequence( HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + HLTDoLocalHcalSequence + hltTowerMakerForAll )\nHLTAK4CaloJetsReconstructionSequence = cms.Sequence( HLTDoCaloSequence + hltAK4CaloJets + hltAK4CaloJetsIDPassed )\nHLTAK4CaloJetsCorrectionSequence = cms.Sequence( hltFixedGridRhoFastjetAllCalo + hltAK4CaloJetsCorrected + hltAK4CaloJetsCorrectedIDPassed )\nHLTAK4CaloJetsSequence = cms.Sequence( HLTAK4CaloJetsReconstructionSequence + HLTAK4CaloJetsCorrectionSequence )\nHLTDoCaloSequencePF = cms.Sequence( HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + HLTDoLocalHcalSequence + hltTowerMakerForPF )\nHLTAK4CaloJetsPrePFRecoSequence = cms.Sequence( HLTDoCaloSequencePF + hltAK4CaloJetsPF )\nHLTPreAK4PFJetsRecoSequence = cms.Sequence( HLTAK4CaloJetsPrePFRecoSequence + hltAK4CaloJetsPFEt5 )\nHLTMuonLocalRecoSequence = cms.Sequence( hltMuonDTDigis + hltDt1DRecHits + hltDt4DSegments + hltMuonCSCDigis + hltCsc2DRecHits + hltCscSegments + hltMuonRPCDigis + hltRpcRecHits )\nHLTL2muonrecoNocandSequence = cms.Sequence( HLTMuonLocalRecoSequence + hltL2OfflineMuonSeeds + hltL2MuonSeeds + hltL2Muons )\nHLTL2muonrecoSequence = cms.Sequence( HLTL2muonrecoNocandSequence + hltL2MuonCandidates )\nHLTDoLocalPixelSequence = cms.Sequence( hltSiPixelDigis + hltSiPixelClusters + hltSiPixelClustersCache + hltSiPixelRecHits )\nHLTDoLocalStripSequence = cms.Sequence( hltSiStripExcludedFEDListProducer + hltSiStripRawToClustersFacility + hltSiStripClusters )\nHLTL3muonTkCandidateSequence = cms.Sequence( 
HLTDoLocalPixelSequence + HLTDoLocalStripSequence + hltL3TrajSeedOIState + hltL3TrackCandidateFromL2OIState + hltL3TkTracksFromL2OIState + hltL3MuonsOIState + hltL3TrajSeedOIHit + hltL3TrackCandidateFromL2OIHit + hltL3TkTracksFromL2OIHit + hltL3MuonsOIHit + hltL3TkFromL2OICombination + hltPixelLayerTriplets + hltPixelLayerPairs + hltMixedLayerPairs + hltL3TrajSeedIOHit + hltL3TrackCandidateFromL2IOHit + hltL3TkTracksFromL2IOHit + hltL3MuonsIOHit + hltL3TrajectorySeed + hltL3TrackCandidateFromL2 )\nHLTL3muonrecoNocandSequence = cms.Sequence( HLTL3muonTkCandidateSequence + hltL3TkTracksMergeStep1 + hltL3TkTracksFromL2 + hltL3MuonsLinksCombination + hltL3Muons )\nHLTL3muonrecoSequence = cms.Sequence( HLTL3muonrecoNocandSequence + hltL3MuonCandidates )\nHLTRecopixelvertexingSequence = cms.Sequence( hltPixelLayerTriplets + hltPixelTracks + hltPixelVertices + hltTrimmedPixelVertices )\nHLTIterativeTrackingIteration0 = cms.Sequence( hltIter0PFLowPixelSeedsFromPixelTracks + hltIter0PFlowCkfTrackCandidates + hltIter0PFlowCtfWithMaterialTracks + hltIter0PFlowTrackSelectionHighPurity )\nHLTIter0TrackAndTauJet4Iter1Sequence = cms.Sequence( hltTrackIter0RefsForJets4Iter1 + hltAK4Iter0TrackJets4Iter1 + hltIter0TrackAndTauJets4Iter1 )\nHLTIterativeTrackingIteration1 = cms.Sequence( hltIter1ClustersRefRemoval + hltIter1MaskedMeasurementTrackerEvent + hltIter1PixelLayerTriplets + hltIter1PFlowPixelSeeds + hltIter1PFlowCkfTrackCandidates + hltIter1PFlowCtfWithMaterialTracks + hltIter1PFlowTrackSelectionHighPurityLoose + hltIter1PFlowTrackSelectionHighPurityTight + hltIter1PFlowTrackSelectionHighPurity )\nHLTIter1TrackAndTauJets4Iter2Sequence = cms.Sequence( hltIter1TrackRefsForJets4Iter2 + hltAK4Iter1TrackJets4Iter2 + hltIter1TrackAndTauJets4Iter2 )\nHLTIterativeTrackingIteration2 = cms.Sequence( hltIter2ClustersRefRemoval + hltIter2MaskedMeasurementTrackerEvent + hltIter2PixelLayerPairs + hltIter2PFlowPixelSeeds + hltIter2PFlowCkfTrackCandidates + hltIter2PFlowCtfWithMaterialTracks + hltIter2PFlowTrackSelectionHighPurity )\nHLTIterativeTrackingIter02 = cms.Sequence( HLTIterativeTrackingIteration0 + HLTIter0TrackAndTauJet4Iter1Sequence + HLTIterativeTrackingIteration1 + hltIter1Merged + HLTIter1TrackAndTauJets4Iter2Sequence + HLTIterativeTrackingIteration2 + hltIter2Merged )\nHLTTrackReconstructionForPF = cms.Sequence( HLTDoLocalPixelSequence + HLTRecopixelvertexingSequence + HLTDoLocalStripSequence + HLTIterativeTrackingIter02 + hltPFMuonMerging + hltMuonLinks + hltMuons )\nHLTPreshowerSequence = cms.Sequence( hltEcalPreshowerDigis + hltEcalPreshowerRecHit )\nHLTParticleFlowSequence = cms.Sequence( HLTPreshowerSequence + hltParticleFlowRecHitECALUnseeded + hltParticleFlowRecHitHCAL + hltParticleFlowRecHitPSUnseeded + hltParticleFlowClusterECALUncorrectedUnseeded + hltParticleFlowClusterPSUnseeded + hltParticleFlowClusterECALUnseeded + hltParticleFlowClusterHCAL + hltParticleFlowClusterHFEM + hltParticleFlowClusterHFHAD + hltLightPFTracks + hltParticleFlowBlock + hltParticleFlow )\nHLTAK4PFJetsReconstructionSequence = cms.Sequence( HLTL2muonrecoSequence + HLTL3muonrecoSequence + HLTTrackReconstructionForPF + HLTParticleFlowSequence + hltAK4PFJets )\nHLTAK4PFJetsCorrectionSequence = cms.Sequence( hltFixedGridRhoFastjetAll + hltAK4PFJetsCorrected )\nHLTAK4PFJetsSequence = cms.Sequence( HLTPreAK4PFJetsRecoSequence + HLTAK4PFJetsReconstructionSequence + HLTAK4PFJetsCorrectionSequence )\nHLTEndSequence = cms.Sequence( hltBoolEnd )\n\nHLT_PFHT800_v1 = cms.Path( HLTBeginSequence + hltL1sL1HTT150ORHTT175 + 
hltPrePFHT800 + HLTAK4CaloJetsSequence + hltHtMht + hltHt700 + HLTAK4PFJetsSequence + hltPFHT + hltPFHT800 + HLTEndSequence )\n\n\nHLTSchedule = cms.Schedule( *(HLT_PFHT800_v1 ))\n\n\n# CMSSW version specific customizations\nimport os\ncmsswVersion = os.environ['CMSSW_VERSION']\n\n# none for now\n\n# dummyfy hltGetConditions in cff's\nif 'hltGetConditions' in locals() and 'HLTriggerFirstPath' in locals() :\n hltDummyConditions = cms.EDFilter( \"HLTBool\",\n result = cms.bool( True )\n )\n HLTriggerFirstPath.replace(hltGetConditions,hltDummyConditions)\n\n","sub_path":"hlt_PFHT800.py","file_name":"hlt_PFHT800.py","file_ext":"py","file_size_in_byte":189294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"302185402","text":"import numpy as np\r\nimport opendssdirect as dss\r\nimport random\r\n\r\n\r\ndef lossless_distribution_power_flow(dflow, inverter_list, total_iteration=300, tol=1e-3):\r\n # voltage=dflow.slack_voltage**2 + dflow.R.dot(np.transpose(dflow.pc-pg)) + dflow.X.dot(np.transpose(dflow.qc-qg))\r\n # voltage=np.sqrt(np.insert(voltage, 0, dflow.slack_voltage**2))\r\n v = np.zeros(shape=(total_iteration, dflow.nb))\r\n pg = np.zeros(dflow.pc.shape)\r\n qg = np.zeros(dflow.qc.shape)\r\n\r\n for iteration in range(0, total_iteration):\r\n if iteration == 0:\r\n voltage = dflow.slack_voltage ** 2 + dflow.R.dot(np.transpose(dflow.pc)) + dflow.X.dot(\r\n np.transpose(dflow.qc))\r\n voltage = np.sqrt(np.insert(voltage, 0, dflow.slack_voltage ** 2))\r\n else:\r\n for i in range(len(inverter_list)):\r\n n = inverter_list[i].node\r\n pg[n] = inverter_list[i].p_curve(voltage[n])\r\n qg[n] = inverter_list[i].q_curve(voltage[n])\r\n voltage = dflow.slack_voltage ** 2 + dflow.R.dot(np.transpose(dflow.pc - pg)) + dflow.X.dot(\r\n np.transpose(dflow.qc - qg))\r\n voltage = np.sqrt(np.insert(voltage, 0, dflow.slack_voltage ** 2))\r\n v[iteration, :] = voltage\r\n # print('Iteration:'+ str(itr))\r\n # print('Voltage:'+str(voltage))\r\n if iteration > 0:\r\n diff_voltage = abs(v[iteration, :] - v[iteration - 1, :])\r\n if max(diff_voltage) < tol:\r\n for inverter in inverter_list:\r\n inverter.update_voltage(voltage[inverter.node])\r\n # inverterlist[i].updateRegister()\r\n break\r\n return voltage\r\n\r\n\r\ndef opendss_power_flow(filename='Inverter_PMU.dss'):\r\n\r\n dss.run_command('Compile ' + filename)\r\n # dss.Text.Command('Set Loadmult= {}'.format(random.uniform(0.85,1.15)))\r\n # dss.Text.Command('BatchEdit PVSystem..* pctPmpp={}'.format(random.uniform(80,95)))\r\n dss.Solution.Solve()\r\n\r\n return dss\r\n","sub_path":"fabpmu/javascript/power_flow_solution.py","file_name":"power_flow_solution.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"80135949","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport os\nimport sys\nimport codecs\nfrom time import time\nfrom optparse import OptionParser\nfrom TorchNN.utils import read_pkl, SentenceDataUtil\nfrom TorchNN.utils import is_interactive\n\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\n\nop = OptionParser()\nop.add_option('--ri', '--root_idx', dest='root_idx', default='./data/test_idx', type='str', help='数据索引根目录')\nop.add_option('--rv', '--root_voc', dest='root_voc', default='./res/voc', type='str', help='字典根目录')\nop.add_option('--pm', '--path_model', dest='path_model', default='./model/sl.model',\n type='str', 
help='模型路径')\nop.add_option('--ml', dest='max_len', default=None, type='int', help='实例最大长度')\nop.add_option('--bs', '--batch_size', dest='batch_size', default=64, type='int', help='batch size')\nop.add_option('-g', '--cuda', dest='cuda', action='store_true', default=False, help='是否使用GPU加速')\nop.add_option('--nw', dest='nb_work', default=8, type='int', help='加载数据的线程数')\nop.add_option('-o', '--output', dest='output', default='./result.txt',\n              type='str', help='预测结果存放路径')\nargv = [] if is_interactive() else sys.argv[1:]\n(opts, args) = op.parse_args(argv)\n\n\n# Load the model\npath_model = opts.path_model\nsl_model = torch.load(path_model)\nsl_model.set_use_cuda(opts.cuda)\nif opts.max_len:\n    sl_model.max_len = opts.max_len\n\n# Initialize the data parameters\nroot_idx = opts.root_idx\npath_num = os.path.join(root_idx, 'nums.txt')\nroot_voc = opts.root_voc\nfeature2id_dict = dict()\nfor feature_i in sl_model.features:\n    path_f2id = os.path.join(root_voc, 'feature_{0}_2id.pkl'.format(feature_i))\n    feature2id_dict[feature_i] = read_pkl(path_f2id)\nlabel2id_dict = read_pkl(os.path.join(root_voc, 'label2id.pkl'))\nhas_label = False\nbatch_size = opts.batch_size\nuse_cuda = opts.cuda\nnum_worker = opts.nb_work\npath_result = opts.output\n\nt0 = time()\n\n# Initialize the dataset\ndataset = SentenceDataUtil(\n    path_num, root_idx, sl_model.max_len, sl_model.features, feature2id_dict, shuffle=False)\ndataset_test = dataset.get_all_data()\ndata_loader_test = DataLoader(\n    dataset_test, batch_size=batch_size, shuffle=False, num_workers=num_worker)\n\n# Test\nlabel2id_dict_rev = dict()\nfor k, v in label2id_dict.items():\n    label2id_dict_rev[v] = k\nlabel2id_dict_rev[0] = 'O'\nfile_result = codecs.open(path_result, 'w', encoding='utf-8')\ncurrent_count, total_count = 0, len(dataset_test)\nfor i_batch, sample_batched in enumerate(data_loader_test):\n    current_count += sample_batched[sl_model.features[0]].size()[0]\n    sys.stdout.write('{0} / {1}\\r'.format(current_count, total_count))\n    for feature_name in sample_batched:\n        if use_cuda:\n            sample_batched[feature_name] = Variable(sample_batched[feature_name]).cuda()\n        else:\n            sample_batched[feature_name] = Variable(sample_batched[feature_name])\n    targets_list = sl_model.predict(sample_batched)\n    for targets in targets_list:\n        targets = list(map(lambda d: label2id_dict_rev[d], targets))\n        file_result.write('{0}\\n'.format(' '.join(targets)))\nsys.stdout.write('{0} / {1}\\n'.format(current_count, total_count))\nfile_result.close()\nprint('done in {:.1f}s!'.format(time()-t0))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}{"seq_id":"448961970","text":"\"\"\"\nVentilator database connection\n\"\"\"\nimport queue\nfrom datetime import datetime\nfrom pymongo import MongoClient, errors\n\n\nclass DbClient():\n\n    def __init__(self, db_queue, addr='mongodb://localhost:27017'):\n        self.addr = addr\n        self.db = None\n        self.queue = db_queue\n\n    def store_pressure(self, pressure_val):\n        collection = self.db.pressure_values\n        self.__store_value(collection, pressure_val)\n\n    def store_volume(self, volume_val):\n        collection = self.db.volume_values\n        self.__store_value(collection, volume_val)\n\n    def store_bpm(self, breaths_per_minute_val):\n        collection = self.db.breathsperminute_values\n        self.__store_value(collection, breaths_per_minute_val)\n\n    def store_trigger(self, trigger_val):\n        collection = self.db.trigger_values\n        self.__store_value(collection, trigger_val)\n\n    def store_flow(self, 
flow_val):\n        collection = self.db.flow_values\n        self.__store_value(collection, flow_val)\n\n    def store_cpu(self, cpu_val):\n        collection = self.db.cpu_values\n        self.__store_value(collection, cpu_val)\n\n    def __store_value(self, collection, val):\n        try:\n            collection.insert_one({'value': val, 'loggedAt': datetime.utcnow()})\n        except errors.ConnectionFailure:\n            print(\"Lost connection, client will attempt to reconnect\")\n\n    def run(self, name):\n        print(\"Starting {}\".format(name))\n\n        # Only start MongoClient after fork()\n        try:\n            self.client = MongoClient(self.addr)\n            self.db = self.client.beademing\n        except errors.ConnectionFailure:\n            print(\"Unable to connect, client will attempt to reconnect\")\n\n        while True:\n            try:\n                msg = self.queue.get()\n            except queue.Empty:\n                continue\n\n\n            try:\n                if msg['type'] == 'BPM':\n                    self.store_bpm(msg['val'])\n                elif msg['type'] == 'VOL':\n                    self.store_volume(msg['val'])\n                elif msg['type'] == 'TRIG':\n                    self.store_trigger(msg['val'])\n                elif msg['type'] == 'PRES':\n                    self.store_pressure(msg['val'])\n                elif msg['type'] == 'FLOW':\n                    self.store_flow(msg['val'])\n                elif msg['type'] == 'CPU':\n                    self.store_cpu(msg['val'])\n            except (KeyError, TypeError):\n                print(\"Invalid message received for the database\")\n\n","sub_path":"ventilator_database.py","file_name":"ventilator_database.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}{"seq_id":"278682945","text":"import os\nimport json\nimport boto3\n\n# Names\nBUCKET_NAME = os.environ.get('AWS_BUCKET_NAME')\n\n# S3 Connection\nS3_CLIENT = boto3.client(\n    's3',\n    aws_access_key_id=os.environ.get('AWS_ACCESS_KEY'),\n    aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY')\n)\nS3_RESOURCE = boto3.resource(\n    's3',\n    aws_access_key_id=os.environ.get('AWS_ACCESS_KEY'),\n    aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY')\n)\nBUCKET = S3_RESOURCE.Bucket(BUCKET_NAME)\n\n\ndef fetch_json(path):\n    obj = S3_CLIENT.get_object(\n        Bucket=BUCKET_NAME, Key=path\n    )\n    return json.loads(obj['Body'].read())\n\n\ndef put_object(path, data):\n    BUCKET.put_object(\n        ContentType='application/json',\n        Key=path,\n        Body=json.dumps(data, indent=2).encode('utf-8'),\n    )\n\n\ndef download_file(path, target_path):\n    S3_CLIENT.download_file(BUCKET_NAME, path, target_path)\n\n\ndef upload_file(source_path, target_path):\n    try:\n        S3_CLIENT.upload_file(source_path, BUCKET_NAME, target_path)\n        return [True, '']\n\n    except Exception as e:\n        # This is a catch all exception, edit this part to fit your needs.\n        return [False, e]\n\n\ndef delete_object(path):\n    S3_RESOURCE.Object(BUCKET_NAME, path).delete()\n","sub_path":"web/util/s3_helper.py","file_name":"s3_helper.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}{"seq_id":"345022197","text":"# Created by rezafilsoof at 2019-11-14\nimport pandas as pd\n\ndef feature_engineer(df):\n\n    # set ID column as index\n    df = df.set_index('ID')\n\n    # change revenue column to int (False ->0 and True-> 1)\n    df['Revenue'] = df['Revenue'].astype(int, errors='raise')\n\n    # change weekend column to numeric\n    df['Weekend'] = df['Weekend'].astype(int, errors='raise')\n\n    # create dummies\n    df = pd.get_dummies(df, columns=['VisitorType'], drop_first=False, dummy_na=False)\n\n    # change month columns to ordinal numeric values\n    df[\"month_num\"] = df[\"Month\"]\n    df['month_num'] = df['month_num'].replace('Jan', '1')\n    df['month_num'] = df['month_num'].replace('Feb', '2')\n    
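# (the rest of this block maps the remaining months the same way; an equivalent,\n    # more compact form would be a single .replace() call with a twelve-entry month dict)\n    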
df['month_num'] = df['month_num'].replace('Mar', '3')\n df['month_num'] = df['month_num'].replace('Apr', '4')\n df['month_num'] = df['month_num'].replace('May', '5')\n df['month_num'] = df['month_num'].replace('June', '6')\n df['month_num'] = df['month_num'].replace('Jul', '7')\n df['month_num'] = df['month_num'].replace('Aug', '8')\n df['month_num'] = df['month_num'].replace('Sep', '9')\n df['month_num'] = df['month_num'].replace('Oct', '10')\n df['month_num'] = df['month_num'].replace('Nov', '11')\n df['month_num'] = df['month_num'].replace('Dec', '12')\n\n df['month_num'] = df['month_num'].astype(int, errors='raise')\n df = df.drop(columns=['Month'])\n\n return(df)\n\n\n #\n # from sklearn import preprocessing\n # label_encoder = preprocessing.LabelEncoder()\n #\n # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)\n #\n # sel = SelectFromModel(RandomForestClassifier(n_estimators=100))\n # sel.fit(X, y)\n #\n # sel.get_support()\n #\n # # selected_feat = X_train.columns[(sel.get_support())]\n # selected_feat = X.columns[(sel.get_support())]\n #\n # print(len(selected_feat))\n #\n # print(selected_feat)\n #\n # # exit()\n #\n #\n #\n #\n #\n #\n # from sklearn.linear_model import SGDClassifier\n # # from sklearn.model_selection import train_test_split\n # from sklearn.metrics import f1_score\n # # target = 'Revenue'\n # x_train, x_cv, y_train, y_cv = train_test_split(X, y, test_size=0.2, stratify=y)\n #\n # def evaluate_metric(model, x_cv, y_cv):\n # return f1_score(y_cv, model.predict(x_cv), average='micro')\n #\n #\n # ##feature importance tester\n # # Input : Dataframe df with m features, number of required features n\n # # Output : Set of n features most useful for model performance\n #\n # def forward_feature_selection(x_train, x_cv, y_train, y_cv, n):\n # feature_set = []\n # for num_features in range(n):\n # metric_list = [] # Choose appropriate metric based on business problem\n # model = RandomForestClassifier() # You can choose any model you like, this technique is model agnostic\n # for feature in x_train.columns:\n # if feature not in feature_set:\n # f_set = feature_set.copy()\n # f_set.append(feature)\n # model.fit(x_train[f_set], y_train)\n # metric_list.append((evaluate_metric(model, x_cv[f_set], y_cv), feature))\n #\n # metric_list.sort(key=lambda x: x[0], reverse=True) # In case metric follows \"the more, the merrier\"\n # feature_set.append(metric_list[0][1])\n # return feature_set\n #\n # f = forward_feature_selection(x_train, x_cv, y_train, y_cv, 19)\n #\n # print(f)\n # ##EDA\n # # logit_model = sm.Logit(y_train, X_train)\n # # result = logit_model.fit()\n # # print(result.summary2())\n","sub_path":"reza/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"279091394","text":"from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom .tables import Location, Result\nfrom .api import get_results, get_location\n\n\napp = Flask(__name__)\n\nconf = dict(\n DEBUG=False,\n SECRET_KEY='dev',\n SQLALCHEMY_DATABASE_URI='sqlite:///tables.db'\n)\n\napp.config.update(conf)\napp.config.from_envvar('KNELL_SETTINGS', silent=True)\n\ndb = SQLAlchemy(app)\n\ngbl_zip = ''\n\n\ndef db_add() -> None:\n \"\"\"Retrieve API results, store in DB\"\"\"\n global gbl_zip\n\n result = get_results()\n location = 
get_location()\n gbl_zip = location[\"zipcode\"]\n\n db.session.add(Location(**location))\n db.session.add(Result(**result))\n\n db.session.commit()\n db.session.close()\n\n\n@app.route('/')\n@app.route('/index')\ndef index() -> render_template:\n global gbl_zip\n if gbl_zip == '':\n locations, results = None, None\n else:\n locations = Location.query.filter_by(zipcode=gbl_zip).order_by(Location.id.desc()).first()\n results = Result.query.filter_by(zipcode=gbl_zip).order_by(Result.id.desc()).first()\n return render_template('index.html', locations=locations, results=results)\n\n\n@app.route('/find', methods=['POST'])\ndef find_info() -> redirect:\n \"\"\"This is what happens when users click buttons.\"\"\"\n db_add()\n return redirect(url_for('index'))\n\n","sub_path":"knell-master/knell/knell.py","file_name":"knell.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"446778581","text":"import ConfigParser\n\n\"\"\"\nTaken from https://wiki.python.org/moin/ConfigParserExamples\n\"\"\"\ndef getConfig(file, section):\n bool_list = ['raise_on_warnings', 'buffered', 'ip_changing', 'logging']\n int_list = ['interval']\n \n dict1 = {}\n \n Config = ConfigParser.ConfigParser()\n Config.read(file)\n options = Config.options(section)\n for option in options:\n if option in bool_list:\n dict1[option] = Config.getboolean(section, option)\n elif option in int_list:\n dict1[option] = Config.getint(section, option)\n else:\n dict1[option] = Config.get(section, option)\n \n return dict1\n\n","sub_path":"util/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"583378492","text":"\"\"\"\nA fairly straightforward macro/hotkey program for Adafruit MACROPAD.\nMacro key setups are stored in the /macros folder (configurable below),\nload up just the ones you're likely to use. Plug into computer's USB port,\nuse dial to select an application macro set, press MACROPAD keys to send\nkey sequences.\n\"\"\"\n\n# pylint: disable=import-error, unused-import, too-few-public-methods\n\nimport os\nimport board\nimport digitalio\nimport displayio\nimport neopixel\nimport rotaryio\nimport keypad\nimport terminalio\nimport usb_hid\nfrom adafruit_display_shapes.rect import Rect\nfrom adafruit_display_text import label\nfrom adafruit_hid.keyboard import Keyboard\nfrom adafruit_hid.keycode import Keycode\nfrom adafruit_hid.keyboard_layout_us import KeyboardLayoutUS\n\n\n# CONFIGURABLES ------------------------\n\nMACRO_FOLDER = '/macros'\n\n\n# CLASSES AND FUNCTIONS ----------------\n\nclass App:\n \"\"\" Class representing a host-side application, for which we have a set\n of macro sequences. \"\"\"\n def __init__(self, appdata):\n self.name = appdata['name']\n self.macros = appdata['macros']\n\n def switch(self):\n \"\"\" Activate application settings; update OLED labels and LED\n colors. 
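Keys with no macro assigned get a blank label and an unlit LED. 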
\"\"\"\n GROUP[13].text = self.name # Application name\n for i in range(12):\n if i < len(self.macros): # Key in use, set label + LED color\n PIXELS[i] = self.macros[i][0]\n GROUP[i].text = self.macros[i][1]\n else: # Key not in use, no label or LED\n PIXELS[i] = 0\n GROUP[i].text = ''\n PIXELS.show()\n DISPLAY.refresh()\n\n\n# INITIALIZATION -----------------------\n\nDISPLAY = board.DISPLAY\nDISPLAY.auto_refresh = False\nENCODER = rotaryio.IncrementalEncoder(board.ENCODER_B, board.ENCODER_A)\nPIXELS = neopixel.NeoPixel(board.NEOPIXEL, 12, auto_write=False)\nKEYBOARD = Keyboard(usb_hid.devices)\nLAYOUT = KeyboardLayoutUS(KEYBOARD)\nKEYS = keypad.Keys((board.KEY1, board.KEY2, board.KEY3, board.KEY4, board.KEY5,\n board.KEY6, board.KEY7, board.KEY8, board.KEY9, board.KEY10,\n board.KEY11, board.KEY12, board.ENCODER_SWITCH),\n value_when_pressed=False, pull=True)\n\n# Set up displayio group with all labels\nGROUP = displayio.Group(max_size=14)\nfor KEY_INDEX in range(12):\n x = KEY_INDEX % 3\n y = KEY_INDEX // 3\n GROUP.append(label.Label(terminalio.FONT, text='', color=0xFFFFFF,\n anchored_position=((DISPLAY.width - 1) * x / 2,\n DISPLAY.height - 1 -\n (3 - y) * 12),\n anchor_point=(x / 2, 1.0), max_glyphs=15))\nGROUP.append(Rect(0, 0, DISPLAY.width, 12, fill=0xFFFFFF))\nGROUP.append(label.Label(terminalio.FONT, text='', color=0x000000,\n anchored_position=(DISPLAY.width//2, -2),\n anchor_point=(0.5, 0.0), max_glyphs=30))\nDISPLAY.show(GROUP)\n\n# Load all the macro key setups from .py files in MACRO_FOLDER\nAPPS = []\nFILES = os.listdir(MACRO_FOLDER)\nFILES.sort()\nfor FILENAME in FILES:\n if FILENAME.endswith('.py'):\n try:\n module = __import__(MACRO_FOLDER + '/' + FILENAME[:-3])\n APPS.append(App(module.app))\n except (SyntaxError, ImportError, AttributeError, KeyError, NameError,\n IndexError, TypeError) as err:\n pass\n\nif not APPS:\n GROUP[13].text = 'NO MACRO FILES FOUND'\n DISPLAY.refresh()\n while True:\n pass\n\nLAST_POSITION = None\nAPP_INDEX = 0\nAPPS[APP_INDEX].switch()\n\n\n# MAIN LOOP ----------------------------\n\nwhile True:\n POSITION = ENCODER.position\n if POSITION != LAST_POSITION:\n APP_INDEX = POSITION % len(APPS)\n APPS[APP_INDEX].switch()\n LAST_POSITION = POSITION\n\n EVENT = KEYS.events.get()\n if EVENT and EVENT.key_number < len(APPS[APP_INDEX].macros):\n SEQUENCE = APPS[APP_INDEX].macros[EVENT.key_number][2]\n if EVENT.pressed:\n if EVENT.key_number < 12:\n PIXELS[EVENT.key_number] = 0xFFFFFF\n PIXELS.show()\n for item in SEQUENCE:\n if isinstance(item, int):\n if item >= 0:\n KEYBOARD.press(item)\n else:\n KEYBOARD.release(item)\n else:\n LAYOUT.write(item)\n else:\n # Release any still-pressed modifier keys\n for item in SEQUENCE:\n if isinstance(item, int) and item >= 0:\n KEYBOARD.release(item)\n if EVENT.key_number < 12:\n PIXELS[EVENT.key_number] = APPS[APP_INDEX].macros[\n EVENT.key_number][0]\n PIXELS.show()\n","sub_path":"Macropad_Hotkeys/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"523249795","text":"import json\n\nfrom training_tips_crud import TraningTipsCRUD\n\ndef lambda_handler(event, context):\n \"\"\"\n \n \"\"\"\n\n step = 'Iniciando'\n try:\n status, data = 200, None\n crud = TraningTipsCRUD()\n path_parameters = event.get('pathParameters')\n query_parameters = event.get('queryStringParameters')\n \n if event['httpMethod'] == 'GET':\n step = 'ENTROU NO GET'\n content_id = path_parameters.get('content_id') if 
path_parameters else None\n step = 'PEGOU O `content_id`'\n if not content_id:\n data = crud.listing()\n status = 200\n \n else:\n jump = query_parameters.get('jump') if query_parameters else None\n \n if jump == 'next':\n data = crud.next_tip(content_id)\n status = 200 if data else 404\n \n else:\n data = crud.item(content_id) \n status = 200 if data else 404\n \n elif event['httpMethod'] == 'POST':\n content = json.loads(event['body'])\n data = crud.create_tip(content)\n status = 201\n \n elif event['httpMethod'] == 'PUT':\n content = json.loads(event['body'])\n data = crud.edit_tip(content)\n status = 200\n \n elif event['httpMethod'] == 'DELETE':\n content_id = path_parameters.get('content_id')\n crud.delete_tip(content_id)\n status = 200\n \n else:\n status, data = 400, {'reason': f\"Operação não reconhecida: [{event['httpMethod']}]\"}\n \n except Exception as err:\n data = {\n 'step': step,\n 'error': str(err)\n }\n \n \n return {\n 'statusCode': status,\n 'body': json.dumps(data),\n 'headers': {\n 'Access-Control-Allow-Origin': '*'\n }\n }\n","sub_path":"serverless_aws/LAMBDA FUNCTIONS/training_tips-crud/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"376799133","text":"from def_metafiles import MetaFileManager\r\nfrom def_system import Debug\r\nimport os\r\n\r\n\r\n# #################################################################################################################### #\r\n# ####################################################### DEFS ####################################################### #\r\n# #################################################################################################################### #\r\n\r\ndef generate_cmpd_representations(sc_modelcombs_ids, sc_runset_id, unix_timestamp, debug_lvl=0):\r\n \"\"\"\r\n\r\n :param sc_modelcombs_ids:\r\n :param sc_runset_id:\r\n :param unix_timestamp:\r\n :param debug_lvl:\r\n :return:\r\n \"\"\"\r\n\r\n # basic checks\r\n if (sc_modelcombs_ids is None) or (type(sc_modelcombs_ids) is not list):\r\n Debug.dl(\"plot_cmpd_representations_lib: First argument must be a list of sc_model_combination_ids.\", 0,\r\n debug_lvl)\r\n return\r\n if sc_runset_id is None:\r\n Debug.dl(\"plot_cmpd_representations_lib: Invalid runset_id: '{0}'.\".format(sc_runset_id), 0, debug_lvl)\r\n return\r\n\r\n # creating guiding objects\r\n meta_mng = MetaFileManager(runset_id=sc_runset_id)\r\n meta_mng.load_all_scmodelcomb_meta_info(debug_lvl=debug_lvl)\r\n meta_mng.load_all_screpresentationcomp_meta_info(ignore_fails=False, debug_lvl=debug_lvl)\r\n\r\n # preparing arguments\r\n the_timestamp_arg = \"\" if unix_timestamp is None else \"-t {0}\".format(unix_timestamp)\r\n\r\n # for each representation of each module, run its plotting function\r\n for cur_sc_modelcomb_id in sc_modelcombs_ids:\r\n cur_reprcmpd_ids = meta_mng.get_all_representationcomps_of_scmodelcomb(cur_sc_modelcomb_id, debug_lvl=debug_lvl)\r\n Debug.dl(\"plot_cmpd_representations_lib: For '{0}':{1}.\".format(cur_sc_modelcomb_id, cur_reprcmpd_ids), 1,\r\n debug_lvl)\r\n\r\n for cur_reprcmpd_id in cur_reprcmpd_ids:\r\n\r\n the_script = meta_mng.get_genscript_of_representation_cmpd(cur_reprcmpd_id, debug_lvl=debug_lvl)\r\n call_command = \"{0} -modelcomb {1} -runsetid {2} {3}\".format(the_script, cur_sc_modelcomb_id, sc_runset_id,\r\n the_timestamp_arg)\r\n Debug.dl(\"plot_cmpd_representations_lib: Calling 
'{0}'.\".format(call_command), 1, debug_lvl)\r\n os.system(call_command)\r\n","sub_path":"backend/model_3_0_scripts/python/libs/plot_cmpd_representations_lib.py","file_name":"plot_cmpd_representations_lib.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"164852249","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: shiv\n\"\"\"\n\n# CONSTANTS\n_IMAGE_SIZE = 224\n_LIMIT_NUM_OF_FRAMES = 250\n\n_OUTPUT_RGB_NPY = 'rgb.npy'\n_OUTPUT_FLOW_NPY = 'flow.npy'\n\n_TOP_NUM_RESULTS = 5\n","sub_path":"data/kinetics400/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"433291506","text":"from tkinter import *\r\nimport threading\r\nimport time\r\nfrom tkinter import messagebox\r\nfrom tkinter.filedialog import asksaveasfile, askopenfilename\r\nfrom color_thread import ColorThreadPrivate\r\nimport os\r\nimport tkinter.messagebox\r\nfrom varibles import *\r\nimport emoji\r\n\r\n\r\n# The following class inherit from threading and creates a tkinter window which presents a private chat.\r\nclass PrivateChatThread(threading.Thread):\r\n def __init__(self, sender_name, client_name, message, current_socket, command_number, my_database, my_cursor,\r\n color_list):\r\n super(PrivateChatThread, self).__init__()\r\n self.daemon = True\r\n self.__sender_name = sender_name\r\n self.__client_name = client_name[1:]\r\n self.__message = message\r\n self.__current_socket = current_socket\r\n self.__command_number = command_number\r\n self.__my_database = my_database\r\n self.__my_cursor = my_cursor\r\n self.__entry = None\r\n self.__txt = None\r\n self.__window = None\r\n self.__label = None\r\n self.__header_label = None\r\n self.__menu_bar = None\r\n self.__file_menu = None\r\n self.__s = None\r\n self.__my_chat = ''\r\n self.__options_menu = None\r\n self.__color = color_list\r\n\r\n # The following function responsible for ongoing functioning of the private chat.\r\n def run(self):\r\n self.__txt, self.__window, self.__entry, self.__label, self.__header_label = self.create_private_chat_window()\r\n self.__window.title('Chatting with: ' + self.__sender_name)\r\n self.__menu_bar = Menu(self.__window)\r\n self.__file_menu = Menu(self.__menu_bar, tearoff=0)\r\n self.__options_menu = Menu(self.__menu_bar, tearoff=0)\r\n self.create_file_menu()\r\n self.create_options_menu()\r\n self.__window.config(menu=self.__menu_bar)\r\n self.__window.bind('', lambda x: self.save_chat())\r\n self.__window.bind('', lambda x: self.save_chat())\r\n self.__window.bind('', lambda x: self.open_file())\r\n self.__window.bind('', lambda x: self.open_file())\r\n self.__entry.bind('', self.get_input_in_private_message)\r\n if self.__command_number == '6':\r\n self.update_chat(self.__message)\r\n self.__window.protocol(\"WM_DELETE_WINDOW\", self.exit_private_chat)\r\n ColorThreadPrivate(self.__window, self.__txt, self.__entry, self.__client_name).start()\r\n self.__window.mainloop()\r\n\r\n # The following function creates the window's objects.\r\n def create_private_chat_window(self):\r\n window = Tk()\r\n window.iconbitmap('icon.ico')\r\n window.geometry(str(round(window.winfo_screenwidth()/3.072)) + \"x\" +\r\n str(round(window.winfo_screenheight()/1.63)))\r\n window.configure(background=self.get_color('root_background_color'))\r\n window.resizable(height=False, width=False)\r\n 
window.bind('<Control-p>', lambda x: self.to_printer())\r\n        window.bind('<Control-P>', lambda x: self.to_printer())\r\n        window.config(bg=self.get_color('root_background_color'))\r\n        label = Label(window, background=self.get_color('root_background_color'))\r\n        header_label = Label(window)\r\n        header_label.pack(side='top', fill='x')\r\n        label.pack(fill=\"both\", expand=True)\r\n        txt = Text(label, width=150, height=28, wrap=WORD,\r\n                   background=self.get_color('text_background_color'), fg=self.get_color('text_font_color'))\r\n        txt.place(x=0)\r\n        self.__my_cursor.execute('SELECT ' + self.__sender_name + ' FROM private_chats WHERE name=%s',\r\n                                 (self.__client_name,))\r\n        message_from_database = self.__my_cursor.fetchone()\r\n        if message_from_database is not None and message_from_database[0]:\r\n            self.__my_chat = message_from_database[0]\r\n            txt.config(state=NORMAL)\r\n            msg_list = message_from_database[0].split('\\n')\r\n            for message in msg_list:\r\n                m = message.split()\r\n                if m:\r\n                    message_tag = 'The message date is ' + m[0] + ' ' + m[1]\r\n                    message = ' '.join(m[1:])\r\n                    message = emoji.emojize(message)\r\n                    try:\r\n                        txt.insert(END, message + '\\n', (message_tag,))\r\n                        txt.tag_bind(message_tag, \"<Enter>\", lambda event, date=message_tag: self.show_info(date))\r\n                        txt.tag_bind(message_tag, \"<Leave>\", lambda event, date=message_tag: self.show_info(\"\"))\r\n                    except:\r\n                        txt.insert(END, \"Your device doesn't support this type of message\" + '\\n')\r\n            txt.yview(END)\r\n            txt.config(state=DISABLED)\r\n        entry = Entry(label, width=200, relief='solid', bd=1, bg=self.get_color('input_background_color'),\r\n                      fg=self.get_color('input_font_color'), insertbackground=self.get_color('input_font_color'))\r\n        self.__s = Scrollbar(label, command=txt.yview, orient=VERTICAL)\r\n        entry.place(x=0, y=462)\r\n        txt.place(x=0)\r\n        txt.configure(yscrollcommand=self.__s.set)\r\n        self.__s.pack(side=RIGHT, fill=BOTH)\r\n        return txt, window, entry, label, header_label\r\n\r\n    # The following function updates the chat and the database with every new message that the client receives.\r\n    def update_chat(self, data):\r\n        if data != \"\" and data is not None:\r\n            sdata = data.split()\r\n            if len(sdata) > 2 or self.__txt.compare(\"end-1c\", \"!=\", \"1.0\"):\r\n                self.__txt.config(state=NORMAL)\r\n                message_tag = 'The message date is ' + DATE + ' ' + str(time.strftime(\"%H:%M\"))\r\n                try:\r\n                    self.__txt.insert(END, data + '\\n', (message_tag,))\r\n                    self.__txt.tag_bind(message_tag, \"<Enter>\", lambda event, date=DATE: self.show_info(message_tag))\r\n                    self.__txt.tag_bind(message_tag, \"<Leave>\", lambda event, date=DATE: self.show_info(\"\"))\r\n                except:\r\n                    self.__txt.insert(END, \"Your device doesn't support this type of message\" + '\\n')\r\n                self.__txt.yview(END)\r\n                self.__txt.config(state=DISABLED)\r\n            else:\r\n                self.__txt.config(state=NORMAL)\r\n                message_tag = 'The message date is ' + DATE + ' ' + str(time.strftime(\"%H:%M\"))\r\n                self.__txt.insert(END, data + 'Hi I want to start chatting with you' + '\\n', (message_tag,))\r\n                self.__txt.tag_bind(message_tag, \"<Enter>\", lambda event, date=DATE: self.show_info(message_tag))\r\n                self.__txt.tag_bind(message_tag, \"<Leave>\", lambda event, date=DATE: self.show_info(\"\"))\r\n                self.__txt.yview(END)\r\n                self.__txt.config(state=DISABLED)\r\n        self.save_chat_db(data)\r\n\r\n    # The following function shows the date and the time of the text that the cursor is hovering over.\r\n    def show_info(self, text):\r\n        self.__header_label.configure(text=text)\r\n\r\n    # The following function gets the input from the entry widget, sending it to the server,\r\n    # presenting it 
in the current text widget and updating the database.\r\n    def get_input_in_private_message(self, event):\r\n        text = self.__entry.get()\r\n        text = text.strip()\r\n        self.__txt.config(state=NORMAL)\r\n        if text != \"\" and text is not None:\r\n            message_tag = 'The message date is ' + DATE + ' ' + str(time.strftime(\"%H:%M\"))\r\n            self.__txt.insert(END, str(time.strftime(\"%H:%M\")) + \" \" + text + '\\n', (message_tag,))\r\n            self.__txt.tag_bind(message_tag, \"<Enter>\", lambda event1, date=message_tag: self.show_info(date))\r\n            self.__txt.tag_bind(message_tag, \"<Leave>\", lambda event1, date=message_tag: self.show_info(\"\"))\r\n            self.__txt.yview(END)\r\n        self.__txt.config(state=DISABLED)\r\n        self.__entry.delete(0, 'end')\r\n        self.save_chat_db(str(time.strftime(\"%H:%M\"))+' '+text)\r\n        self.__current_socket.write(self.message_by_client(text).encode())\r\n\r\n    # The following function builds the message structure according to the protocol before sending it to the server.\r\n    def message_by_client(self, message):\r\n        message = '05' + self.__sender_name + ' ' + message\r\n        return str(len(self.__client_name))+self.__client_name + message\r\n\r\n    # The following function prevents the private chat from being closed; the window is hidden instead when the\r\n    # user wants to close it.\r\n    def exit_private_chat(self):\r\n        message_box = messagebox.askquestion('Exit Private Chat?', 'Are you sure you want to exit the private chat?',\r\n                                             icon='warning')\r\n        if message_box == 'yes':\r\n            self.__window.withdraw()\r\n\r\n    # The following function gets a specific color from the database.\r\n    def get_color(self, var):\r\n        self.__my_cursor.execute('SELECT ' + var + ' FROM users WHERE name=%s ', (self.__client_name,))\r\n        message_from_database = self.__my_cursor.fetchone()\r\n        if message_from_database is not None:\r\n            return message_from_database[0]\r\n\r\n    # The following function creates the file menu.\r\n    def create_file_menu(self):\r\n        self.__file_menu.add_command(label='Open', command=self.open_file)\r\n        self.__file_menu.add_command(label='Save', command=self.save_chat)\r\n        self.__file_menu.add_command(label=\"Print\", command=self.to_printer)\r\n        self.__file_menu.add_separator()\r\n        self.__file_menu.add_command(label='Exit', command=self.exit_private_chat)\r\n        self.__menu_bar.add_cascade(label='File', menu=self.__file_menu)\r\n\r\n    # The following function creates the options menu.\r\n    def create_options_menu(self):\r\n        self.__options_menu.add_command(label='open emoji', command=em)\r\n        self.__menu_bar.add_cascade(label=\"Emoji\", menu=self.__options_menu)\r\n\r\n    # The following function saves the chat to an external text or word file.\r\n    def save_chat(self):\r\n        files = [('Text Document', '*.txt'), ('Word Document', '.doc')]\r\n        file = asksaveasfile(mode='w', filetypes=files, defaultextension=files)\r\n        if file is not None:\r\n            with open(file.name, \"w\", encoding='utf-8') as document:\r\n                document.write(self.__txt.get(0.0, END))\r\n\r\n    # The following function opens an external text or word file and inserts it into the chat.\r\n    def open_file(self):\r\n        files = [('Text Document', '*.txt'), ('Word Document', '.doc')]\r\n        filename = askopenfilename(parent=self.__window, filetypes=files, defaultextension=files)\r\n        if filename is not None and filename != '':\r\n            with open(filename, encoding='utf-8', mode='r') as file:\r\n                data = file.read()\r\n            self.__txt.config(state=NORMAL)\r\n            try:\r\n                self.__txt.insert(END, data + '\\n')\r\n            except:\r\n                self.__txt.insert(END, \"Your device doesn't support this type of message\" + '\\n')\r\n            
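# scroll to the newly inserted text, lock the widget again, and forward the file content to the server\r\n            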
self.__txt.yview(END)\r\n self.__txt.config(state=DISABLED)\r\n self.__current_socket.write((self.__client_name + '01' + data).encode())\r\n\r\n # The following function save the 'text' parameter to the database.\r\n def save_chat_db(self, text):\r\n self.__my_chat = self.__my_chat + DATE + ' ' + emoji.demojize(text) + '\\n'\r\n text2save = (self.__my_chat, self.__client_name)\r\n sql = \"UPDATE private_chats SET \" + self.__sender_name + \"=%s WHERE name=%s \"\r\n self.__my_cursor.execute(sql, text2save)\r\n\r\n # The following function printing the chat.\r\n def to_printer(self):\r\n msg = tkinter.messagebox.askquestion('print', 'Do you want to print the chat?')\r\n if msg == 'yes':\r\n with open(\"print.txt\", \"w\", encoding='utf-8') as document:\r\n document.write(self.__txt.get(0.0, END))\r\n os.startfile(\"print.txt\", \"print\")\r\n threading.Timer(30, lambda: delete_file()).start()\r\n\r\n # The accessors of the class:\r\n def get_client_name(self):\r\n return self.__sender_name\r\n\r\n def get_window(self):\r\n return self.__window\r\n\r\n def get_chat_data(self):\r\n return self.__txt.get('1.0', END).strip('\\n')\r\n\r\n def get_chat_txt(self):\r\n return self.__txt\r\n\r\n\r\n# The following function delete the file that to_printer function created.\r\ndef delete_file():\r\n os.remove(\"print.txt\")\r\n\r\n\r\n# The following function opens windows virtual keyboard.\r\ndef em():\r\n os.system(\"wmic process where name='TabTip.exe' delete\")\r\n os.startfile(\"C:\\\\Program Files\\\\Common Files\\\\microsoft shared\\\\ink\\\\TabTip.exe\")\r\n","sub_path":"pyworks1/PrivateChat.py","file_name":"PrivateChat.py","file_ext":"py","file_size_in_byte":12826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"10734604","text":"import time\nimport sys\nsys.path.insert(0,'/home/hungjinh/Research/Pk_hydro/code/gen_Pk')\nfrom Pk_nbkits_illustris import *\n\nstrid_snap = '135'\nsnapdir_hydro = \"/home/hungjinh/Research/baryon_proj/catalog/illustris_cat/Illustris-1/snapdir_135/\"\noutfile_hydro = '/home/hungjinh/Research/Pk_hydro/data/Pk_data_CIC/illustris/raw_data/Pk_ill1_135_hydro1024_aF.pickle'\n\nresol = 1024\nresampler='cic'\ncor_aliasing=False\ncor_deconvolution=True\n\n\n########################################\nTstart=time.time()\n\nPk_hydro_factory = Pk_nbkit_illustris(snapdir=snapdir_hydro,strid_snap=strid_snap,resol=resol,outfile=outfile_hydro,resampler=resampler,cor_aliasing=cor_aliasing,cor_deconvolution=cor_deconvolution)\n\nPk_hydro_factory.make_Mesh_hydro()\n\nTime_mesh=time.time()\n\nPk_hydro_factory.compute_pk()\n\n\n\nTime_fft=time.time()\n\nprint(\"Total mesh time (min):\",(Time_mesh-Tstart)/60.)\nprint(\"Total FFT time (min):\",(Time_fft-Time_mesh)/60.)\n\n\n","sub_path":"code/gen_Pk/gen_ill1/gen_Pk_ill1hydro_aF.py","file_name":"gen_Pk_ill1hydro_aF.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"564014786","text":"import sys\nimport os\nimport pandas as pd\nimport numpy as np\n\nsys.path.append('./..')\nsys.path.append('./../..')\nsys.path.append('./../../..')\n\nimport torch\nfrom torch import FloatTensor as FT\nfrom torch import LongTensor as LT\nfrom torch import nn\nfrom torch.nn import functional as F\nimport os\nfrom collections import OrderedDict\nimport math\nimport numpy as np\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nfrom 
torch.distributions.normal import Normal\nimport math\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\n\ntry:\n from data_fetcher import data_fetcher\nexcept:\n from .data_fetcher import data_fetcher\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# DEVICE = torch.device(\"cpu\")\nprint('Current device >> ', DEVICE)\nprint('=========================== ')\n\n\ndef get_Activation(act):\n if act == 'tanh': return nn.Tanh()\n if act == 'sigmoid': return nn.Sigmoid()\n if act == 'none': return nn.Identity()\n if act == 'relu': return nn.ReLU()\n return nn.ReLU()\n\n\nclass module_LPT_AE(nn.Module):\n def __init__(\n self,\n data_dim,\n layer_dims, # Provide the half (encoder only)\n op_activation='sigmoid',\n layer_activation='sigmoid',\n dropout=0.05\n ):\n super(module_LPT_AE, self).__init__()\n self.module_encoder = nn.ModuleList()\n self.module_decoder = nn.ModuleList()\n self.num_trained = 0\n self.layer_dims = layer_dims\n self.op_activation = op_activation\n self.layer_activation = layer_activation\n self.dropout_rate = dropout\n self.data_dim = data_dim\n self.num_layers = len(layer_dims)\n self.mode = 'ae' # options are ae, encoder\n\n # =====\n # Adds in 1 layer of AE\n # =====\n def add_layer(self, layer_idx):\n print('Adding layer index ', layer_idx)\n if layer_idx >= self.num_layers:\n exit(1)\n # add in encoder\n if layer_idx == 0:\n inp_dim = self.data_dim\n else:\n inp_dim = self.layer_dims[layer_idx - 1]\n op_dim = self.layer_dims[layer_idx]\n self.module_encoder.append(\n nn.Sequential(\n nn.Linear(inp_dim, op_dim),\n get_Activation(self.layer_activation)\n )\n )\n # Swap the values for decoder\n inp_dim, op_dim = op_dim, inp_dim\n\n # Last layer\n if layer_idx == 0:\n act = self.op_activation\n else:\n act = self.layer_activation\n # Insert at start\n self.module_decoder.insert(\n 0,\n nn.Sequential(\n nn.Linear(inp_dim, op_dim),\n get_Activation(act)\n )\n )\n return\n\n def forward(self, x):\n x1 = x\n for m in self.module_encoder:\n x1 = m(x1)\n z = x1\n x2 = x1\n for m in self.module_decoder:\n x2 = m(x2)\n\n if self.mode == 'dual':\n return z, x2\n elif self.mode == 'encoder':\n return z\n else:\n return x2\n\n # =========================\n # Return params by layer\n # =========================\n def get_trainable_layer_params(self, layer_idx=-1):\n if layer_idx == -1:\n return list(self.parameters())\n\n e = list(self.module_encoder[layer_idx].parameters())\n d = list(self.module_decoder[-(layer_idx + 1)].parameters())\n return e + d\n\n\nclass DCN():\n\n def __init__(\n self,\n device,\n data_dim,\n layer_dims, # Provide the half (encoder only)\n op_activation='sigmoid',\n layer_activation='sigmoid',\n dropout=0.05,\n LR=0.05,\n num_epochs_1=10,\n num_epochs_2=25,\n min_epochs=10,\n batch_size=256,\n k=3,\n Lambda=0.1,\n stop_threshold=0.05,\n checkpoint_dir=None\n ):\n self.log_interval = 100\n self.device = device\n self.num_clusters = k\n self.ae = module_LPT_AE(\n data_dim,\n layer_dims,\n op_activation,\n layer_activation,\n dropout\n )\n self.ae_num_layers = len(layer_dims)\n self.ae = self.ae.to(self.device)\n self.LR = LR\n self.min_epochs = min_epochs\n self.num_epochs_1 = num_epochs_1\n self.num_epochs_2 = num_epochs_2\n self.batch_size = batch_size\n self.latent_dim = layer_dims[-1]\n self.Lambda = Lambda\n self.stop_threshold = stop_threshold\n self.max_loss_dec_epochs = 5\n # ---------------------\n # Dir to save the pre trained model results\n # ---------------------\n self.checkpoint_dir = checkpoint_dir\n if not 
os.path.exists(self.checkpoint_dir):\n os.mkdir(self.checkpoint_dir)\n self.FLAG_ae_setup = False\n self.break_threshold = 0.001\n return\n\n def pretrain_ae(self, data):\n print('greedy layerwise pretarining started')\n self.ae.mode = 'ae'\n self.ae.train()\n\n num_epochs = self.num_epochs_1\n batch_size = self.batch_size\n log_interval = 1500\n print(' Num AE layers ', self.ae_num_layers)\n\n for l_idx in range(self.ae_num_layers):\n self.ae.add_layer(l_idx)\n self.ae = self.ae.to(self.device)\n print('Current model ', self.ae)\n # train using the data\n params = self.ae.get_trainable_layer_params(layer_idx=l_idx)\n opt = torch.optim.Adam(\n params,\n lr=self.LR\n )\n\n for e in tqdm(range(num_epochs)):\n epoch_loss = []\n np.random.shuffle(data)\n num_batches = data.shape[0] // batch_size + 1\n for b_idx in range(num_batches):\n opt.zero_grad()\n x = data[b_idx * batch_size: (b_idx + 1) * batch_size]\n x = FT(x).to(self.device)\n x_R = self.ae(x)\n b_loss = F.mse_loss(\n input=x, target=x_R\n )\n b_loss.backward()\n opt.step()\n loss_val = b_loss.cpu().data.numpy()\n epoch_loss.append(loss_val)\n if b_idx % log_interval == 0:\n print('Loss {:4f}'.format(loss_val))\n print(' Epoch {} loss {:4f}'.format(e + 1, np.mean(epoch_loss)))\n\n print('Greedy layer-wise pretraining [Done]')\n\n # =======================\n # Now train the entire autoencoder\n # =======================\n\n opt = torch.optim.Adam(\n list(self.ae.parameters()),\n lr=self.LR\n )\n\n for e in tqdm(range(num_epochs)):\n epoch_loss = []\n np.random.shuffle(data)\n num_batches = data.shape[0] // batch_size + 1\n for b_idx in range(num_batches):\n opt.zero_grad()\n x = data[b_idx * batch_size: (b_idx + 1) * batch_size]\n x = FT(x).to(self.device)\n x_R = self.ae(x)\n b_loss = F.mse_loss(\n input=x, target=x_R\n )\n b_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.ae.parameters(), 2)\n opt.step()\n loss_val = b_loss.cpu().data.numpy()\n epoch_loss.append(loss_val)\n if b_idx % log_interval == 0:\n print('Loss {:4f}'.format(loss_val))\n print(' Epoch {} loss {:4f}'.format(e + 1, np.mean(epoch_loss)))\n\n self.FLAG_ae_setup = True\n return\n\n # =========================\n\n # Obtain the centroids\n # =========================\n def init_centroids(self, data):\n print(\"In init_centroids\")\n self.ae.mode = 'dual'\n \n batch_size = self.batch_size\n z = []\n \n num_batches = data.shape[0] // batch_size + 1\n for b in range(num_batches):\n _x = data[b * batch_size: (b + 1) * batch_size]\n _x = FT(_x).to(self.device)\n _z, _ = self.ae(_x)\n z.extend(_z.cpu().data.numpy())\n z = np.array(z)\n\n kmeans = MiniBatchKMeans(\n n_clusters=self.num_clusters,\n random_state=0,\n batch_size=batch_size,\n max_iter=100\n ).fit(z)\n\n centroids = kmeans.cluster_centers_\n print(centroids)\n print('Exiting init_centroids')\n \n return centroids\n\n # =========================\n # Input : embedding of data point\n # Calculate distance of each point from centroid\n # =========================\n def calculate_centroid_distance(self, z):\n\n z1 = z.repeat(1, self.num_clusters).reshape([-1, self.num_clusters, self.latent_dim])\n # Euclidean distance\n _centroids = self.centroids\n dist = torch.sqrt(\n torch.sum(\n (z1 - _centroids) ** 2,\n dim=-1,\n keepdim=False\n )\n )\n return dist\n\n # ------------\n # This should return a matrix of shape [batch, num_cluster]\n # There should be a single 1 in each row\n # ------------\n def get_cluster_assignments(self, z):\n\n _dist_ = self.calculate_centroid_distance(z)\n c_idx = torch.min(\n 
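# index of the nearest centroid for each sample (a hard, k-means style assignment)\n            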
_dist_,\n dim=1,\n keepdim=False\n )[1]\n c_idx = c_idx.long()\n C = F.one_hot(c_idx, num_classes=self.num_clusters)\n return C\n\n # ------------------\n # Clustering loss\n # cluster_assignments : One hot vector per sample\n # -------------------\n def calc_clus_loss(self, cluster_assignments, z):\n \n _centroids = self.centroids\n P = _centroids[\n torch.max(cluster_assignments, dim=1, keepdim=False)[1]\n ] \n \n Q = z\n dist = torch.sum((P - Q) ** 2, dim=1, keepdim=False)\n _loss = torch.sum(dist, dim=0, keepdim=False)\n \n return _loss\n\n # -----------------\n # # M_k = M_k - (1/ c_k )( M_k - z)\n # -----------------\n def update_cluster_centroids(self, z, C):\n\n counts = torch.sum(C, dim=0, keepdim=False) + 1\n _centroids = self.centroids\n\n z1 = z.repeat(1, self.num_clusters).reshape([-1, self.num_clusters, self.latent_dim])\n z2 = (_centroids - z1) # distance\n # Mask\n mask = C.float()\n mask = mask.repeat(\n 1, self.latent_dim\n ).reshape(\n [-1, self.latent_dim, self.num_clusters, ]\n ).permute([0, 2, 1])\n E = mask * z2\n\n # Sum them along axis for each cluster\n E1 = torch.sum(E, dim=0, keepdim=False)\n denom = torch.reciprocal(counts.float()).unsqueeze(1)\n E2 = denom * E1\n self.centroids = _centroids - E2\n\n return\n\n # =====================================\n\n # main training function\n # =====================================\n def train_model(self, data):\n ae_weights_file = os.path.join(self.checkpoint_dir, 'ae_model.pt')\n if os.path.exists(ae_weights_file) and False:\n if self.FLAG_ae_setup is False:\n # Set up the structure first , then load the weights\n for l_idx in range(self.ae_num_layers):\n self.ae.add_layer(l_idx)\n \n checkpoint = torch.load(ae_weights_file)\n self.ae.load_state_dict(checkpoint['model_state_dict'])\n else:\n self.pretrain_ae(data)\n # torch.save(self.ae.state_dict(), ae_weights_file)\n torch.save({\n 'model_state_dict': self.ae.state_dict()\n }, ae_weights_file)\n self.FLAG_ae_setup = True\n \n self.ae = self.ae.to(self.device)\n # =======================\n self.ae.train()\n self.ae.mode = 'dual'\n \n num_epochs = self.num_epochs_2\n batch_size = self.batch_size\n num_batches = data.shape[0] // batch_size + 1\n _params = list(self.ae.parameters())\n\n opt = torch.optim.Adam(\n _params,\n lr=self.LR\n )\n print('Optimizer ', opt)\n\n # ------------\n # Initial cluster centroids\n # ------------\n self.centroids = self.init_centroids(data)\n self.centroids = FT(self.centroids).to(self.device)\n\n# print('Initial centroids ', self.centroids)\n print('<------------->')\n prev_epoch_loss_mean = 0\n\n for e in tqdm(range(1, num_epochs + 1)):\n print('Epoch :: {}'.format(e))\n np.random.shuffle(data)\n # ------------\n # cluster assignment s_ij\n # ------------\n # cluster_assignments = self.get_cluster_assignments(data)\n\n self.ae.mode = 'dual'\n epoch_loss = []\n \n for b in range(num_batches):\n # ==================\n # Step 1\n # ==================\n \n _x = data [b * batch_size : (b + 1) * batch_size]\n _x = torch.FloatTensor(_x)\n \n _x = _x.to(self.device)\n opt.zero_grad()\n \n z, x_r = self.ae(_x)\n \n b_cluster_assignments = self.get_cluster_assignments(z)\n \n # Calculate the reconstruction loss\n \n ae_loss = F.mse_loss(_x, x_r, reduction='none') \n ae_loss = torch.sum(ae_loss, dim=-1, keepdim=False)\n ae_loss = torch.mean(ae_loss, dim=0, keepdim=False)\n \n # ------------\n # b_cluster_assignments = cluster_assignments[b * batch_size: (b + 1) * batch_size]\n # ------------\n # Calculate the clustering loss\n clustering_loss = 
self.calc_clus_loss(\n                    b_cluster_assignments,\n                    z\n                )\n                \n                loss = ae_loss + self.Lambda * clustering_loss\n                loss.backward(retain_graph=True)\n                epoch_loss.append(loss.cpu().data.numpy())\n                opt.step()\n                \n                if b % self.log_interval == 0:\n                    print('Loss {:4f}'.format(loss.cpu().data.numpy()))\n                self.ae.eval()\n\n                # ==================\n                # Step 2 \n                # Calculate new assignments for batch samples\n                # ==================\n                z, _ = self.ae(_x)\n                b_cluster_assignments = self.get_cluster_assignments(z)\n\n                # ====================\n                # Step 3\n                # Update the cluster centroids\n\n                self.update_cluster_centroids(\n                    z, b_cluster_assignments\n                )\n\n            # print(self.centroids)\n            epoch_loss_mean = np.mean(epoch_loss)\n            diff = abs(epoch_loss_mean - prev_epoch_loss_mean)\n            if diff < self.break_threshold and e > self.min_epochs:\n                print('Breaking: training loss is staying the same')\n                break\n\n            prev_epoch_loss_mean = epoch_loss_mean\n        print('Final centroids ', self.centroids)\n\n        # -------------\n        # Save model\n        # -------------\n\n\n\n        return\n\n    def get_cluster(self, data):\n        batch_size = self.batch_size\n        num_batches = data.shape[0] // batch_size + 1\n        C = []\n        self.ae.eval()\n        self.ae.mode = 'encoder'\n        for b in range(num_batches):\n            _x = data[b * batch_size: (b + 1) * batch_size]\n            _x = FT(_x).to(self.device)\n            z = self.ae(_x)\n            # nearest centroid per sample, via the distance helper defined above\n            _dist_ = self.calculate_centroid_distance(z)\n            _c_ = torch.min(\n                _dist_,\n                dim=1,\n                keepdim=False\n            )[1]\n\n            C.append(_c_)\n        C = torch.cat(C, dim=0)\n        return C.cpu().data.numpy()\n\n    # ========================\n    # Score a single sample\n    # ========================\n    def __score_sample(self, x):\n        self.ae.eval()\n        self.ae.mode = 'encoder'\n        z = self.ae(x)\n        cluster_idx = torch.max(self.get_cluster_assignments(z), dim=1)[1]\n        c = self.centroids[cluster_idx]\n        D = torch.sum((c - z) ** 2, dim=1, keepdim=False)\n        return D\n\n    def score_samples(self, data):\n        bs = self.batch_size\n        num_batches = data.shape[0] // bs + 1\n        res = []\n\n        for b in tqdm(range(num_batches)):\n            x = data[b * bs: (b + 1) * bs]\n            x = FT(x).to(self.device)\n            if x.shape[0] == 0:\n                break\n            r = self.__score_sample(x)\n            r = r.cpu().data.numpy()\n            res.extend(r)\n        res = np.array(res)\n        return res\n\n\n'''\n    def get_cluster_assignments(self, z):\n        batch_size = self.batch_size\n        num_batches = data.shape[0] // batch_size + 1\n        self.ae.mode = 'dual'\n        C = []\n        for b in range(num_batches):\n            _x = data[b * batch_size: (b + 1) * batch_size]\n            _x = FT(_x).to(self.device)\n            \n            z, _ = self.ae(_x) # ae.mode is \"dual\" \n            _dist_ = self.calc_centroid_distance(z)\n            \n            c_idx = torch.min(\n                _dist_,\n                dim=1,\n                keepdim=False\n            )[1]\n            c_idx = c_idx.long() \n            c_idx= F.one_hot(c_idx, num_classes=self.num_clusters)\n            \n            C.append(c_idx) \n        C = torch.cat(C,dim=0)\n        return C\n\n'''\n","sub_path":"DCN_1/model_dcn.py","file_name":"model_dcn.py","file_ext":"py","file_size_in_byte":17833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}{"seq_id":"601220037","text":"import random\n\n\nclass Punkt(object):\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    @classmethod\n    def losuj_punkt(cls, max_x, max_y):\n        x = random.randint(0, max_x)\n        y = random.randint(0, max_y)\n        return cls(x, y)\n\n\nclass Waz(Punkt):\n    def znak(self, liczba):\n        if liczba < 0:\n            return -1\n        if liczba > 0:\n            return 1\n        return 0\n\n    def zrob_krok(self, cel):\n        roznica_x = self.x - cel.x\n        roznica_y = self.y - cel.y\n        self.x -= self.znak(roznica_x)\n        self.y -= self.znak(roznica_y)\n\n\nif __name__ == '__main__':\n    jedzonko = Punkt.losuj_punkt(500, 500)\n    waz = Waz.losuj_punkt(500, 500)\n    while waz.x != jedzonko.x or 
waz.y != jedzonko.y:\n waz.zrob_krok(jedzonko)\n print(waz.x, waz.y)\n","sub_path":"czesc_2/waz.py","file_name":"waz.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"221639049","text":"from random import choice\nfrom tic_tac_toe.game import run_single_game\n\n\ndef run_game_suite(first_player, second_player, number_of_games):\n \"\"\"\n This is used for running a game in the 2 out of 3 or 3 out of 5 format.\n :param first_player: The name of the first player\n :param second_player: The name of the second player\n :param number_of_games: The maximum number of the games.\n \"\"\"\n current_game = 0\n minimum_games_to_won = number_of_games // 2 + 1\n first_player_score = 0\n second_player_score = 0\n\n print(\"\\n{} vs {} in a {} out of {}\".format(first_player, second_player, minimum_games_to_won, number_of_games))\n print(\"FIGHT :)\")\n\n while True:\n player_with_X = first_player\n player_with_0 = second_player\n\n if current_game < number_of_games:\n if current_game % 2 == 1:\n player_with_X = second_player\n player_with_0 = first_player\n else:\n random_choice = choice([1, 2])\n if random_choice == 2:\n player_with_X = second_player\n player_with_0 = first_player\n\n winner = run_single_game(player_with_X, player_with_0)\n\n if winner == first_player:\n first_player_score += 1\n elif winner == second_player:\n second_player_score += 1\n\n # Display current score\n print(\"{} {}-{} {}\".format(first_player, first_player_score, second_player_score, second_player))\n input(\"Press any key to continue...\")\n\n if first_player_score == minimum_games_to_won or second_player_score == minimum_games_to_won:\n return first_player_score, second_player_score\n\n current_game += 1\n","sub_path":"tic_tac_toe/game/suite.py","file_name":"suite.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"394578045","text":"from itertools import chain\nfrom multiprocessing import Process, Queue\nfrom operator import itemgetter\nimport os\n\n\n__all__ = ['parmap']\n\n\ndef split(lst, n):\n k, m = divmod(len(lst), n)\n return [lst[i*k+min(i,m):(i+1)*k+min(i+1,m)] for i in range(n)]\n\n\ndef delegate(pid, q, target, chunk, args, reduction):\n result = [target(*c, *args) for c in chunk]\n\n if reduction:\n q.put((pid, reduction(result)))\n else:\n q.put((pid, result))\n\n\ndef parmap(target, varying, constant=(), reduction=None, ncpus=None, unwrap=True):\n if not ncpus:\n ncpus = os.cpu_count()\n if not unwrap:\n varying = [(v,) for v in varying]\n chunks = split(varying, ncpus)\n q = Queue()\n\n processes = []\n for pid, chunk in enumerate(chunks):\n p = Process(\n target=delegate,\n args=(pid, q, target, chunk, constant, reduction),\n )\n p.start()\n processes.append((pid, p))\n\n result = [q.get() for _ in range(ncpus)]\n\n for pid, p in processes:\n p.join()\n\n result = [v for _, v in sorted(result, key=itemgetter(0))]\n\n if reduction:\n return reduction(result)\n return list(chain.from_iterable(result))\n","sub_path":"ramos/utils/parallel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"564336519","text":"import pytest\nfrom modules.first_follow import get_first, get_firsts, get_follow, get_rhs, find_first, parse_production, find_first_sole, get_follows\nfrom modules.first_follow 
import parse_find_first, separate_production, build_parsing_table\nfrom modules.first_follow import lookup_table, parse_input\n\nANSI_RESET = \"\\u001B[0m\"\nANSI_RED = \"\\u001B[31m\"\nANSI_GREEN = \"\\u001B[32m\"\nANSI_YELLOW = \"\\u001B[33m\"\nANSI_BLUE = \"\\u001B[34m\"\nANSI_PURPLE = \"\\u001B[35m\"\n#ANSI_YELLOW = \"\\u001B[36m\"\n\n\n\ndef print_yellow(msg):\n print(f\"{ANSI_YELLOW}{msg}{ANSI_RESET}\")\n\ndef print_purple(msg):\n print(f\"{ANSI_PURPLE}{msg}{ANSI_RESET}\")\n\ndef print_blue(msg):\n print(f\"{ANSI_BLUE}{msg}{ANSI_RESET}\")\n\ndef print_red(msg):\n print(f\"{ANSI_RED}{msg}{ANSI_RESET}\")\n\ndef print_green(msg):\n print(f\"{ANSI_GREEN}{msg}{ANSI_RESET}\")\n\n\n\ndef test_get_first():\n \n # case 1\n case = f\"{ANSI_YELLOW}get first case 1{ANSI_RESET}\"\n gram = {\n 'A' : [['B','X','b','c'] , ['d','e','f'] , ['g','h','i'] , ['𝛆']],\n 'X' : [['q']],\n 'B' : [['s'],['𝛆']]\n }\n\n non_terminal_list = ['A','X','B']\n correct_value = {'A': {'d', 's', 'g', 'q', '𝛆'}, 'X': {'q'}, 'B': {'𝛆', 's'}}\n actual_value = get_firsts(gram, non_terminal_list)\n assert_it(correct_value, actual_value, case)\n\n\n # case 2\n case = f\"{ANSI_YELLOW}get first case 2{ANSI_RESET}\"\n \n gram = {\n 'S': [['A','B','C','D']],\n 'A': [['a'], ['𝛆']],\n 'B': [['C','D'], ['b']],\n 'C': [['c'],['𝛆']],\n 'D': [['A','a'], ['d'],['𝛆']]\n }\n \n non_terminal_list = ['S','A','B','C','D']\n correct_value = {'S': {'a', 'c', '𝛆', 'b', 'd'}, 'A': {'a', '𝛆'}, 'B': {'a', 'c', '𝛆', 'b', 'd'}, 'C': {'c', '𝛆'}, 'D': {'a', '𝛆', 'd'}}\n actual_value = get_firsts(gram, non_terminal_list)\n assert_it(correct_value, actual_value, case)\n\n # case 3 ex_2\n case = f\"{ANSI_YELLOW}get first case 3{ANSI_RESET}\"\n gram = {\n 'S': [['A']],\n 'A': [['a','B',\"A'\"]],\n \"A'\": [['d',\"A'\"],['𝛆']],\n 'B': [['b']],\n \"A'\": [['d',\"A'\"],['𝛆']],\n 'C': [['g']]\n } \n \n non_terminal_list = [\"S\", \"A\" , \"A'\", \"B\", \"C\"]\n correct_value = {'S': {'a'}, 'A': {'a'}, \"A'\": {'d', '𝛆'}, 'B': {'b'}, 'C': {'g'}}\n actual_value = get_firsts(gram, non_terminal_list)\n assert_it(correct_value, actual_value, case)\n\n # case 4 ex3 gatevid\n case = f\"{ANSI_YELLOW}get first case 4{ANSI_RESET}\"\n gram = {\n 'S': [['(','L',')'],['a']],\n 'L': [['S',\"L'\"]],\n \"L'\": [[',','S'],['𝛆']]\n }\n\n non_terminal_list = {\"S\", \"L\", \"L'\"}\n correct_value = {'S': {'(','a'}, 'L': {'(','a'}, \"L'\": {',', '𝛆'}}\n actual_value = get_firsts(gram, non_terminal_list)\n assert_it(correct_value, actual_value, case)\n\n\n case = f\"{ANSI_YELLOW}get first case 5{ANSI_RESET}\"\n gram = {\n 'S': [['A', 'a','A','b'],['B', 'b','B','a']],\n 'A': [['𝛆']],\n 'B': [['𝛆']]\n }\n\n non_terminal_list = {'S', 'A', 'B'}\n correct_value ={'S': {'a', 'b'}, 'A': {'𝛆'}, 'B': {'𝛆'}}\n actual_value = get_firsts(gram, non_terminal_list)\n assert_it(correct_value, actual_value, case)\n\n\n\ndef test_rhs():\n # case 1\n case = f\"{ANSI_YELLOW}get rhs case 1{ANSI_RESET}\"\n gram = {\n 'A' : [['B','X','b','c'] , ['X','e','f'] , ['g','h','i'], ['d','X','e','f'], ['𝛆']],\n 'X' : [['q']],\n 'B' : ['s','𝛆']\n }\n \n correct_value = {}\n actual_value = get_rhs(gram , 'A')\n assert_it(correct_value, actual_value, case)\n\n # case 2\n case = f\"{ANSI_YELLOW}get rhs case 2{ANSI_RESET}\"\n correct_value = {'A' : [['b','c'], ['e', 'f']]}\n actual_value = get_rhs(gram , 'X')\n assert_it(correct_value, actual_value, case)\n\n # case 3\n case = f\"{ANSI_YELLOW}get rhs case 3{ANSI_RESET}\"\n correct_value = {'A' : [['X','b','c']]}\n actual_value = get_rhs(gram , 'B')\n 
assert_it(correct_value, actual_value, case)\n\n gram = {\n 'S': [['A']],\n 'A': [['a','B',\"A'\"]],\n \"A'\": [['d',\"A'\"],['𝛆']],\n 'B': [['b']],\n 'C': [['g'],['D','g']],\n 'D': [['A','c','d'], ['B'], ['c','A']]\n\n }\n\n # case 4\n case = f\"{ANSI_YELLOW}get rhs case 4{ANSI_RESET}\"\n correct_value = {'S' : [[]], 'D': [['c' , 'd'], []]}\n actual_value = get_rhs(gram , 'A')\n\n print(len(actual_value[\"S\"]))\n assert_it(correct_value, actual_value, case)\n\ndef test_find_first_sole():\n gram = {\n 'S': [['A']],\n 'A': [['a','B',\"A'\"]],\n \"A'\": [['d',\"A'\"],['𝛆']],\n 'B': [['b']],\n 'C': [['g'],['D','g']],\n 'D': [['A','c','d'], ['B'], ['c','A']]\n\n }\n\n case = f\"{ANSI_YELLOW}find first case 1{ANSI_RESET}\"\n #find follow for A\n non_terminal_list = ['S', \"A\", \"A'\", \"B\", \"C\", \"d\"]\n non_terminal_production = {'S' : [['𝛆']], 'D': [['c' , 'd'], ['𝛆']]}\n first, follow = find_first_sole(gram,'A', non_terminal_production, non_terminal_list)\n actual_value = parse_find_first(first, follow) \n correct_value = [{'c'}, {'S','D'}]\n assert_it(correct_value, actual_value, case)\n\n case = f\"{ANSI_YELLOW}find first case 2{ANSI_RESET}\"\n #find follow for A\n non_terminal_production = parse_production(get_rhs(gram, 'D')) \n first, follow = find_first_sole(gram,'D', non_terminal_production, non_terminal_list) \n actual_value = parse_find_first(first, follow)\n correct_value = [{'g'}, set()]\n assert_it(correct_value, actual_value, case)\n\n gram = {\n 'A' : [['B','X','b','c'] , ['d','e','f'] , ['g','h','i'] , ['𝛆']],\n 'X' : [['q']],\n 'B' : [['X', 's'],['𝛆']]\n }\n\n non_terminal_list = ['A','X','B']\n\n case = f\"{ANSI_YELLOW}find first case 3{ANSI_RESET}\"\n #find follow for A\n \n non_terminal_production = parse_production(get_rhs(gram, 'X'))\n first, follow = find_first_sole(gram,'X', non_terminal_production, non_terminal_list)\n actual_value = parse_find_first(first, follow)\n correct_value = [{'b', 's'}, set() ]\n assert_it(correct_value, actual_value, case)\n\n gram = {\n \n 'S' : [['a','B','D','h']],\n 'B' : [['c', 'C']],\n 'C' : [['b','C'],['𝛆']],\n 'D' : [['E','F']],\n 'E' : [['g'],['𝛆']],\n 'F' : [['f'],['𝛆']]\n }\n\n case = f\"{ANSI_YELLOW}find first case 4{ANSI_RESET}\"\n #find follow for A\n non_terminal_list = {'S','B','C','D','E','F'} \n non_terminal_production = parse_production(get_rhs(gram, 'B')) \n first, follow = find_first_sole(gram,'B', non_terminal_production, non_terminal_list)\n actual_value = parse_find_first(first, follow)\n correct_value = [{'g','f','h'}, set() ]\n assert_it(correct_value, actual_value, case)\n\n\n\ndef test_find_first():\n\n gram = {\n 'S': [['A']],\n 'A': [['a','B',\"A'\"]],\n \"A'\": [['d',\"A'\"],['𝛆']],\n 'B': [['b']],\n 'C': [['g'],['D','g']],\n 'D': [['A','c','d'], ['B'], ['c','A']]\n\n }\n\n case = f\"{ANSI_YELLOW}find first case 1{ANSI_RESET}\"\n #find follow for A\n non_terminal_list = ['S', \"A\", \"A'\", \"B\", \"C\", \"d\"]\n non_terminal_production = {'S' : [['𝛆']], 'D': [['c' , 'd'], ['𝛆']]}\n actual_value = find_first(gram, non_terminal_production, non_terminal_list)\n correct_value = None\n assert_it(correct_value, actual_value, case)\n\n \n\ndef test_get_follow():\n gram = {\n \n 'S' : [['a','B','D','h']],\n 'B' : [['c', 'C']],\n 'C' : [['b','C'],['𝛆']],\n 'D' : [['E','F']],\n 'E' : [['g'],['𝛆']],\n 'F' : [['f'],['𝛆']]\n }\n\n case = f\"{ANSI_YELLOW}get follow case 1{ANSI_RESET}\"\n #find follow for A\n non_terminal_list = {'S','B','C','D','E','F'} \n non_terminal = 'S' \n first_set = get_firsts(gram, 
non_terminal_list)\n    start_symbol = 'S'\n\n\n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'$'}\n    assert_it(correct_value, actual_value, case)\n\n    case = f\"{ANSI_YELLOW}get follow case 2{ANSI_RESET}\"\n    non_terminal = 'B' \n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'g','f','h'}\n    assert_it(correct_value, actual_value, case)\n\n    case = f\"{ANSI_YELLOW}get follow case 3{ANSI_RESET}\"\n    non_terminal = 'C' \n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'g','f','h'}\n    assert_it(correct_value, actual_value, case)\n\n    case = f\"{ANSI_YELLOW}get follow case 4{ANSI_RESET}\"\n    non_terminal = 'D' \n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'h'}\n    assert_it(correct_value, actual_value, case)\n\n    case = f\"{ANSI_YELLOW}get follow case 5{ANSI_RESET}\"\n    non_terminal = 'E' \n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'f','h'}\n    assert_it(correct_value, actual_value, case)\n\n    case = f\"{ANSI_YELLOW}get follow case 6{ANSI_RESET}\"\n    non_terminal = 'F' \n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'h'}\n    assert_it(correct_value, actual_value, case)\n\n    \n    gram = {\n        'S': [['A']],\n        'A': [['a','B',\"A'\"]],\n        \"A'\": [['d',\"A'\"],['𝛆']],\n        'B': [['b']],\n        'C': [['g'],['D','g']],\n        'D': [['A','c','d'], ['B'], ['c','A']]\n    }\n\n    case = f\"{ANSI_YELLOW}get follow case 7{ANSI_RESET}\"\n    #find follow for A\n    non_terminal_list = {'S','A', \"A'\", 'B','C','D'} \n    non_terminal = 'S' \n    first_set = get_firsts(gram, non_terminal_list)\n    start_symbol = 'S'\n\n\n    actual_value = get_follow(gram, non_terminal, first_set, start_symbol, non_terminal_list)\n    correct_value = {'$'}\n    assert_it(correct_value, actual_value, case)\n\ndef test_get_follows():\n    gram = {\n        \n        'S' : [['a','B','D','h']],\n        'B' : [['c', 'C']],\n        'C' : [['b','C'],['𝛆']],\n        'D' : [['E','F']],\n        'E' : [['g'],['𝛆']],\n        'F' : [['f'],['𝛆']]\n    }\n\n    case = f\"{ANSI_YELLOW}get follows case 1{ANSI_RESET}\"\n    #find follow for A\n    non_terminal_list = {'S','B','C','D','E','F'} \n    first_set = get_firsts(gram, non_terminal_list)\n    start_symbol = 'S'\n\n    actual_value = get_follows(gram, first_set, start_symbol, non_terminal_list)\n    correct_value = {'S': {'$'}, 'F': {'h'}, 'C': {'f', 'h', 'g'}, 'B': {'f', 'h', 'g'}, 'E': {'f', 'h'}, 'D': {'h'}}\n\n    assert_it(correct_value, actual_value, case)\n\n\n    gram = {\n        'S': [['A']],\n        'A': [['a','B',\"A'\"]],\n        \"A'\": [['d',\"A'\"],['𝛆']],\n        'B': [['b']],\n        'C': [['g']]\n        \n    }\n\n    case = f\"{ANSI_YELLOW}get follows case 2{ANSI_RESET}\"\n    non_terminal_list = {'S', 'A', \"A'\", 'B','C'} \n    first_set = get_firsts(gram, non_terminal_list)\n    start_symbol = 'S' \n    actual_value = get_follows(gram, first_set, start_symbol, non_terminal_list)\n    correct_value = {'A': {'$'}, 'C': set(), \"A'\": {'$'}, 'S': {'$'}, 'B': {'$', 'd'}}\n    assert_it(correct_value, actual_value, case)\n\n    gram = {\n        'S': [['(','L',')'],['a']],\n        'L': [['S',\"L'\"]],\n        \"L'\": [[',','S'],['𝛆']]\n    }\n\n    case = f\"{ANSI_YELLOW}get follows case 3{ANSI_RESET}\"\n    non_terminal_list = {'S', 'L', \"L'\"} \n    first_set = get_firsts(gram, non_terminal_list)\n    start_symbol = 'S' \n    actual_value = get_follows(gram, first_set, start_symbol, non_terminal_list)\n    correct_value = {'L': 
{')'}, 'S': {'$', ')', ','}, \"L'\": {')'}}\n assert_it(correct_value, actual_value, case)\n\n gram = {\n 'S': [['A','a','A','b'],['B','b', 'B', 'a']],\n 'A': [['𝛆']],\n 'B': [['𝛆']]\n }\n\n case = f\"{ANSI_YELLOW}get follows case 4{ANSI_RESET}\"\n non_terminal_list = {'S', 'A', 'B'} \n first_set = get_firsts(gram, non_terminal_list)\n start_symbol = 'S' \n actual_value = get_follows(gram, first_set, start_symbol, non_terminal_list)\n correct_value = {'A': {'a', 'b'}, 'S': {'$'}, 'B': {'a','b'}}\n assert_it(correct_value, actual_value, case)\n\n \n\n \n\n\n\n\ndef test_parse_production():\n\n case = f\"{ANSI_YELLOW}parse production case 1{ANSI_RESET}\"\n production_list = {'S' : [[]], 'D': [['c' , 'd'], []]}\n actual_value = parse_production(production_list)\n correct_value = {'S' : [['𝛆']], 'D': [['c' , 'd'], ['𝛆']]}\n assert_it(correct_value, actual_value, case)\n\ndef test_separate_production():\n\n case = f\"{ANSI_YELLOW}Separate Production case 1{ANSI_RESET}\"\n gram = {\n 'A' : [['B','X','b','c'] , ['d','e','f'] , ['g','h','i'] , ['𝛆']],\n 'X' : [['q']],\n 'B' : [['s'],['𝛆']]\n }\n\n actual_value = separate_production(gram)\n correct_value = [{'A': ['B', 'X', 'b', 'c']}, {'A': ['d', 'e', 'f']}, {'A': ['g', 'h', 'i']}, {'A': ['𝛆']}, {'X': ['q']}, {'B': ['s']}, {'B': ['𝛆']}]\n assert_it(correct_value, actual_value, case)\n\ndef test_build_parsing_table():\n\n case = f\"{ANSI_YELLOW}Build Parsing Table case 1{ANSI_RESET}\"\n grammar = {\n \n 'S' : [['a','B','D','h']],\n 'B' : [['c', 'C']],\n 'C' : [['b','C'],['𝛆']],\n 'D' : [['E','F']],\n 'E' : [['g'],['𝛆']],\n 'F' : [['f'],['𝛆']]\n\n }\n\n\n non_terminal_list = {'S','A', 'B','C','D', 'E', 'F'}\n actual_value = build_parsing_table(grammar, non_terminal_list, 'S')\n correct_value = {\n 'S': {'a': {'S': [['a', 'B', 'D', 'h']]}},\n 'B': {'c': {'B': [['c', 'C']]}},\n 'C': {'b': {'C': [['b', 'C']]}, 'h': {'C': [['𝛆']]}, 'g': {'C': [['𝛆']]}, 'f': {'C': [['𝛆']]}},\n 'D': {'g': {'D': [['E', 'F']]}, 'f': {'D': [['E', 'F']]}, 'h': {'D': [['E', 'F']]}},\n 'E': {'g': {'E': [['g']]}, 'h': {'E': [['𝛆']]}, 'f': {'E': [['𝛆']]}},\n 'F': {'f': {'F': [['f']]}, 'h': {'F': [['𝛆']]}}}\n assert_it(correct_value, actual_value, case)\n\n case = f\"{ANSI_YELLOW}Build Parsing Table case 2{ANSI_RESET}\"\n grammar = {\n \n \"E\" : [[\"T\", \"E'\"]],\n \"E'\" : [['+','T', \"E'\"], [\"𝛆\"]],\n \"T\" : [[\"F\",\"T'\"]],\n \"T'\" : [['*','F', \"T'\"],[\"𝛆\"]],\n \"F\" : [['(', \"E\", ')'],['x'], ['y']]\n\n }\n\n non_terminal_list = {\"E\",\"E'\", \"T\", \"T'\", \"F\"}\n actual_value = build_parsing_table(grammar, non_terminal_list, 'E')\n correct_value = {\n 'E': {'(': {'E': [['T', \"E'\"]]}, 'x': {'E': [['T', \"E'\"]]}, 'y': {'E': [['T', \"E'\"]]}},\n \"E'\": {'+': {\"E'\": [['+', 'T', \"E'\"]]}, ')': {\"E'\": [['𝛆']]}, '$': {\"E'\": [['𝛆']]}},\n 'T': {'(': {'T': [['F', \"T'\"]]}, 'x': {'T': [['F', \"T'\"]]}, 'y': {'T': [['F', \"T'\"]]}},\n \"T'\": {'*': {\"T'\": [['*', 'F', \"T'\"]]}, '+': {\"T'\": [['𝛆']]}, '$': {\"T'\": [['𝛆']]}, ')': {\"T'\": [['𝛆']]}},\n 'F': {'(': {'F': [['(', 'E', ')']]}, 'x': {'F': [['x']]}, 'y': {'F': [['y']]}}\n } \n\n assert_it(correct_value, actual_value, case)\n\n case = f\"{ANSI_YELLOW}Build Parsing Table case 3{ANSI_RESET}\"\n grammar = {\n 'S' : [['a','B','a']],\n 'B' : [['b', 'B'],['𝛆']]\n }\n\n\n non_terminal_list = {'S','B'}\n actual_value = build_parsing_table(grammar, non_terminal_list, 'S')\n correct_value = {\n 'S': {'a': {'S': [['a','B','a']]}},\n 'B': {'a': {'B': [['𝛆']]}, 'b': {'B': [['b','B']]} }\n }\n 
assert_it(correct_value, actual_value, case)\n\n\n\ndef test_lookup_table():\n parsing_table = {\n 'E': {'(': {'E': [['T', \"E'\"]]}, 'x': {'E': [['T', \"E'\"]]}, 'y': {'E': [['T', \"E'\"]]}},\n \"E'\": {'+': {\"E'\": [['+', 'T', \"E'\"]]}, ')': {\"E'\": [['𝛆']]}, '$': {\"E'\": [['𝛆']]}},\n 'T': {'(': {'T': [['F', \"T'\"]]}, 'x': {'T': [['F', \"T'\"]]}, 'y': {'T': [['F', \"T'\"]]}},\n \"T'\": {'*': {\"T'\": [['*', 'F', \"T'\"]]}, '+': {\"T'\": [['𝛆']]}, '$': {\"T'\": [['𝛆']]}, ')': {\"T'\": [['𝛆']]}},\n 'F': {'(': {'F': [['(', 'E', ')']]}, 'x': {'F': [['x']]}, 'y': {'F': [['y']]}}\n }\n\n case = f\"{ANSI_YELLOW}Lookup Table case 1{ANSI_RESET}\"\n actual_value = lookup_table(parsing_table, 'E', '(')\n correct_value = ['T', \"E'\"]\n assert_it(correct_value, actual_value, case)\n\n case = f\"{ANSI_YELLOW}Lookup Table case 2{ANSI_RESET}\"\n actual_value = lookup_table(parsing_table, 'F', '*')\n correct_value = False\n assert_it(correct_value, actual_value, case)\n\n\ndef test_parse_input():\n parsing_table = {\n 'E': {'n': {'E': [['T', 'R']]}, '(': {'E': [['T', 'R']]} },\n 'R': {'+': {'R': [['+', 'E']]}, '*': {'R': [['𝛆']]}, ')': {'R': [['𝛆']]}, '$': {'R': [['𝛆']]} },\n 'T': {'n': {'T': [['F', 'S']]}, '(': {'T': [['F', 'S']]} },\n 'S': {'+': {'S': [['𝛆']]}, ')': {'S': [['𝛆']]}, '$': {'S': [['𝛆']]}, '*': {'S': [['*','T']]} },\n 'F': {'n': {'F': [['n']]}, '(': {'F': [['(','E',')']]}},\n }\n \n non_terminal_list = {\"E\",\"R\",\"T\",\"S\",\"F\"}\n terminal_list = {'n','+','*','(', ')','$'}\n input_list = list(\"n+n*n\")\n #input_list = ['n','+','n','*','n']\n case = f\"{ANSI_YELLOW}Parse Input case 1{ANSI_RESET}\"\n actual_value = parse_input(parsing_table,input_list,'E',non_terminal_list,terminal_list)\n correct_value = True\n assert_it(correct_value, actual_value, case)\n\n \n parsing_table = {\n 'S': {'a': {'S': [['a','B','a']]}},\n 'B': {'a': {'B': [['𝛆']]}, 'b': {'B': [['b','B']]} }\n }\n\n non_terminal_list = {\"S\",\"B\"}\n terminal_list = {'a','b','$'}\n input_list = list(\"abba\")\n case = f\"{ANSI_YELLOW}Parse Input case 2{ANSI_RESET}\"\n actual_value = parse_input(parsing_table,input_list,'S',non_terminal_list,terminal_list)\n correct_value = True\n assert_it(correct_value, actual_value, case)\n\n\n\n\n\n \n\n\ndef assert_it(correct_value, actual_value, case=\"\"):\n assert correct_value == actual_value,\\\n f\"{ANSI_RED}[failed] {case}\"\\\n f\" Expected ( {correct_value} )\\n got\\n ( {actual_value} ){ANSI_RESET}\"\n print_green(f\"[success] {case}\")\n\n\ndef main():\n ###################\n # Run tests\n ###################\n # Sorted by checklist order, feel free to comment/un-comment\n # any of those functions.\n try:\n test_get_first()\n print_blue('*.*.'*15)\n test_rhs()\n print_blue('*.*.'*15)\n test_parse_production()\n print_blue('*.*.'*15)\n test_find_first_sole()\n print_blue('*.*.'*15)\n test_get_follow()\n print_blue('*.*.'*15)\n test_get_follows()\n print_blue('*.*.'*15)\n test_separate_production()\n print_blue('*.*.'*15)\n test_build_parsing_table()\n print_blue('*.*.'*15)\n test_lookup_table()\n print_blue('*.*.'*15)\n test_parse_input()\n\n except AssertionError as e:\n print(\"Test case failed:\\n\", str(e))\n exit(-1)\n\n\nif __name__ == \"__main__\":\n main()\n\"\"\" \n\n{\n 'METHOD_BODY': [['STATEMENT_LIST']],\n 'STATEMENT_LIST': [['STATEMENT', ' ', 'STATEMENT_LIST_2']],\n 'STATEMENT_LIST_2': [['STATEMENT', ' ', 'STATEMENT_LIST_2'], ['𝛆']],\n 'STATEMENT': [['DECLARATION'], ['IF'], ['WHILE'], ['ASSIGNMENT']],\n 'DECLARATION': [['PRIMITIVE_TYPE', ' ', \"'id'\", ' 
', \"';'\"]],\n 'PRIMITIVE_TYPE': [[\"'int'\"], [\"'float'\"]],\n 'IF': [[\"'if'\", ' ', \"'('\", ' ', 'EXPRESSION', ' ', \"')'\", ' ', \"'{'\", ' ', 'STATEMENT', ' ', \"'}'\", ' ', \"'else'\", ' ', \"'{'\", ' ', 'STATEMENT', ' ', \"'}'\"]],\n 'WHILE': [[\"'while'\", ' ', \"'('\", ' ', 'EXPRESSION', ' ', \"')'\", ' ', \"'{'\", ' ', 'STATEMENT', ' ', \"'}'\"]],\n 'ASSIGNMENT': [[\"'id'\", ' ', \"'assign'\", ' ', 'EXPRESSION', ' ', \"';'\"]],\n 'EXPRESSION': [['SIMPLE_EXPRESSION', ' ', 'EXPRESSION_2']],\n 'EXPRESSION_2': [[\"'relop'\", ' ', 'SIMPLE_EXPRESSION'], ['𝛆']],\n 'SIMPLE_EXPRESSION': [['TERM', ' ', 'SIMPLE_EXPRESSION_2'], ['SIGN', ' ', 'TERM', ' ', 'SIMPLE_EXPRESSION_2']],\n 'SIMPLE_EXPRESSION_2': [[\"'addop'\", ' ', 'TERM', ' ', 'SIMPLE_EXPRESSION_2'], ['𝛆']],\n 'TERM': [['FACTOR', ' ', 'TERM_2']],\n 'TERM_2': [[\"'mulop'\", ' ', 'FACTOR', ' ', 'TERM_2'], ['𝛆']],\n 'FACTOR': [[\"'id'\"], [\"'num'\"], [\"'('\", ' ', 'EXPRESSION', ' ', \"')'\"]],\n 'SIGN': [[\"'addop'\"]]\n \n}\n \"\"\"\n","sub_path":"Other/Test_Cases/test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":19766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"333916102","text":"#2d array creates board shape\nboard = [[\" \", \" \", \" \"], \n [\" \", \" \", \" \"], \n [\" \", \" \", \" \"]]\n\n#Players pieces\nplayer1 = \"X\"\nplayer2 = \"O\"\n#If this number reaches 0 then all players have used up their pieces and the game is a draw\nplayer1Count = 5\n#This will determine whos turn it is\ncurrentPlayer = player1\n#If this becomes true the game ends\nwinCondition = False\n\n#prints index positions onto the board so players can see what numbers are needed to place their piece\ndef printIndexes():\n for row in range(0, len(board)):\n for column in range(0, len(board)):\n board[row][column] = str(row) + \":\" + str(column)\n\n#Prints the board out in correct shape\ndef printBoard():\n for row in board:\n print(row) \n\n#Input sanitisation for the row\ndef askForRow():\n global rowIndex\n try:\n rowIndex = int(input(\"Player using {} please choose a valid row \".format(currentPlayer)))\n except ValueError:\n print()\n print(\"Invalid character. Please use 1 or 2\")\n print()\n askForRow()\n except NameError:\n print()\n print(\"Invalid character. Please use 1 or 2\")\n print()\n askForRow()\n\n\n#Input sanitisation for the column\ndef askForColumn():\n global columnIndex\n try:\n columnIndex = int(input(\"Player using {} please choose a valid column \".format(currentPlayer)))\n except ValueError:\n print()\n print(\"Invalid character. Please use 1 or 2\")\n print()\n askForColumn()\n except NameError:\n print()\n print(\"Invalid character. Please use 1 or 2\")\n print()\n askForColumn()\n\n\n#Checks to see the position given by player to see if it is a legal move. If it is legal then it will place piece onto the board using the row and column number \ndef askForPosition():\n askForRow()\n while rowIndex != 0 and rowIndex != 1 and rowIndex != 2:\n print()\n print(\"Invalid character. Please use 1 or 2\")\n print()\n askForRow()\n\n askForColumn()\n while columnIndex != 0 and columnIndex != 1 and columnIndex != 2:\n print()\n print(\"Invalid character. 
Please use 1 or 2\")\n        print()\n        askForColumn()\n\n    if board[int(rowIndex)][int(columnIndex)] == player1 or board[int(rowIndex)][int(columnIndex)] == player2:\n        print()\n        print(\"Position has already been filled\")\n        print()\n        askForPosition()\n    else:\n        board[int(rowIndex)][int(columnIndex)] = currentPlayer\n\n#Using a bunch of if statements to see if TicTacToe win conditions or a draw has occurred\ndef winConditions():\n    global winCondition\n    if board[0][0] == player1 and board[0][1] == player1 and board[0][2] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n\n    elif board[0][0] == player1 and board[1][0] == player1 and board[2][0] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n    \n    elif board[0][0] == player1 and board[1][1] == player1 and board[2][2] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n    \n    elif board[2][0] == player1 and board[2][1] == player1 and board[2][2] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n\n    elif board[0][2] == player1 and board[1][2] == player1 and board[2][2] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n    \n    elif board[0][2] == player1 and board[1][1] == player1 and board[2][0] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n\n    elif board[1][0] == player1 and board[1][1] == player1 and board[1][2] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n\n    elif board[0][1] == player1 and board[1][1] == player1 and board[2][1] == player1:\n        print(\"Player using X wins the game!\")\n        winCondition = True\n\n    elif board[0][0] == player2 and board[0][1] == player2 and board[0][2] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[0][0] == player2 and board[1][0] == player2 and board[2][0] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[0][0] == player2 and board[1][1] == player2 and board[2][2] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[2][0] == player2 and board[2][1] == player2 and board[2][2] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[0][2] == player2 and board[1][2] == player2 and board[2][2] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[0][2] == player2 and board[1][1] == player2 and board[2][0] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[1][0] == player2 and board[1][1] == player2 and board[1][2] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n\n    elif board[0][1] == player2 and board[1][1] == player2 and board[2][1] == player2:\n        print(\"Player using O wins the game!\")\n        winCondition = True\n    \n    elif player1Count == 0:\n        print(\"The game ended in a draw!\")\n        winCondition = True\n\n    else:\n        pass\n\ndef main():\n    global currentPlayer\n    global player1Count\n\n    askForPosition()\n    \n    if currentPlayer == player1:\n        player1Count = player1Count - 1\n        currentPlayer = player2\n    else:\n        currentPlayer = player1\n\n    print()\n    printBoard()\n    print()\n    winConditions()\n\nprintIndexes()\nprintBoard()\n\nwhile winCondition == False:\n    main()\n\n","sub_path":"TTT.py","file_name":"TTT.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"279164125","text":"from Crypto.Util.number import *\nfrom params 
import p, q, flag\nimport binascii\nimport sys\nimport signal\n\n\nN = p * q\ne = 65537\nd = inverse(e, (p-1)*(q-1))\n\n\ndef input(prompt=''):\n sys.stdout.write(prompt)\n sys.stdout.flush()\n return sys.stdin.buffer.readline().strip()\n\ndef menu():\n sys.stdout.write('''----------\n1) Sign\n2) Exec\n3) Exit\n''')\n try:\n sys.stdout.write('> ')\n sys.stdout.flush()\n return int(sys.stdin.readline().strip())\n except:\n return 3\n\n\n# sign command\ndef cmd_sign():\n data = input('data> ')\n if len(data) > 256:\n sys.stdout.write('Too long\\n')\n return\n\n if b'F' in data or b'1337' in data:\n sys.stdout.write('Error\\n')\n return\n\n signature = pow(bytes_to_long(data), d, N) # sign with private key\n sys.stdout.write('Signature: {}\\n'.format(binascii.hexlify(long_to_bytes(signature)).decode()))\n\n# execute command\n# check signature\ndef cmd_exec():\n data = input('data> ')\n signature = int(input('signature> '), 16)\n\n if signature < 0 or signature >= N:\n sys.stdout.write('Invalid signature\\n')\n return\n\n check = long_to_bytes(pow(signature, e, N)) # check signature with public key\n if data != check:\n sys.stdout.write('Invalid signature\\n')\n return\n\n chunks = data.split(b',')\n stack = []\n for c in chunks:\n if c == b'+':\n stack.append(stack.pop() + stack.pop())\n elif c == b'-':\n stack.append(stack.pop() - stack.pop())\n elif c == b'*':\n stack.append(stack.pop() * stack.pop())\n elif c == b'/':\n stack.append(stack.pop() / stack.pop())\n elif c == b'F':\n val = stack.pop()\n if val == 1337:\n sys.stdout.write(flag + '\\n')\n else:\n stack.append(int(c))\n\n sys.stdout.write('Answer: {}\\n'.format(int(stack.pop())))\n\n\ndef main():\n sys.stdout.write('N: {}\\n'.format(N))\n while True:\n try:\n command = menu()\n if command == 1:\n cmd_sign()\n if command == 2:\n cmd_exec()\n elif command == 3:\n break\n except:\n sys.stdout.write('Error\\n')\n break\n\n\nif __name__ == '__main__':\n signal.alarm(60)\n main()\n","sub_path":"seccon4b2020/rsacalc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"558654186","text":"import logging\nimport os\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nlogging.basicConfig(level=logging.WARNING,\n filename=os.path.join(BASE_DIR,'OPcenter.log'),\n filemode='a',\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\nlogger = logging","sub_path":"Aladdin/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"189379553","text":"time_range=15\n\nimport os\n#import netCDF4\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport time\nimport calendar\nfrom helpers.time_ranges import *\nfrom second_order import *\nimport math\nimport lightgbm as lg\nfrom sklearn.model_selection import *\nfrom sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, mean_poisson_deviance, mean_gamma_deviance\nfrom sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, precision_score, confusion_matrix, recall_score, f1_score, auc, matthews_corrcoef\n\nfrom sklearn.metrics import make_scorer\n\nfrom joblib import Parallel, delayed\nfrom sklearn.tree import *\n\nfrom helpers.fourierExtrapolation import *\n#from helpers.helpers import *\nfrom sklearn.ensemble import *\nfrom 
sklearn.linear_model import *\nimport json\nimport sys\nfrom sklearn.preprocessing import StandardScaler\nindex = int(sys.argv[1])\n#from spark_sklearn import GridSearchCV\n\n\nfrom joblib import Parallel, delayed\ndef get_filepaths(directory):\n file_paths = [] # List which will store all of the full filepaths.\n # Walk the tree.\n for root, directories, files in os.walk(directory):\n for filename in files:\n # Join the two strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath ) # Add it to the list.\n return file_paths # Self-explanatory.\ndef fit_classifier(svc_rbf, param, metric, X_train, y_train, X_test, y_test):\n clas = svc_rbf.set_params(**param)\n clas.fit(X_train, y_train)\n #return param, roc_auc_score(y_test, [a[1] for a in clas.predict_proba(X_test)] )\n return param, metric(y_test, clas.predict(X_test) )\ndef gridsearch(X, y, svc_rbf, skf, metric=roc_auc_score, param_space= {}, n_jobs=-1):\n import json\n my_hash = {}\n i=0\n #for train, test in skf.split(X, y):\n param_score = []\n #with parallel_backend('spark'):\n param_score = Parallel(n_jobs=n_jobs)(delayed(fit_classifier)(svc_rbf, param, metric, X[train], y[train], X[test], y[test] ) for param in ParameterGrid(param_space) for train, test in skf.split(X, y))\n #best_param, best_score = max(param_score, key=lambda x: x[1])\n #print('best param this generation is {} with score {}.'.format(best_param, best_score))\n for my_val in param_score:\n #key = [ (v, k) for k, v in my_val[0].iteritems ]\n key = json.dumps(my_val[0] )\n if key not in my_hash.keys() or my_hash[key]==None:\n my_hash[key ] = my_val[1]\n else:\n my_hash[key ] = my_hash[key ] + my_val[1]\n #best_param, best_score = max(param_score, key=lambda x: x[1])\n #print('Best scoring param is {} with score {}.'.format(best_param, best_score))\n #i+=1\n i = skf.get_n_splits()\n param_scores = []\n for k, v in my_hash.items():\n param_scores.append( (k, float(v/i) ) )\n best_param, best_score = max(param_scores, key=lambda x: x[1])\n print('Best scoring param is {} with score {}.'.format(best_param, best_score))\n return best_param\n\nimport glob\ndef get_test(clf, X, y, X_test):\n return [ a for a in clf.fit(X,y).predict(X_test) ]\ndef scatter_index(s, o):\n s_m = [a - np.mean(s) for a in s]\n o_m = [a-np.mean(o) for a in o]\n return math.sqrt(s_m-o_m)^2 / np.sum([a^2 for a in o])\n\nfull_file_paths = get_filepaths('./dataset/')\n\nimport dask\n#dask.config.set(scheduler='processes')\nfrom joblib import parallel_backend\nfrom distributed import Client, progress, LocalCluster\n\n\ndef my_custom_loss_func(ground_truth, predictions):\n #diff = np.abs(ground_truth - predictions).max()\n #return np.log(1 + diff)\n return np.corrcoef(ground_truth, predictions)[0,1] \n\nmy_scorer = make_scorer(my_custom_loss_func, greater_is_better=True)\ndef run():\n global time_range\n #from helpers import helpers\n #from spark_sklearn import GridSearchCV\n for day in range(13, time_range):\n days = day\n print(\"------\")\n print(\"day: \" + str(days) )\n X = []\n y = []\n trends = []\n for fil in full_file_paths:\n #X, y = [], []\n X_temp = []\n y_temp = []\n count = 0\n with open(fil, \"r\") as f:\n #print(fil)\n lines = f.readlines()[1:]\n #if len(lines)>0:\n for line in lines:\n #print(line)\n val = line.strip().split(\",\")\n x = [float(val[a]) for a in range(len(val )- 1 ) ] #last one is date\n x.extend([float(a) for a in val[-1].split(\" \")[0].split(\"/\") ])\n x.extend([ float(str(a)[0] + \".\" + str(a)[1]) for a in 
val[-1].split(\" \")[1].split(\":\") ])\n y_ = [float(val[a]) for a in range(len(val ) ) if a==index ]\n #if np.nan not in x and np.nan not in y:\n X_temp.append(x)\n y_temp.append(y_[0] )\n \n x_val = X_temp\n y_val = y_temp\n #x_val = X_temp[:len(X_temp) - int(days*24*2)]\n #y_val = y_temp[int(days*24*2):]\n trend = [0 for a in y_val ]\n #print(len(x_val))\n #print(len(y_val))\n #print( len(y_val))\n if len(y_val) == 0: # file with no element\n continue\n x_val, y_val, trend = difference(x_val, y_val, int(days*24*2) )\n for a in range(0, len(x_val)): #remove first val coz trend=y[i+1]-y[i]. so no last val\n #print(x_val) \n if not np.isnan(x_val[a]).any() and not np.isnan(y_val[a]):# and y_val[a] < 20:\n #if all(a>-1000 for a in x_val[a]) and all(a<1000 for a in x_val[a]): \n X.append(x_val[a])\n y.append(y_val[a])\n trends.append(trend[a])\n #X.extend(X_temp[:len(X_temp) - days*24*2])\n #y.extend([a[0] for a in y_temp[days*24*2:] ] )\n\n #timators':[ 100, ], y = X[:50000], y[:50000]\n print(len(X))\n #print(trends[:10])\n trends = np.array(trends)\n indices = [a for a in range(len(y)) ]\n print(\"mean: \" + str(np.mean(y)) + \" std: \" + str(np.std(y)) + \" range: \" + str([min(y), max(y) ]) )\n X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(X, y, indices, test_size=0.3, random_state=27) \n \n X_train, y_train, X_test, y_test = np.array(X_train), np.array(y_train), np.array(X_test), np.array(y_test)\n tr_trend = np.array(trends[idx1] )\n te_trend =np.array( trends[idx2] )\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n print(\"train: \" + str(len(X_train)) + \", test: \" + str(len(y_test) ) ) \n grid1 = lg.LGBMRegressor(n_estimators=100, max_depth=None, random_state=100, n_jobs=-1)\n grid2 = ExtraTreesRegressor(random_state=100, max_depth=None, n_jobs=1)\n grid3 = DecisionTreeRegressor()\n grids = [('lgb', grid1),('et', grid2), ]#('dt', grid3) ]\n param2 = {'n_estimators':[ 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000],# 170, 220, 260, 350, 450, 550, 650, 700, 800 ],\n 'min_samples_split':[2, 5, 10, 15, 25, 35, 45, 55, 65],\n 'min_samples_leaf':[100, 150, 200, 250, 300, 350, 400, 450, 500],\n }\n param1 = { 'n_estimators':[ 100, 200, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200], # 130, 200, 320, 370, 470, 540, 600, 700, ],\n 'num_leaves':[ 100, 150, 200, 250, 300, 350, 400, 450, 500, 750, 1000, ], #5, 10, 15, 20, 30, 40, 50, 60, 70, 90, 120, 130],\n 'min_child_samples':[ 5, 10, 20, 30, 50, 75, 100, 150, 200,], \n }\n param3 = { 'n_estimators':[ 100, ],\n 'num_leaves':[5, 10, 15, 20, 30, 40, 50, 60, 70],\n 'min_child_samples':[ 5, 15, 25, 30, 35, 40, ],\n }\n\n c_range = np.linspace(-5,15,num=25)\n C_range = [math.pow(2,i) for i in c_range]\n #param3 = {}#{'alpha': C_range, }\n \n params = [param1, param2, param3] \n for gr in range(len(grids)):\n print(\"classifier: \" + grids[gr][0])\n grid = grids[gr][1]\n #my_f = BlockingTimeSeriesSplit(n_splits=3)\n my_f = KFold(n_splits=3)\n X, y = np.array(X_train), np.array(y_train)\n y_ori = y.copy()\n param = {}\n #with parallel_backend('spark'):\n #param = gridsearch(X, y, grid, my_f, metric=r2_score, param_space= params[gr], n_jobs=-1 )\n #param = json.loads(param)\n grid = grid.set_params(**param) \n grid = GridSearchCV( grid, params[gr], scoring='neg_mean_squared_error', n_jobs=-1, cv=3)\n #with parallel_backend('spark'):\n grid.fit(X, y )\n #print(\"best score: \"+ str(grid.best_score_) + \" best params: \"+ str(grid.best_params_) )\n if grids[gr][0] != 
'linear regression':\n                param = {'n_estimators': [100, 200, 300, 400, 500, 600, 700, 800, 900],}# 'n_jobs':[-1]}\n                print(\"best score: \"+ str(grid.best_score_) + \" best params: \"+ str(grid.best_params_) )\n                grid = grid.best_estimator_\n                #if grids[gr][0] != 'et':\n                #    param = {'n_estimators': [100, 150, 200, 250, 300, 350, 400, 450, 500, ], }\n                #    grid = GridSearchCV( grid, param, scoring='r2', n_jobs=1, cv=3 ) \n                #else:\n                #param = {'n_estimators': 500, 'n_jobs': -1}\n                grid.set_params(**param) \n            #else:\n            #    print(\"best score: \"+ str(grid.best_score_) + \" best params: \"+ str(grid.best_params_) )\n            predicted_1 = Parallel(n_jobs=-1)(delayed(get_test)(grid, X_train[train], y_train[train], X_train[test] ) for train,test in my_f.split(X_train, y_train) ) \n            predicted_1 = [item for sublist in predicted_1 for item in sublist]\n            \n            y_selected = []\n            y_ori = [y_train[a] +tr_trend[a] for a in range(len(y_train)) ]\n            y_ori = np.array(y_ori)\n            predict_ = []\n\n            for tr, te in my_f.split(X_train, y_train):\n                y_tes = y_ori[te]\n                y_selected.extend(y_tes)\n                predict_.extend([ predicted_1[a]+ tr_trend[a] for a in te ])\n            y_ = y_selected\n            predicted_1 = predict_\n            \n            #predicted_1 = cross_val_predict(grid, X, y, cv=5, n_jobs=-1)\n            var = explained_variance_score(y_, predicted_1)\n            abs_err = mean_absolute_error(y_, predicted_1)\n            sq_err = mean_squared_error(y_, predicted_1)\n            #r2 = r2_score(y, predicted_1)\n            r2 = 1-(1-r2_score(y_, predicted_1))*((len(X_train)-1)/(len(X_train)-len(X_train[0])-1)) \n            mean_y, mean_pred = np.mean(y_), np.mean(predicted_1)\n            #si = scatter_index(predicted_1, y) #observation put on end\n            si = math.sqrt(np.mean([ ( -y_[a] + mean_y - mean_pred + predicted_1[a])**2 for a in range(len(y_)) ]) )/np.mean(y_) \n            nse = 1 - sum([(predicted_1[a] -y_[a])**2 for a in range(len(y_)) ])/sum([ (y_[a]-mean_y)**2 for a in range(len(y_)) ])\n            bias = np.mean([y_[a]-predicted_1[a] for a in range(len(y_)) ])\n            hh = math.sqrt(sum([(y_[a]-predicted_1[a])**2 for a in range(len(y_)) ])/sum([y_[a]*predicted_1[a] for a in range(len(y_)) ]) )\n            print(\"training\")\n            #print(\"mean: \" + str(np.mean(y)) + \" std: \" + str(np.std(y)) + \" range: \" + str([min(y), max(y) ]) )\n            print(\"sq_err: \" + str(math.sqrt(sq_err)) + \" abs_err: \" + str(abs_err) + ' var: ' +\n                  str(var) + ' r2: ' + str(r2) + \" si: \" + str(si) + ' nse: ' + str(nse) +\n                  ' cc: ' + str(np.corrcoef(y_, predicted_1)[0,1] ) + ' bias: ' + str(bias) +\n                  ' hh: ' + str(hh) )\n\n            print(\"test\")\n            param= {'n_jobs':-1}\n            grid.set_params(**param)\n            #with parallel_backend('spark'):\n            grid.fit(X_train, y_train)\n            predicted_1 = grid.predict(X_test)\n            y_t = [y_test[a] +te_trend[a] for a in range(len(y_test)) ]\n            predicted_1 = [predicted_1[a]+ te_trend[a] for a in range(len(predicted_1)) ]\n            var = explained_variance_score(y_t, predicted_1) \n            abs_err = mean_absolute_error(y_t, predicted_1)\n            sq_err = mean_squared_error(y_t, predicted_1)\n            #r2_score = r2_score(y_, predicted_1)\n            r2 = 1-(1-r2_score(y_t, predicted_1))*((len(X_test)-1)/(len(X_test)-len(X_test[0])-1))\n            mean_y, mean_pred = np.mean(y_t), np.mean(predicted_1)\n            #si = math.sqrt(sq_err)/np.mean(predicted_1)\n            #si = math.sqrt(sum([ ( -y_[a] + mean_y- mean_pred + predicted_1[a])**2 ]) /sum([b**2 for b in y_]) )\n            si = math.sqrt(np.mean([ ( -y_t[a] + mean_y - mean_pred + predicted_1[a])**2 for a in range(len(y_t)) ] ) )/np.mean(y_t)\n            nse = 1 - sum([(predicted_1[a] -y_t[a])**2 for a in range(len(y_t)) ])/sum([ (y_t[a]-mean_y)**2 for a in range(len(y_t)) ])\n            bias = np.mean([y_t[a]-predicted_1[a] for a in 
range(len(y_t)) ])\n            hh = math.sqrt(sum([(y_t[a]-predicted_1[a])**2 for a in range(len(y_t)) ])/sum([y_t[a]*predicted_1[a] for a in range(len(y_t)) ]) )\n            print(\"sq_err: \" + str(math.sqrt(sq_err)) + \" abs_err: \" + str(abs_err) + ' var: ' +\n                  str(var) + ' r2: ' + str(r2) + \" si: \" + str(si) + ' nse: ' + str(nse) + \n                  ' cc: ' + str(np.corrcoef(y_t, predicted_1)[0,1] ) + ' bias: ' + str(bias) +\n                  ' hh: ' + str(hh) )\n\n\nif __name__ == \"__main__\":\n    #with parallel_backend('spark'):\n    run()\n\n\n","sub_path":"test_classifiers.py","file_name":"test_classifiers.py","file_ext":"py","file_size_in_byte":12734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"64549651","text":"import time\r\nimport pyautogui\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\nimport os\r\nimport cv2\r\n\r\ndef window_capture():\r\n\timg = pyautogui.screenshot(region=[1642, 802,278,278]) # x,y,w,h\r\n\timg.save('lol.png')\r\n\r\ntk=Tk()\r\ncanvas=Canvas(tk,width=600,height=600,bg = 'white')\r\n\r\nwhile True:\r\n\twindow_capture()\r\n\timg1 = cv2.imread('lol.png')\r\n\tx,y = img1.shape[0:2]\r\n\timg1 = cv2.resize(img1, (int(y*2), int(x*2)))\r\n\tim1 = Image.fromarray(cv2.cvtColor(img1,cv2.COLOR_BGR2RGB))\t\r\n\timg = ImageTk.PhotoImage(image = im1)\r\n\t#img = ImageTk.PhotoImage(file = filename)\r\n\titext = canvas.create_image((300,300),image = img)\r\n\tcanvas.pack()\r\n\ttk.update()\r\n\ttk.after(1000)\r\ntk.mainloop()","sub_path":"lolScreen.py","file_name":"lolScreen.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"249456518","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2019/3/26 21:01\n# @Author  : Ryu\n# @Site    : \n# @File    : three.py\n# @Software: PyCharm\n\n# Takes the input value str1\ndef huiwenlian(str1):\n    # Get the length of str1, which is an int\n    str2 = len(str1)\n    # Subtract one from the length of str1: length counts from one while\n    # indexing starts from zero, so the last character is at length - 1\n    str3 = str2-1\n    # Halve the total length to split at the middle; otherwise both indices would reach the same character and it would wrongly count as a palindrome\n    str2 =str2 // 2\n    # Start with a \"not a palindrome\" flag: 0, i.e. false\n    shi = 0\n    # Loop over half the length, so the comparison stops at the middle\n    for s in range(str2):\n        # The loop variable is a number, so characters are looked up by index\n        # range starts from 0, so s is 0 on the first pass, i.e. the first character\n        # str3 is the total length minus one, i.e. the last character\n        # Check whether the first and the last characters are equal:\n        # if they are equal, set shi to 1, i.e. true;\n        # when any pair differs, it becomes 0, i.e. false\n        if str1[s] == str1[str3]:\n            shi = 1\n            str3 -= 1\n        else:\n            shi = 0\n    # If shi is 1 it is a palindrome, if 0 it is not\n    if shi:\n        print(\"Yes\")\n    else:\n        print(\"No\")\n\n\nwhile 1:\n    tey = input(\"Please input\")\n    huiwenlian(tey)\n","sub_path":"venv/Practice/three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"468358629","text":"from rest_framework import generics\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.shortcuts import get_object_or_404\nfrom ngeo.apps.account.models import User\nfrom ngeo.apps.field_officer.models import FieldOfficer\nfrom ngeo.apps.county_manager.models import CountyManager\nfrom ngeo.apps.regional_manager.models import RegionalManager\nfrom ngeo.apps.agents.models import Agent\nfrom ngeo.apps.projects.models import Project\nfrom .serializers import AreaSerializer, NotificationSerializer\nfrom .models import Area\nfrom notifications.models import Notification\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass 
AreaListCreate(generics.ListCreateAPIView):\n    \"\"\"\n    API view to retrieve list of areas\n    \"\"\"\n\n    serializer_class = AreaSerializer\n    queryset = Area.objects.all()\n\n\nclass AreaDetail(generics.RetrieveUpdateDestroyAPIView):\n    \"\"\"\n    API view to retrieve one area\n    \"\"\"\n\n    queryset = Area.objects.all()\n    serializer_class = AreaSerializer\n\n\nclass MyAreaList(APIView):\n    def get(self, request, pk):\n        user = get_object_or_404(User, pk=pk)\n        areas = []\n        if user.role == User.FOO:\n            areas = Area.objects.filter(field_officer__user=user)\n        if user.role == User.CM:\n            areas = Area.objects.filter(county_manager__user=user)\n        data = AreaSerializer(areas, many=True).data\n        return Response(data)\n\n\nclass MyNotificationList(APIView):\n    def get(self, request):\n        user = self.request.user\n        notifications = user.get_notifications()\n        data = NotificationSerializer(notifications, many=True).data\n\n        return Response(data)\n\n\nclass NotificationDetail(APIView):\n    \"\"\"\n    Marks a notification as read.\n    \"\"\"\n    def patch(self, request, pk):\n        user = self.request.user\n        try:\n            notification = user.notifications.get(id=int(pk))\n            notification.mark_as_read()\n            notification.save()\n            data = NotificationSerializer(notification).data\n            return Response(data)\n        except Notification.DoesNotExist:\n            return Response(\"Error getting notification\",\n                            status.HTTP_400_BAD_REQUEST)\n\n\nclass AssignArea(APIView):\n    \"\"\"\n    Assign area\n    \"\"\"\n    def patch(self, request):\n\n        try:\n            data = request.data\n            # Find user being assigned an area\n            user_id = data.get(\"user_id\")\n            role = data.get('user_role')\n            # Agent is not a User, skip user with this role to avoid getting errors\n            # when the statement below is run.\n            user = not(role == 'agent') and user_id and User.objects.filter(pk=user_id).get()\n\n            # Find project being assigned an area\n            project_id = data.get('project_id')\n            \n            #\n            # Assignee is County Manager\n            #\n            if user and user.role == User.CM:\n                region = data.get('region')\n                county = data.get(\"county\").pop()\n                county_manager = get_object_or_404(CountyManager, user=user)\n                # Check if CM already had an area assigned to them, in this\n                # case a new county will be assigned to them\n                if county_manager.area:\n                    county_manager.area.region = region\n                    county_manager.area.county = county\n                # Area being assigned for the first time\n                # Create a new area instance and assign it\n                if county_manager.area is None:\n                    area = Area(region=region, county=county)\n                    county_manager.area = area\n                county_manager.area.save()\n                county_manager.save()\n                return Response(\"Area successfully assigned to county manager\",\n                                status=status.HTTP_200_OK)\n            #\n            # Assignee is Regional Manager\n            #\n            if user and user.role == User.RM:\n                region = data.get(\"region\")\n                region_manager = get_object_or_404(RegionalManager, user=user)\n                if region_manager.area:\n                    region_manager.area.region = region\n                if region_manager.area is None:\n                    area = Area(region=region)\n                    region_manager.area = area\n                region_manager.area.save()\n                region_manager.save()\n                return Response(\n                    \"Area successfully assigned to regional manager\",\n                    status=status.HTTP_200_OK)\n            #\n            # Assignee is Field Officer\n            #\n            if user and user.role == User.FOO:\n                # \n                region = data.get('region')\n                county = data.get(\"county\")\n                \n                if county:\n                    county = county.pop()\n                constituencies = [c for c in data.get(\"constituency\", []) if c]\n                sub_counties = [sc for sc in data.get(\"sub_county\", []) if sc]\n                divisions = [div for div in data.get(\"division\", []) if div]\n                locations = [l for l in 
data.get(\"location\", []) if l]\n sub_locations = [sub for sub in data.get(\"sub_location\", []) if sub]\n # wards = [w for w in data.get(\"ward\", []) if w]\n # districts = [d for d in data.get(\"district\", []) if d]\n # sub_counties = [s for s in data.get(\"sub_county\", []) if s]\n\n field_officer = get_object_or_404(FieldOfficer, user=user)\n new_county_assigned = False\n \n # Updating field officer area\n if field_officer.area:\n # only update areas whose values are provided for\n if county or region:\n # For county updates, set all other sub-areas to null. The new CM will re-assign these afresh\n # depending on their county\n area = Area(region=region, county=county)\n field_officer.area = area\n new_county_assigned = True\n # Assign these areas if field officer hasnt been given a new county\n # Skip if a new county has been given to FOO.\n if not new_county_assigned:\n field_officer.area.constituency = constituencies\n field_officer.area.sub_county = sub_counties\n field_officer.area.division = divisions\n field_officer.area.location = locations\n field_officer.area.sub_location = sub_locations\n\n if new_county_assigned:\n # Clear FOOs agents and projects\n # These will be re-assigned to them by the new areas\n # CM\n field_officer.agents.clear()\n field_officer.projects.clear()\n field_officer.user.notifications.mark_all_as_deleted()\n\n # Assigning new area to field officer\n if field_officer.area is None:\n area = Area(\n region=region,\n county=county,\n constituency=constituencies,\n sub_county=sub_counties,\n division=divisions,\n location=locations,\n sub_location=sub_locations,\n )\n field_officer.area = area\n # save area\n field_officer.area.save()\n field_officer.save()\n\n return Response(\"Area successfuly assigned to field officer\",\n status=status.HTTP_200_OK)\n #\n # Assigness is an Agent\n #\n if role == \"agent\":\n # Get areas\n county = data.get(\"county\")\n if county:\n county = county.pop()\n sub_counties = [s for s in data.get(\"sub_county\", []) if s]\n locations = [l for l in data.get(\"location\", []) if l]\n sub_locations = [sub for sub in data.get(\"sub_location\", []) if sub]\n wards = [w for w in data.get(\"ward\", []) if w]\n constituencies = [c for c in data.get(\"constituency\", []) if c]\n districts = [d for d in data.get(\"district\", []) if d]\n\n agent = get_object_or_404(Agent, pk=user_id)\n\n # Upating area\n if agent.area:\n # only update areas whose values are provided for\n if county:\n # For county updates, set all other sub-areas to null. 
The new CM will assign these\n                        area = Area(county=county,)\n                        agent.area = area\n                    if sub_counties:\n                        agent.area.sub_county = sub_counties\n                    if locations:\n                        agent.area.location = locations\n                    if sub_locations:\n                        agent.area.sub_location = sub_locations\n                    if wards:\n                        agent.area.ward = wards\n                    if constituencies:\n                        agent.area.constituency = constituencies\n                    if districts:\n                        agent.area.district = districts\n\n                # creating new area\n                if agent.area is None:\n                    area = Area(\n                        sub_county=sub_counties,\n                        location=locations,\n                        sub_location=sub_locations,\n                        ward=wards, \n                        constituency=constituencies,\n                        district=districts)\n                    agent.area = area\n                # save area\n                agent.area.save()\n                agent.save()\n\n                return Response(\"Area successfully assigned to agent\",\n                                status=status.HTTP_200_OK)\n            \n            #\n            # Assign project to county\n            #\n            if project_id:\n                county = data.get(\"county\")\n                region = data.get('region')\n                if county:\n                    county = county.pop()\n                project = get_object_or_404(Project, pk=project_id)\n                # Check if Project already had an area assigned to it, in this\n                # case a new county will be assigned to it.\n                if project.area:\n                    project.area.county = county\n                    project.area.region = region\n                    project.county = county\n                    project.region = region\n                # Area being assigned for the first time - \n                # Create a new area instance and assign it\n                if project.area is None:\n                    area = Area(county=county, region=region)\n                    project.area = area\n                    project.county = county\n                    project.region = region\n                project.area.save()\n                project.save()\n                \n                # Notify all CMs within this county that a project has been\n                # assigned to the county\n                project.send_notification_to_CM(sender=request.user, county=county)\n\n                return Response(\"Area successfully assigned to project\",\n                                status=status.HTTP_200_OK)\n        except Exception as e:\n            raise\n","sub_path":"api/ngeo/apps/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"63714206","text":"from grafica import *\r\nfrom ecdif import *\r\n\r\ndef df(y,t):\r\n    df= -sqrt(abs(y))+sin(t)\r\n    return df\r\n\r\ny0=1\r\nh=0.01\r\nintervalo=[0,5]\r\nsol,t=euler(df,h,intervalo,y0)\r\n\r\ngrafvector(t,sol,\"y'+(abs(y))^1/2=sin(t)\")\r\ntitulos(\"y'+(abs(y))^1/2=sin(t)\",\"t\",\"y(t)\")\r\nmuestra()\r\n","sub_path":"Tema 3 - Ecuaciones Diferenciales Ordinarias/2013_2/Juan Pablo Herrera Musi/Examen 3/Examen Juan Pablo Herrera/Ej1/ej1e.py","file_name":"ej1e.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"161052389","text":"import tkinter as tk\nimport random as r\n\n\nc = tk.Canvas(width=600, height=600)\nc.pack()\nbody = tuple()\nstep = 5\n\n\ndef nahodna_farba():\n    zlozky = []\n    for i in range(3): zlozky.append(r.randrange(256))\n    return f\"#{zlozky[0]:02X}{zlozky[1]:02X}{zlozky[2]:02X}\"\n\n\ndef kresli(e):\n    global body\n    body += (e.x, e.y)\n    if len(body) >= 4:\n        zmaz(None)\n        c.create_line(body, fill=nahodna_farba(), width=r.randint(1, 10))\n\n\ndef zmaz(e):\n    c.delete('all')\n\n\ndef zmensi(e):\n    global body\n    pomer = 1 / r.randint(2, 10)\n    nove_body = tuple()\n    for i in body:\n        nove_body += i * pomer,\n    c.create_line(nove_body, fill=nahodna_farba(), width=r.randint(1, 10), tags='miniatura')\n\n\ndef posun(dx, dy):\n    c.move('miniatura', dx, dy)\n\n\nc.bind('<B1-Motion>', kresli)\nc.bind_all('<Delete>', zmaz)\nc.bind_all('m', zmensi)\nc.bind_all('<Left>', lambda e: posun(-step, 0))\nc.bind_all('<Right>', lambda e: posun(step, 
0))\nc.bind_all('<Up>', lambda e: posun(0, -step))\nc.bind_all('<Down>', lambda e: posun(0, step))\n","sub_path":"20171121/kreslenie.py","file_name":"kreslenie.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"618346944","text":"\"\"\"Ecole collection of environments.\"\"\"\n\nimport ecole.core as core\nimport ecole.observation\nimport ecole.reward\nfrom ecole.core.environment import *\n\n\nclass EnvironmentComposer:\n\n    __Dynamics__ = None\n    __DefaultObservationFunction__ = ecole.observation.Nothing\n    __DefaultRewardFunction__ = ecole.reward.IsDone\n\n    def __init__(\n        self,\n        observation_function=\"default\",\n        reward_function=\"default\",\n        scip_params=None,\n        **dynamics_kwargs\n    ) -> None:\n        self.observation_function = self.__parse_observation_function(observation_function)\n        self.reward_function = self.__parse_reward_function(reward_function)\n        self.scip_params = scip_params if scip_params is not None else {}\n        self.model = None\n        self.dynamics = self.__Dynamics__(**dynamics_kwargs)\n        self.can_transition = False\n        self.random_engine = ecole.spawn_random_engine()\n\n    @classmethod\n    def __parse_reward_function(cls, reward_function):\n        if reward_function == \"default\":\n            return cls.__DefaultRewardFunction__()\n        elif reward_function is None:\n            return ecole.reward.Constant(0.0)\n        else:\n            return reward_function\n\n    @classmethod\n    def __parse_observation_function(cls, observation_function):\n        if observation_function == \"default\":\n            return cls.__DefaultObservationFunction__()\n        elif observation_function is None:\n            return ecole.observation.Nothing()\n        elif isinstance(observation_function, tuple):\n            return ecole.observation.TupleFunction(\n                *(cls.__parse_observation_function(fun) for fun in observation_function)\n            )\n        elif isinstance(observation_function, dict):\n            return ecole.observation.DictFunction(\n                **{\n                    name: cls.__parse_observation_function(func)\n                    for name, func in observation_function.items()\n                }\n            )\n        else:\n            return observation_function\n\n    def reset(self, instance, *dynamics_args, **dynamics_kwargs):\n        \"\"\"Start a new episode.\n\n        This method brings the environment to a new initial state, *i.e.* starts a new\n        episode.\n        The method can be called at any point in time.\n\n        Parameters\n        ----------\n        instance:\n            The combinatorial optimization problem to tackle during the newly started\n            episode.\n        dynamics_args:\n            Extra arguments are forwarded as is to the underlying :py:class:`~ecole.typing.Dynamics`.\n        dynamics_kwargs:\n            Extra arguments are forwarded as is to the underlying :py:class:`~ecole.typing.Dynamics`.\n\n        Returns\n        -------\n        observation:\n            The observation extracted from the initial state.\n            Typically used to take the next action.\n        action_set:\n            An optional subset of accepted action in the next transition.\n            For some environment, this may change at every transition.\n        reward_offset:\n            An offset on the initial state.\n            This reward is not used for learning (as no action has yet been taken) but is used in\n            evaluation for the sum of rewards when one needs to account for computations that\n            happened during :py:meth:`reset` (*e.g.* computation time, number of LP iterations in\n            presolving...).\n        done:\n            A boolean flag indicating whether the current state is terminal.\n            If this is true, the episode is finished, and :meth:`step` cannot be called.\n\n        \"\"\"\n        self.can_transition = True\n        try:\n            if isinstance(instance, core.scip.Model):\n                self.model = instance\n            else:\n                self.model = 
core.scip.Model.from_file(instance)\n self.model.set_params(self.scip_params)\n\n self.dynamics.set_dynamics_random_state(self.model, self.random_engine)\n\n done, action_set = self.dynamics.reset_dynamics(\n self.model, *dynamics_args, **dynamics_kwargs\n )\n self.observation_function.reset(self.model)\n self.reward_function.reset(self.model)\n\n reward_offset = self.reward_function.obtain_reward(self.model)\n observation = self.observation_function.obtain_observation(self.model)\n return observation, action_set, reward_offset, done\n except Exception as e:\n self.can_transition = False\n raise e\n\n def step(self, action, *dynamics_args, **dynamics_kwargs):\n \"\"\"Transition from one state to another.\n\n This method takes a user action to transition from the current state to the\n next.\n The method **cannot** be called if the environment has not been reset since its\n instantiation or since a terminal state.\n\n Parameters\n ----------\n action:\n The action to take in as part of the Markov Decision Process.\n If an action set has been given in the latest call (inluding calls to\n :meth:`reset`), then the action **must** be in that set.\n dynamics_args:\n Extra arguments are forwarded as is to the underlying :py:class:`~ecole.typing.Dynamics`.\n dynamics_kwargs:\n Extra arguments are forwarded as is to the underlying :py:class:`~ecole.typing.Dynamics`.\n\n Returns\n -------\n observation:\n The observation of extracted from the current state.\n Typically used to take the next action.\n action_set:\n An optional subset of accepted action in the next transition.\n For some environment, this may change at every transition.\n reward:\n A real number to use for reinforcement learning.\n done:\n A boolean flag indicating wether the current state is terminal.\n If this is true, the episode is finished, and this method cannot be called\n until :meth:`reset` has been called.\n info:\n A collection of environment specific information about the transition.\n This is not necessary for the control problem, but is useful to gain\n insights about the environment.\n\n \"\"\"\n if not self.can_transition:\n raise core.environment.Exception(\"Environment need to be reset.\")\n\n try:\n done, action_set = self.dynamics.step_dynamics(\n self.model, action, *dynamics_args, **dynamics_kwargs\n )\n reward = self.reward_function.obtain_reward(self.model, done)\n observation = self.observation_function.obtain_observation(self.model)\n return observation, action_set, reward, done, {}\n except Exception as e:\n self.can_transition = False\n raise e\n\n def seed(self, value: int) -> None:\n \"\"\"Set the random seed of the environment.\n\n The the random seed is used to seed the environment :py:class:`~ecole.RandomEngine`.\n At every call to :py:meth:`reset`, the random engine is used to create new seeds\n for the solver.\n Setting the seed once will ensure determinism for the next trajectories.\n By default, the random engine is initialized by the\n `random `_ module.\n \"\"\"\n self.random_engine.seed(value)\n\n\nclass Branching(EnvironmentComposer):\n __Dynamics__ = core.environment.BranchingDynamics\n __DefaultObservationFunction__ = ecole.observation.NodeBipartite\n\n\nclass Configuring(EnvironmentComposer):\n __Dynamics__ = core.environment.ConfiguringDynamics\n","sub_path":"python/src/ecole/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
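With the `EnvironmentComposer` API complete at this point, a minimal episode loop against it looks as follows. `instances` and the trivial first-candidate policy are placeholders; `Branching` is the concrete environment defined at the bottom of the record:

```python
import ecole

env = ecole.environment.Branching(scip_params={"limits/time": 60})
env.seed(42)  # seeds the internal random engine used at every reset()

for instance in instances:  # placeholder: any iterable of problem files/models
    obs, action_set, reward_offset, done = env.reset(instance)
    cumulative_reward = reward_offset  # accounts for work done during reset
    while not done:
        action = action_set[0]  # trivial policy: first branching candidate
        obs, action_set, reward, done, info = env.step(action)
        cumulative_reward += reward
```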
+{"seq_id":"555435631","text":"#Fibonacci Series\n\ndef fibonacci(n):\n \"\"\"\n To print the nth value in fibonacci series based on n value.\n \n Arguements:\n n: number of terms(integer)\n \n Returns:\n nth: nth value in fibonacci series\n \"\"\"\n \n if (n < 0):\n print(\"Please enter a postive integer for n.\")\n elif (n == 0):\n return 0\n elif (n == 1):\n return 1\n else:\n nth = fibonacci(n-1) + fibonacci(n-2)\n return nth\n\ndef lucas(n):\n \"\"\"\n To print the nth value in lucas series based on n value.\n \n Arguements:\n n: number of terms(integer)\n \n Returns:\n nth: nth value in lucas series\n \"\"\"\n \n if (n < 0):\n print(\"Please enter a postive integer for n.\")\n elif (n == 0):\n return 2\n elif (n == 1):\n return 1\n else:\n nth = lucas(n-1) + lucas(n-2)\n return nth\n\ndef sum_series(n, n1, n2):\n \"\"\"\n To print the fibonacci or lucas series based on parameters.\n \n Arguements:\n n: number of terms(integer)\n \n Returns:\n nth: nth value\n \"\"\"\n \n if (n < 0):\n print(\"Please enter a postive integer for n.\")\n elif (n == 0):\n return n1\n elif (n == 1):\n return n2\n else:\n nth = sum_series(n-1) + sum_series(n-2)\n return nth\n \n","sub_path":"students/Ramkumar_Rajanbabu/Lesson02/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"295849860","text":"def main():\n N = int(input())\n tasks = []\n for i in range(N):\n tasks.append(list(map(int, input().split())))\n tasks.sort(key=lambda x: x[1])\n t = 0\n for task in tasks:\n t += task[0]\n if t > task[1]:\n print('No')\n return\n print('Yes')\nif __name__ == \"__main__\":\n main()\n","sub_path":"old/ABC131/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"633259149","text":"import numpy as np\nimport h5py\nimport tflearn\nimport matplotlib.pyplot as plt\nimport scipy.misc\nimport math\nimport matplotlib.cbook as cbook\nfrom scipy.misc import imshow\nimport time\nfrom os import walk\n\ndirectory = '/home/mpcr/Desktop/rodrigo/deepcontrol/'\n\ndirectory = directory + raw_input(\"Input folder path (from /deepcontrol): \")\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n return False\n\n\ndata_files = []\n\nfor (dirpath, dirnames, filenames) in walk(directory):\n data_files.extend(filenames)\n break\n\nfor i in range(len(data_files)):\n if is_number(data_files[i][9]):\n data_files[i] = int(data_files[i][7] + data_files[i][8] + data_files[i][9])\n elif is_number(data_files[i][8]):\n data_files[i] = int(data_files[i][7] + data_files[i][8])\n else:\n data_files[i] = int(data_files[i][7])\ndata_files.sort()\n\nprint(data_files)\n\nh5f = []\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.ion()\n\nfig.show()\nfig.canvas.draw()\n\ndef return_keys(keystates):\n key_string = \"\"\n for i in range(len(keystates)):\n if keystates[i] == 1.:\n if i == 0:\n key_string += \"right-\"\n if i == 1:\n key_string += \"down-\"\n if i == 2:\n key_string += \"shift-\"\n if i == 3:\n key_string += \"up-\"\n if i == 4:\n key_string += \"down-\"\n if i == 5:\n key_string += \"left-\"\n return key_string\nh5f = []\nwhile True:\n x = raw_input(\"Type # of dataset to view: \")\n if x == \"all\":\n pass\n else:\n x = int(x)\n\n if not x in data_files:\n 
print(str(x) + \" isn't in the datasets\")\n else:\n h5f.append(h5py.File(directory + '/dataset' + str(data_files[x]) + '.h5'))\n\n image_set = np.asarray(h5f[0]['X'])\n image_set = image_set[:,:,:,None]\n image_set = np.squeeze(image_set)\n action_array_set = np.asarray(h5f[0]['Y'])\n action_array_set = action_array_set.astype(int)\n\n nframes = action_array_set.shape[0]\n for i in range(nframes-1):\n ax.clear()\n print(\"frame \" + str(i) + \" of %s\" % nframes)\n print(image_set[i,:,:].shape)\n ax.imshow(image_set[i,:,:])\n key_string = return_keys(action_array_set[i])\n print(action_array_set)\n fig.canvas.draw()\n h5f = []\n","sub_path":"helper_scripts/dataview.py","file_name":"dataview.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"509672515","text":"'''\nDate: 31 July 2018\nPurpose of code: Main function for efficiency calculation of four wire chambers for multiple test beam data.\nUsage: This code encompasses the whole work flow. Comment sections that are not of use. \nAdd test beam data by writing its name in data_name list\nLog:\n'''\n\n#initialize\n\n#pdb.set_trace()\nimport ROOT\nimport root_numpy\nimport numpy as np \nfrom array import array\nfrom tabulate import tabulate\nimport matplotlib.pyplot as plt\nimport pdb\nimport pandas as pd\nimport seaborn as sns \nfrom scipy import stats\nimport time\nstart_time = time.time()\n\n#import data \ndata_name = [\"dwc_reco_628\", \"dwc_reco_687\", \"dwc_reco_694\", \"dwc_reco_696\", \"dwc_reco_705\", \"dwc_reco_744\", \"dwc_reco_745\",\\\n \"dwc_reco_820\", \"dwc_reco_832\", \"dwc_reco_850\"] \n#data_name = [\"dwc_reco_850\"]\n#data_name = [\"dwc_reco_471\",\"dwc_reco_932\"]\nfile = {}\ntree = {}\ndata = {}\nfor item in data_name:\n\tfile[item] = ROOT.TFile(\"./AHCalMay2018_DWCs/%s.root\"% item) #read root file \n\ttree[item] = file[item].Get(\"/dwc_ntupelizer/dwc_reco\") #read tree file\n\tdata[item] = root_numpy.tree2array(tree[item], branches=[\"reco1_x\", \"reco1_y\",\"reco2_x\", \"reco2_y\",\"reco3_x\", \"reco3_y\",\"reco4_x\", \"reco4_y\",\"z1\", \"z2\", \"z3\", \"z4\",\"beamEnergy\", \"pdgID\"])\n\n\n'''\n#1 August 2018: Have not modified visualize() for multiple data\n#23 August 2018: See plot_xy_distribution.py. 
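The filename handling in the `dataview.py` record above slices fixed character positions (`data_files[i][7]` through `[9]`), which only works for `dataset<N>.h5` names with one- to three-digit numbers. A regex sketch that handles any width and skips non-matching files (assuming the `dataset<N>.h5` naming the record itself relies on when reopening files):

```python
import re

def dataset_numbers(filenames):
    """Extract and sort the numeric suffix of dataset<N>.h5 filenames."""
    pattern = re.compile(r"dataset(\d+)\.h5$")
    return sorted(int(m.group(1)) for name in filenames
                  if (m := pattern.match(name)))

assert dataset_numbers(["dataset7.h5", "dataset123.h5", "notes.txt"]) == [7, 123]
```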
It does pretty much what visualize() should do\n#Section1: visualize data \nfrom visualize import visualize\nvisualize()\n'''\n\n\n#Section2: Calculate offset and print to .txt data \nfrom offset_multiple import offset_multiple \noffs_x = {}\noffs_y = {}\noffs_x_write = [[\"pdgID\",\"DWC4\",\"DWC3\",\"DWC2\",\"DWC1\" ]]\noffs_y_write = [[\"pdgID\",\"DWC4\",\"DWC3\",\"DWC2\",\"DWC1\" ]]\ncolor = 1 #for color of the histogram\nfile1 = open(\"./Residual/mean_residual_distribution_x.txt\", 'w+') #prepare a txt file\nfile2 = open(\"./Residual/mean_residual_distribution_y.txt\", 'w+') #prepare a txt file\nfor item in data_name:\n\toffs_x[item], offs_y[item] = offset_multiple(data[item], item, color)\n\toffs_x_write.append([\"%sw/energy%sGeV\"%(data[item][\"pdgID\"][0], data[item][\"beamEnergy\"][0])] + offs_x[item])\n\toffs_y_write.append([\"%sw/energy%sGeV\"%(data[item][\"pdgID\"][0], data[item][\"beamEnergy\"][0])] + offs_y[item])\n\tcolor += 1\n\tif color == 10:\n\t\tcolor += 1\nfile1.write(tabulate(offs_x_write, headers = \"firstrow\"))\nfile2.write(tabulate(offs_y_write, headers = \"firstrow\"))\nfile1.close()\nfile2.close()\n\n\n\n#the same game as in offset2.py\nfrom offset2_multiple import offset2_multiple\noffs_x_corr = {}\noffs_y_corr = {}\ndeg = {}\noffs_x_corr_write = [[\"pdgID\",\"DWC4\",\"DWC3\",\"DWC2\",\"DWC1\" ]]\noffs_y_corr_write = [[\"pdgID\",\"DWC4\",\"DWC3\",\"DWC2\",\"DWC1\" ]]\ncolor = 1 #for color of the histogram\n#Data frame for mu and sigma\nstatistic_x = pd.DataFrame({\"Data_Name\" : [], \"Energy_GeV\" : [], \"Mean_mm\" : [], \"Sigma_mm\" : [], \"DWC\" : []})\nstatistic_y = pd.DataFrame({\"Data_Name\" : [], \"Energy_GeV\" : [], \"Mean_mm\" : [], \"Sigma_mm\" : [], \"DWC\" : []})\nstat_x_deg = pd.DataFrame({\"Data_Name\" : [], \"Energy_GeV\" : [], \"Mean_rad\" : [], \"Sigma_rad\" : []})\nstat_y_deg = pd.DataFrame({\"Data_Name\" : [], \"Energy_GeV\" : [], \"Mean_rad\" : [], \"Sigma_rad\" : []})\nfile1 = open(\"./Residual/mean_residual_distribution_x_corrected.txt\", 'w+') #prepare a txt file\nfile2 = open(\"./Residual/mean_residual_distribution_y_corrected.txt\", 'w+') #prepare a txt file\nfile3 = open(\"./Result/BeamAngle/statistic_x.txt\", 'w+')\nfile4 = open(\"./Result/BeamAngle/statistic_y.txt\", 'w+')\nfile5 = open(\"./Result/BeamAngle/statistic_x_deg.txt\", \"w+\")\nfile6 = open(\"./Result/BeamAngle/statistic_y_deg.txt\", \"w+\")\nfor item in data_name:\n\toffs_x_corr[item], offs_y_corr[item], deg_x, deg_y, dev_x, dev_y, chisqd_x, chisqd_y = offset2_multiple(data[item], item, offs_x[item], offs_y[item], color)\n\t#for chi squared (incomplete project)\n\tf, (ax1, ax2) = plt.subplots(2,1, sharex = 'col', sharey = 'row')\n\t(mu, sigma) = stats.norm.fit(chisqd_x)\n\tsns.distplot(chisqd_x, norm_hist = True, ax = ax1, label = '$\\chi^2$ x %s GeV $\\mu$ %.2f $\\sigma$ %.2f'%(data[item][\"beamEnergy\"][0],mu, sigma))\n\tax1.set_title(\"$\\chi^2$ Distribution\",fontsize= 'x-large')\n\tax1.set_ylabel(\"Distribution\",fontsize= 'x-large')\n\tax1.set_xlabel(\"$\\chi^2$ [$mm^2$]\",fontsize= 'x-large')\n\tax1.set_xlim([-3*sigma, 3*sigma])\n\tax1.legend(fontsize= 'x-large')\n\t(mu, sigma) = stats.norm.fit(chisqd_y)\n\te = pd.DataFrame({\"Data_Name\" : [item], \"Energy_GeV\" : [data[item][\"beamEnergy\"][0]], \"Mean_rad\" : [mu], \"Sigma_rad\" : [sigma]})\n\tsns.distplot(chisqd_y, norm_hist = True, ax = ax2, label = '$\\chi^2$ y %s GeV $\\mu$ %.2f $\\sigma$ %.2f'%(data[item][\"beamEnergy\"][0],mu, sigma))\n\tax2.set_ylabel(\"Distribution\",fontsize= 
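The fit-and-annotate pattern used throughout this block, in isolation: `scipy.stats.norm.fit` returns the `(mu, sigma)` maximum-likelihood estimates, which the script then places in the plot labels. Note that `seaborn.distplot` as used in the record is deprecated in current seaborn; `histplot` is the modern equivalent. A self-contained sketch with synthetic residuals:

```python
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats

sample = np.random.default_rng(0).normal(loc=0.2, scale=1.5, size=5000)
mu, sigma = stats.norm.fit(sample)  # MLE estimates of mean and std

ax = sns.histplot(sample, stat="density")
ax.set_xlim(mu - 3 * sigma, mu + 3 * sigma)  # same 3-sigma window as above
ax.set_title(f"$\\mu$={mu:.2f} mm, $\\sigma$={sigma:.2f} mm")
plt.show()
```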
'x-large')\n\tax2.set_xlabel(\"$\\chi^2$ [$mm^2$]\",fontsize= 'x-large')\n\tax2.set_xlim([-3*sigma, 3*sigma])\n\tax2.legend(fontsize= 'x-large')\n\tplt.savefig('./Result/BeamAngle/%s_chisqd.pdf'%item )\n\t#For beam angle\n\tdeg[item] = pd.DataFrame({'x' : deg_x, 'y' : deg_y})\n\toffs_x_corr_write.append([\"%sw/energy%sGeV\"%(data[item][\"pdgID\"][0], data[item][\"beamEnergy\"][0])] + offs_x_corr[item])\n\toffs_y_corr_write.append([\"%sw/energy%sGeV\"%(data[item][\"pdgID\"][0], data[item][\"beamEnergy\"][0])] + offs_y_corr[item])\n\tcolor += 1\n\tif color == 10:\n\t\tcolor += 1\n\tf, (ax1, ax2) = plt.subplots(2,1, sharex = 'col', sharey = 'row')\n\t#Getting fit from scipy as they are equivalent: https://stackoverflow.com/questions/31413934/howto-get-fit-parameters-from-seaborn-distplot-fit\n\t(mu, sigma) = stats.norm.fit(deg[item]['x'])\n\td = pd.DataFrame({\"Data_Name\" : [item], \"Energy_GeV\" : [data[item][\"beamEnergy\"][0]], \"Mean_rad\" : [mu], \"Sigma_rad\" : [sigma]})\n\tstat_x_deg = stat_x_deg.append(d)\n\tsns.distplot(deg[item]['x'], norm_hist = True, ax = ax1, label = 'x %s GeV'%data[item][\"beamEnergy\"][0])\n\tax1.set_title(\"Fit Line Angle Distribution\")\n\tax1.set_ylabel(\"Distribution\")\n\tax1.set_xlabel(\"Line Angle [rad]\")\n\tax1.legend()\n\t(mu, sigma) = stats.norm.fit(deg[item]['y'])\n\te = pd.DataFrame({\"Data_Name\" : [item], \"Energy_GeV\" : [data[item][\"beamEnergy\"][0]], \"Mean_rad\" : [mu], \"Sigma_rad\" : [sigma]})\n\tstat_y_deg = stat_y_deg.append(e)\n\tsns.distplot(deg[item]['y'], norm_hist = True, ax = ax2, label = 'y %s GeV'%data[item][\"beamEnergy\"][0])\n\tax2.set_ylabel(\"Distribution\")\n\tax2.set_xlabel(\"Line Angle [rad]\")\n\tax2.legend()\n\t#For deviation\n\tplt.savefig('./Result/BeamAngle/%s_angle.pdf'%item )\n\t#Getting fit from scipy as they are equivalent: https://stackoverflow.com/questions/31413934/howto-get-fit-parameters-from-seaborn-distplot-fit\n\tf, (ax1, ax2) = plt.subplots(2,1, sharex = 'col', sharey = 'row')\n\t#loop over DWC1,2,3,4\n\tfor a in [1,2,3]:\n\t\t(mu, sigma) = stats.norm.fit(dev_x['%s'%a])\n\t\tb = pd.DataFrame({\"Data_Name\" : [item], \"Energy_GeV\" : [data[item][\"beamEnergy\"][0]], \"Mean_mm\" : [mu], \"Sigma_mm\" : [sigma], \"DWC\" : [int(a)]})\n\t\tstatistic_x = statistic_x.append(b)\n\t\tsns.distplot(dev_x['%s'%a], norm_hist = True, ax = ax1, label = 'x $\\mu$=%.2fmm $\\sigma$=%.2fmm \\n z=%scm'%(mu, sigma, data[item][\"z%s\"%a][0]))\n\t\t(mu, sigma) = stats.norm.fit(dev_y['%s'%a])\n\t\tc = pd.DataFrame({\"Data_Name\" : [item], \"Energy_GeV\" : [data[item][\"beamEnergy\"][0]], \"Mean_mm\" : [mu], \"Sigma_mm\" : [sigma], \"DWC\" : [int(a)]})\n\t\tstatistic_y = statistic_y.append(c)\n\t\tsns.distplot(dev_y['%s'%a], norm_hist = True, ax = ax2, label = 'y $\\mu$=%.2fmm $\\sigma$=%.2fmm \\n z=%scm'%(mu, sigma, data[item][\"z%s\"%a][0]))\n\tax1.set_title(\"Deviation Distribution at Several z Values %s %s GeV\"%(item, data[item][\"beamEnergy\"][0]))\n\tax1.set_ylabel(\"Distribution\")\n\tax1.set_xlabel(\"Deviation w.r.t. DWC4 at z = %s cm [mm]\"%data[item][\"z4\"][0])\n\tax1.legend()\n\tax2.set_ylabel(\"Distribution\")\n\tax2.set_xlabel(\"Deviation w.r.t. 
DWC4 at z = %s cm [mm]\"%data[item][\"z4\"][0])\n\tax2.legend()\n\tplt.savefig('./Result/BeamAngle/%s_deviationZ.pdf'%item)\n\t\n\nstatistic_x = statistic_x.sort_values(by = ['Energy_GeV'])\nstatistic_x = statistic_x.reset_index(drop = True)\n# statistic_x = statistic_x.to_string(index = False)\nstatistic_y = statistic_y.sort_values(by = ['Energy_GeV'])\nstatistic_y = statistic_y.reset_index(drop = True)\n# statistic_y = statistic_y.to_string(index = False)\nstat_x_deg = stat_x_deg.sort_values(by = ['Energy_GeV'])\nstat_x_deg = stat_x_deg.reset_index(drop = True)\n# stat_x_deg = stat_x_deg.to_string(index = False)\nstat_y_deg = stat_y_deg.sort_values(by = ['Energy_GeV'])\nstat_y_deg = stat_y_deg.reset_index(drop = True)\n# stat_y_deg = stat_y_deg.to_string(index = False)\nfile1.write(tabulate(offs_x_corr_write, headers = \"firstrow\"))\nfile2.write(tabulate(offs_y_corr_write, headers = \"firstrow\"))\nfile3.write(statistic_x.to_string(index = False))\nfile4.write(statistic_y.to_string(index = False))\nfile5.write(stat_x_deg.to_string(index = False))\nfile6.write(stat_y_deg.to_string(index = False))\nfile1.close()\nfile2.close()\nfile3.close()\nfile4.close()\nfile5.close()\nfile6.close()\n\n\n\n\n'''\n\n#Section3: Create offset distribution plot \nfrom plot_histo_multiple import plot_histo_multiple\nplot_histo_multiple(data_name,data)\nfrom plot_histo_corr_multiple import plot_histo_corr_multiple\nplot_histo_corr_multiple(data_name,data)\n\n\n\n\n#Section 4: Efficiency Measurement \n\n#first import offset \nb = np.genfromtxt('./Residual/mean_residual_distribution_x.txt',dtype = 'float', skip_header = 2, usecols = (1,2,3,4))\nc = np.genfromtxt('./Residual/mean_residual_distribution_y.txt',dtype = 'float', skip_header = 2, usecols = (1,2,3,4))\n#convert offsets into a dictionary with data_name as key to make the offset easier accessible\noffset_x = {}\noffset_y = {}\n#get beam angle\ndeg1 = {}\ndeg2 = {}\ndeg3 = {}\ndeg4 = {}\nindex = 0\nfor item in data_name:\n\toffset_x[item] = b[index]\n\toffset_y[item] = c[index]\n\tindex += 1\n#tolerance = np.linspace(0,15,num = 160) #spatial accuracy in mm\ntolerance = [1.]\nfrom efficiency_function_multiple import efficiency_function_multiple\nfor item in data_name:\n\ttolerance_treated = []\n\tefficiency = {1 : [], 2 : [], 3 : [], 4 : []} #define efficiency dictionary, redefine for every item in data_name\n\tnumerator = {1 : [], 2 : [], 3 : [], 4 : []} #define numerator dictionary, redefine for every item in data_name\n\tdenominator = {1 : [], 2 : [], 3 : [], 4 : []} #define denominator dictionary, redefine for every item in data_name\n\n\tfor tl in tolerance:\n\t\ta = efficiency_function_multiple(data[item], float(tl), offset_x[item], offset_y[item])\n\t\ttolerance_treated.append(tl)\n\t\tefficiency[1].append(a[0])\n\t\tefficiency[2].append(a[1])\n\t\tefficiency[3].append(a[2])\n\t\tefficiency[4].append(a[3])\t\n\t\tnumerator[1].append(a[4])\n\t\tnumerator[2].append(a[5])\n\t\tnumerator[3].append(a[6])\n\t\tnumerator[4].append(a[7])\n\t\tdenominator[1].append(a[8])\n\t\tdenominator[2].append(a[9])\n\t\tdenominator[3].append(a[10])\n\t\tdenominator[4].append(a[11])\n\t\t#put file writing in the loop to save every iteration\n\t\tfile1 = open(\"./Result/tolerance_vs_efficiency%s.txt\"%item, 'w+') #prepare a txt file\n\t\theader = [\"Tolerance [mm]\", \"Efficiency1 [%]\",\"Efficiency DWC2 [%]\", \"Efficiency DWC3 [%]\", \"Efficiency DWC4 [%]\", \\\n\t\t\"Numerator1\", \"Numerator2\", \"Numerator3\", \"Numerator4\", \"Denominator1\", \"Denominator2\", 
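`statistic_x = statistic_x.append(b)` as used above was deprecated in pandas 1.4 and removed in 2.0, and rebuilding the frame one row at a time is quadratic anyway. A compatible sketch that collects plain dicts and builds the frame once (`runs` stands in for the per-file fit loop above, with illustrative values only):

```python
import pandas as pd

runs = [("dwc_reco_628", 50.0, 0.12, 1.4, 1),   # (name, energy, mu, sigma, dwc)
        ("dwc_reco_687", 80.0, -0.03, 1.1, 2)]

rows = [{"Data_Name": name, "Energy_GeV": e, "Mean_mm": mu,
         "Sigma_mm": sigma, "DWC": dwc} for name, e, mu, sigma, dwc in runs]

statistic_x = (pd.DataFrame(rows)
               .sort_values("Energy_GeV")
               .reset_index(drop=True))
```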
\"Denominator3\", \"Denominator4\" ]\n\t\twrt = [] # prepare a list to write into file\n\t\tfor index in range(0,len(tolerance_treated)):\n\t\t\twrt.append([tolerance_treated[index],efficiency[1][index],efficiency[2][index],efficiency[3][index],efficiency[4][index],\\\n\t\t\t\tnumerator[1][index],numerator[2][index],numerator[3][index],numerator[4][index],\\\n\t\t\t\tdenominator[1][index],denominator[2][index],denominator[3][index],denominator[4][index]])\n\n\t\tfile1.write(tabulate(wrt, headers = header))\n\t\tfile1.close()\n\n'''\n\t\n\n'''\n#Section 5: Plot result\nfrom plot_multiple import plot_multiple\nplot_multiple(data_name ,data)\n'''\n\n\n\n\n\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n","sub_path":"Efficiency_Multiple/efficiency_multiple.py","file_name":"efficiency_multiple.py","file_ext":"py","file_size_in_byte":11679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"601148294","text":"import sys\nimport random\n\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\n# https://networkx.github.io/documentation/stable/reference/drawing.html\n# https://networkx.github.io/documentation/stable/reference/functions.html\n# https://matplotlib.org/examples/color/named_colors.html\n# H = nx.Graph(G) # convert G to undirected graph\n# PyGraphviz\n# https://github.com/CristiFati/Prebuilt-Binaries/tree/master/Windows/PyGraphviz\n# Graphviz 64 bit windows\n# https://github.com/mahkoCosmo/GraphViz_x64/\n\ndef graph_states(G, graphNumber, pos):\n\tfig=plt.figure(graphNumber)\n\tfig.suptitle(\"Eurelian Parte {}\".format(graphNumber), fontsize=16)\n\n\t# get atributes\n\tedge_labels = nx.get_edge_attributes(G,'weight')\n\tnode_colors=nx.get_node_attributes(G,'color')\n\tedge_colors=nx.get_edge_attributes(G,'color')\n\n\tnx.draw(G, pos, edge_color=edge_colors.values(), node_color=node_colors.values(),\n\t\twith_labels=True, font_weight='bold'\n\t)\n\tnx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, with_labels=True, \n\t\tfont_weight='bold'\n\t)\n\nif __name__ == \"__main__\":\n\tG = nx.Graph()\n\n\tcolors=[\"silver\", \"red\", \"orange\", \"lime\", \"y\", \"c\"]\n\t# graphNodes=[\n\t# \t[0, {\"color\":\"silver\"}], \n\t# \t[1, {'color':\"silver\"}],\n\t# \t[2, {'color':\"silver\"}],\n\t# \t[3, {\"color\":\"silver\"}], \n\t# \t[4, {'color':\"silver\"}],\n\t# \t[5, {\"color\":\"silver\"}], \n\t# \t[6, {\"color\":\"silver\"}], \n\t# \t[7, {\"color\":\"silver\"}], \n\t# \t[8, {\"color\":\"silver\"}], \n\t# \t[\"j\", {\"color\":\"silver\"}]\n\t# ]\n\tgraphNodes=[\n\t\t[\"v1\", {\"color\":\"silver\", \"pos\":np.array([1, 3])}], \n\t\t[\"v2\", {'color':\"silver\", \"pos\":np.array([0, 2])}],\n\t\t[\"v3\", {'color':\"silver\", \"pos\":np.array([1, 2])}],\n\t\t[\"v4\", {\"color\":\"silver\", \"pos\":np.array([2, 2])}], \n\t\t[\"v5\", {\"color\":\"silver\", \"pos\":np.array([0, 1])}], \n\t\t[\"v6\", {\"color\":\"silver\", \"pos\":np.array([1, 1])}],\n\t\t[\"v7\", {\"color\":\"silver\", \"pos\":np.array([2, 1])}],\n\t\t[\"v8\", {\"color\":\"silver\", \"pos\":np.array([1, 0])}]\n\t]\n\n\t# graphEdges=[\n\t# \t[0, 1, {\"color\":\"silver\"}], \n\t# \t[1, 2, {\"color\":\"silver\"}],\n\t# \t[2, 3, {\"color\":\"silver\"}],\n\t# \t[2, 8, {\"color\":\"silver\"}],\n\t# \t[2, 6, {\"color\":\"silver\"}],\n\t# \t[3, 0, {\"color\":\"silver\"}],\n\t# \t[4, 5, {\"color\":\"silver\"}],\n\t# \t[5, 7, {\"color\":\"silver\"}],\n\t# \t[6, 7, {\"color\":\"silver\"}],\n\t# \t[6, 4, {\"color\":\"silver\"}],\n\t# 
\t[8, \"j\", {\"color\":\"silver\"}]\n\t# ]\n\n\tgraphEdges=[\n\t\t[\"v1\", \"v2\", {\"color\":\"black\"}], \n\t\t[\"v1\", \"v3\", {\"color\":\"black\"}],\n\t\t[\"v1\", \"v4\", {\"color\":\"black\"}],\n\t\t[\"v2\", \"v3\", {\"color\":\"black\"}],\n\t\t[\"v2\", \"v5\", {\"color\":\"black\"}],\n\t\t[\"v3\", \"v4\", {\"color\":\"black\"}],\n\t\t[\"v3\", \"v6\", {\"color\":\"black\"}],\n\t\t[\"v4\", \"v7\", {\"color\":\"black\"}],\n\t\t[\"v5\", \"v6\", {\"color\":\"black\"}],\n\t\t[\"v5\", \"v8\", {\"color\":\"black\"}],\n\t\t[\"v6\", \"v7\", {\"color\":\"black\"}],\n\t\t[\"v6\", \"v8\", {\"color\":\"black\"}],\n\t\t[\"v7\", \"v8\", {\"color\":\"black\"}]\n\t]\t\n\t\n\n\tG.add_nodes_from(graphNodes)\n\n\t# graphEdges=[]\n\t# for nodeI in G.nodes():\n\t# \tfor nodeJ in G.nodes():\n\t# \t\tprint(nodeI, nodeJ)\n\t# \t\tif(nodeI!=nodeJ):\n\t# \t\t\tgraphEdges.append([nodeI, nodeJ, {\"color\":\"silver\"}])\n\tG.add_edges_from(graphEdges)\t\t\n\n\n\t\n\n\t#G.add_weighted_edges_from(graphEdges)\n\t# nx.set_node_attributes(G, colors[0], 'color')\n\n\n\t\n\n\t# pos=nx.spring_layout(G)\n\t\t\n\t# pos[1]=np.array([0.1, 0.1])\n\t#print(pos)\n\n\t#print(G.edges[1,2])\n\t#print(G.edges[[1,2]])\n\t# print(G.edges[(0, 1)][\"weight\"])\n\t# data=[0, 1]\n\t\n\t# print(G.edges([1,2]))\n\n\t#G=nx.from_prufer_sequence([3,1,3,2,4,5])\n\t#G=nx.random_tree(10)\n\t# pos=nx.spring_layout(G)\n\t#pos=nx.nx_agraph.pygraphviz_layout(G, prog='dot')\n\t#print(nx.is_tree(G))\n\t#print(nx.to_prufer_sequence(G))\n\n\t# G=nx.complete_graph(5)\n\n\t# \n\n\t# nx.set_node_attributes(G, \"silver\", 'color')\n\t# nx.set_edge_attributes(G, \"black\", \"color\")\n\n\t# https://networkx.github.io/documentation/stable/reference/algorithms/tournament.html\n\n\t# https://networkx.github.io/documentation/stable/reference/algorithms/euler.html\n\t#print(nx.is_eulerian(G))\n\t#print(list(nx.eulerian_circuit(G)))\n\t# number=0\n\t# #for edgeI in nx.eulerian_circuit(G):\n\t# \tG.edges[edgeI][\"color\"]=\"red\"\n\t# \tgraph_states(G,number, pos)\n\t# \tnumber+=1\n\n\t###\n\t#G.edges[edgeI][\"color\"]=\"red\"\n\n\t#G=nx.algorithms.tournament.random_tournament(6)\n\t#pos=nx.spring_layout(G)\n\n\t\n\t\n\t\n\n\tpos={}\n\t# Replace pos from node attribute\n\tfor nodeI, posI in G.nodes.data(\"pos\"):\n\t\tpos[nodeI]=posI\n\n\tG=nx.icosahedral_graph()\n\n\tpos=nx.spring_layout(G)\n\n\n\tchromaticDict=nx.coloring.greedy_color(G, strategy='largest_first')\n\n\tnx.set_node_attributes(G, \"silver\", 'color')\n\tnx.set_edge_attributes(G, \"black\", \"color\")\n\n\tfor nodeI in chromaticDict:\n\t\t# print(colors[chromaticDict.get(nodeI)])\n\t\tG.nodes[nodeI][\"color\"]=colors[chromaticDict.get(nodeI)]\n\n\t\n\n\tedge_labels = nx.get_edge_attributes(G,'weight')\n\tnode_colors=nx.get_node_attributes(G,'color')\n\tedge_colors=nx.get_edge_attributes(G,'color')\n\n\t# print(nx.neighbors(G, 6))\n\t\n\tnx.draw(G, pos, edge_color=edge_colors.values(), node_color=node_colors.values(),with_labels=True, font_weight='bold')\n\n\t#pos[0]=pos[0]+0.2\n\tnx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, with_labels=True, font_weight='bold')\n\tplt.show()","sub_path":"classes/clase 12-07-2019/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"526900335","text":"#!/usr/bin/env python\n\n\"\"\" CETech engine build script\n\"\"\"\n\n###########\n# IMPORTS 
#\n########################################################################################################################\n\nimport os\nimport subprocess\nimport argparse\nimport platform\nimport shutil\nimport multiprocessing\nimport sys\n\n###########\n# GLOBALS #\n########################################################################################################################\n\nCPU_COUNT = multiprocessing.cpu_count()\nCPU_COUNT_STR = str(CPU_COUNT)\n\nOS_NAME = platform.system().lower()\nOS_ARCH = 64 if sys.maxsize > 2 ** 32 else 32\n\nROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))\nBUILD_DIR = os.path.abspath(os.path.join(ROOT_DIR, '.build'))\nEXTERNAL_BUILD_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'external', '.build'))\n\nGENIE = os.path.join(EXTERNAL_BUILD_DIR,\n \"%s%s\" % (OS_NAME, OS_ARCH),\n 'bin',\n 'genie')\n\nLINUX_GENIE = [GENIE, '--gcc=linux-clang', 'gmake']\nLINUX_BUILD = ['make', '-j', CPU_COUNT_STR, '-R', '-C', '.build/projects/gmake-linux-clang']\n\nDARWIN_GENIE = [GENIE, '--gcc=osx', 'gmake']\nDARWIN_BUILD = ['make', '-j', CPU_COUNT_STR, '-R', '-C', '.build/projects/gmake-osx']\n\nDEFAULT_BUILD = \"%s%s\" % (OS_NAME, OS_ARCH)\n\n##########\n# CONFIG #\n########################################################################################################################\n\n# Command line actions.\nACTIONS = {\n '',\n 'clean'\n}\n\n# Build config.\nCONFIG = {\n 'develop',\n 'debug',\n 'release'\n}\n\n# Build platform.\nPLATFORMS = {\n 'linux64'\n 'darwin64'\n}\n\n# Platform specific genie command.\nPLATFORMS_GENIE = {\n 'linux64': LINUX_GENIE,\n 'linux32': LINUX_GENIE,\n 'darwin64': DARWIN_GENIE,\n}\n\n# PLatform specific build command.\nPLATFORMS_BUILD = {\n 'linux64': {\n 'develop': LINUX_BUILD + ['config=develop64'],\n 'debug': LINUX_BUILD + ['config=debug64'],\n 'release': LINUX_BUILD + ['config=release64'],\n },\n\n 'linux32': {\n 'develop': LINUX_BUILD + ['config=develop32'],\n 'debug': LINUX_BUILD + ['config=debug32'],\n 'release': LINUX_BUILD + ['config=release32'],\n },\n\n 'darwin64': {\n 'develop': DARWIN_BUILD + ['config=develop64'],\n 'debug': DARWIN_BUILD + ['config=debug64'],\n 'release': DARWIN_BUILD + ['config=release64'],\n },\n}\n\n########\n# ARGS #\n########################################################################################################################\n\nARGS_PARSER = argparse.ArgumentParser(description='CETech build script')\n\nARGS_PARSER.add_argument(\n \"action\",\n help=\"Build action\",\n nargs='?', type=str, default='', choices=ACTIONS)\n\nARGS_PARSER.add_argument(\n \"--generate\",\n help='Only generate project files to build dir',\n action='store_true')\n\nARGS_PARSER.add_argument(\n \"--config\",\n help='Build configuration',\n default='debug', choices=CONFIG)\n\nARGS_PARSER.add_argument(\n \"--platform\",\n default=DEFAULT_BUILD, choices=PLATFORMS, help='Target platform')\n\nARGS_PARSER.add_argument(\n \"--cc\",\n help='CC')\n\nARGS_PARSER.add_argument(\n \"--cxx\",\n help='CXX')\n\n\n###########\n# PROGRAM #\n########################################################################################################################\n\ndef run_genie(platform, cc=None, cxx=None):\n \"\"\"Run platform specific genie command.\n \"\"\"\n\n cmd = PLATFORMS_GENIE[platform]\n\n subprocess.check_call(cmd)\n\n\ndef make(config, platform, only_genie=False, cc=None, cxx=None):\n \"\"\"Make build\n\n :param config: Build configuration.\n :param platform: Build platform.\n :param only_genie: Do not run 
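Note a genuine bug in the `PLATFORMS` set above: the missing comma makes Python concatenate the adjacent string literals, so the set holds the single element `'linux64darwin64'` and every `--platform` value would be rejected by argparse. The fix (adding `'linux32'` as well, since `PLATFORMS_GENIE` and `PLATFORMS_BUILD` both support it):

```python
# Build platform.
PLATFORMS = {
    'linux64',
    'linux32',
    'darwin64',
}
```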
build, only create projects files.\n \"\"\"\n print('Runing genie.')\n run_genie(platform=platform)\n\n print('Building.')\n if not only_genie:\n cmd = PLATFORMS_BUILD[platform][config]\n\n if cc is not None:\n cmd.append('CC=%s' % cc)\n\n if cxx is not None:\n cmd.append('CXX=%s' % cxx)\n\n subprocess.check_call(cmd)\n\n\ndef clean():\n \"\"\" Remove build dir.\n \"\"\"\n\n print('Cleaning...')\n shutil.rmtree(BUILD_DIR, ignore_errors=True)\n\n\ndef main(args=None):\n \"\"\" ENTRY POINT\n \"\"\"\n\n args = ARGS_PARSER.parse_args(args=args)\n\n action = args.action\n if action == '':\n make(args.config, args.platform, args.generate, args.cc, args.cxx)\n\n elif action == 'clean':\n clean()\n\n\n########\n# MAIN #\n########################################################################################################################\n\nif __name__ == '__main__':\n main()\n\n########################################################################################################################\n","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"624216056","text":"#!/usr/bin/env python\nimport web\nimport os\nimport json\n\nfrom traceback import print_exc\n\n\nfrom api import Heatmap, Echo\n\n\nclass FrontController:\n CONTROLLERS = {\n 'heatmap' : Heatmap,\n 'echo': Echo\n }\n \n def GET(self, path):\n if not path in FrontController.CONTROLLERS:\n raise web.notfound('HTTP 404 - Unknown Request')\n \n controller = FrontController.CONTROLLERS[path]\n handler = controller()\n query = dict(web.input())\n \n try:\n res = handler.GET(**query)\n except TypeError:\n msg = 'HTTP 400 - Bad Request'\n if getattr(controller, '__doc__', False):\n msg += '\\n' + controller.__doc__\n \n print_exc()\n raise self.badrequest(msg)\n \n if isinstance(res, dict):\n web.header('Content-Type', 'application/json')\n res = json.dumps(res)\n \n \n return res\n \n \n def badrequest(self, message):\n return web.HTTPError('400 Bad Request', {'Content-Type': 'text/plain'}, message)\n\n\n\n\nurls = (\n '/(.*)', 'FrontController'\n)\n\napp = web.application(urls, globals())\nif __name__ == '__main__':\n with open(os.path.expanduser('~/environment.json'), 'w') as f:\n json.dump(dict(DOTCLOUD_DATA_MONGODB_URL = 'localhost'), f)\n \n app.run()\nelse:\n with open(os.path.expanduser('~/environment.json')) as f:\n env = json.load(f)\n web.DOTCLOUD_DATA_MONGODB_URL = env['DOTCLOUD_DATA_MONGODB_URL']\n web.config.debug = False\n application = app.wsgifunc()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"644346207","text":"# First make imports of all modules to be used.\nfrom django.shortcuts import render, get_object_or_404 # for unavailable urls, 404 will be displayed\nfrom .models import Post, Comment # for posts\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import ListView\nfrom .forms import EmailPostForm, CommentForm, SearchForm, ContactForm # this two lines are used to send mails and search\nfrom django.core.mail import send_mail\nfrom taggit.models import Tag\nfrom django.db.models import Count\nfrom django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank\nfrom django.contrib.postgres.search import TrigramSimilarity # To implement TrigramSimilarity search\n\n# Create 
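One fragile spot in the `wsgi.py` record above: the module-level `else` branch reads `~/environment.json`, which the script itself only writes when run as `__main__` — if the hosting platform does not provision that file, the import fails outright. A defensive sketch (the localhost default is an assumption, not taken from the record):

```python
import json
import os

def mongodb_url(default="mongodb://localhost"):
    """Read the MongoDB URL from ~/environment.json, falling back to the env."""
    try:
        with open(os.path.expanduser("~/environment.json")) as f:
            return json.load(f)["DOTCLOUD_DATA_MONGODB_URL"]
    except (OSError, KeyError, ValueError):
        return os.environ.get("DOTCLOUD_DATA_MONGODB_URL", default)
```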
your views here.\n\n\n# all posts will be viewed as a list\ndef post_list(request, tag_slug=None):\n\tobject_list = Post.published.all()\n\ttag = None\n\n\tif tag_slug:\n\t\ttag = get_object_or_404(Tag, slug=tag_slug)\n\t\tobject_list = object_list.filter(tags__in=[tag])\n\tpaginator = Paginator(object_list, 4) # 3 posts in each page\n\tpage = request.GET.get('page')\n\ttry:\n\t\tposts = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\t# If page is not an integer, deliver the first page\n\t\tposts = paginator.page(1)\n\texcept EmptyPage:\n\t\t# If page is out of range, deliver last of results\n\t\tposts = paginator.page(paginator.num_pages)\n\t\n\treturn render(request, 'blog/post/list.html', {'page': page, 'posts': posts, 'tag': tag})\n\n\n# the details for each post will be gotten from this function\ndef post_detail(request, year, month, day, post):\n\tpost = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month,\n\t\t\t\t\t\t\t publish__day=day)\n\n\t# List of active comments for the post\n\tcomments = post.comments.filter(active=True)\n\n\tnew_comment = None\n\n\tif request.method == 'POST':\n\t\t# A comment was posted\n\t\tcomment_form = CommentForm(data=request.POST)\n\t\tif comment_form.is_valid():\n\t\t\t# Create comment object but don't save to database yet\n\t\t\tnew_comment = comment_form.save(commit=False)\n\t\t\t# Assign the current post to the comment\n\t\t\tnew_comment.post = post\n\t\t\t# save the comment to the database\n\t\t\tnew_comment.save()\n\telse:\n\t\tcomment_form = CommentForm()\n\n\t# List of similar posts\n\tpost_tag_ids = post.tags.values_list('id', flat=True)\n\tsimilar_posts = Post.published.filter(tags__in=post_tag_ids).exclude(id=post.id)\n\tsimilar_posts = similar_posts.annotate(same_tag=Count('tags')).order_by('-same_tag', '-publish')[:4]\n\n\treturn render(request, 'blog/post/detail.html',\t{'post': post, 'comments': comments,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'new_comment': new_comment,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'comment_form': comment_form,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'similar_posts': similar_posts})\n\n\n# we use pagination to shorten the numbers oa post for each page.\nclass PostListView(ListView):\n\tqueryset = Post.published.all()\n\tcontext_object_name = 'posts'\n\tpaginate_by = 3\n\ttemplate_name = 'blog/post/list.html'\n\n\n# to share posts\ndef post_share(request, post_id):\n\t# Retrieve post by id\n\tpost = get_object_or_404(Post, id=post_id, status='published')\n\tsent = False\n\n\tif request.method == 'POST':\n\t\t# Form was submitted\n\t\tform = EmailPostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\t# form fields passed validation\n\t\t\tcd = form.cleaned_data\n\t\t\t# ... 
send email\n\t\t\tpost_url = request.build_absolute_uri(\n\t\t\t\tpost.get_absolute_url())\n\t\t\tsubject = f\"{cd['name']} recommends you read {post.title}\"\n\t\t\tmessage = f\"Read {post.title} at {post_url}\\n\\n{cd['name']}\\'s comments: {cd['comments']}\"\n\t\t\tsend_mail(subject, message, 'onyedikachi1997@gmail.com', [cd['to']])\n\t\t\tsent = True\n\n\telse:\n\t\tform = EmailPostForm()\n\treturn render(request, 'blog/post/share.html', {'post': post, 'form': form, 'sent': sent})\n\n\n# To search posts\ndef post_search(request):\n\tform = SearchForm()\n\tquery = None\n\tresults = []\n\tif 'query' in request.GET:\n\t\tform = SearchForm(request.GET)\n\t\tif form.is_valid():\n\t\t\tquery = form.cleaned_data['query']\n\t\t\tsearch_vector = SearchVector('title', weight='A') + SearchVector('body', weight='B')\n\t\t\tsearch_query =SearchQuery(query)\n\t\t\tresults = Post.published.annotate(\n\t\t\t\t\t\t\t\t\t\t\tsimilarity = TrigramSimilarity('title', query),\n\t\t\t\t\t\t\t\t\t\t\t# search=search_vector,\n\t\t\t\t\t\t\t\t\t\t\t# rank=SearchRank(search_vector, search_query)\n\t\t\t\t\t\t\t\t\t\t\t).filter(similarity__gt=0.1).order_by('similarity') # for trimgram\n\t\t\t\t\t\t\t\t\t\t\t# filter(rank__gte=0.3).order_by('-rank') # for rank search\n\treturn render(request,\n\t\t\t\t\t'blog/post/search.html',\n\t\t\t\t\t{'form': form,\n\t\t\t\t\t 'query': query,\n\t\t\t\t\t 'results': results})\n\n\n# Contact form\ndef contact_form(request):\n\tsent = False\n\n\tif request.method == 'POST':\n\t\t# Form was submitted\n\t\tcontact_form = ContactForm(request.POST)\n\t\tif contact_form.is_valid():\n\t\t\t# form fields passed validation\n\t\t\tcd = contact_form.cleaned_data\n\t\t\t# ... send email\n\t\t\t\n\t\t\tsubject = f\"Mail from {cd['name']}\"\n\t\t\tsenders_email = f\"{cd['email']}\"\n\t\t\trecipients_email ='onyedikachi1997@gmail.com'\n\t\t\tmessage = f\"{cd['subject']} at \\n\\n{cd['message']}\\'s comments: {cd['message']}\"\n\t\t\tsend_mail(subject, senders_email, message, 'onyedikachi1997@gmail.com', recipients_email)\n\t\t\tsent = True\n\n\telse:\n\t\tcontact_form = ContactForm()\n\treturn render(request, 'blog/post/contact.html', {'contact_form': contact_form, 'sent': sent})\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"237944986","text":"# File to check the PyTorch CNN model on a single mole image \n# To execute this file: exec(open('testpythonflask.py').read()) \n# Contributor: Frédéric Fourré\n\nimport cv2\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\n\n\nclass mynet(nn.Module):\n def __init__(self):\n super().__init__()\n # two convolutional layers, kernel 3x3 + padding\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) \n self.act1 = nn.Tanh()\n self.pool1 = nn.MaxPool2d(2) \n self.conv2 = nn.Conv2d(64, 32, kernel_size=3, padding=1)\n self.act2 = nn.Tanh()\n self.pool2 = nn.MaxPool2d(2)\n self.fc1 = nn.Linear(16 * 16 * 32, 64)\n self.act3 = nn.Tanh()\n self.fc2 = nn.Linear(64, 3)\n\n def forward(self, x):\n out = self.pool1(self.act1(self.conv1(x)))\n out = self.pool2(self.act2(self.conv2(out)))\n # reshape !\n out = out.view(-1, 16 * 16 * 32)\n out = self.act3(self.fc1(out))\n out = self.fc2(out)\n return out\n \n \n\nloaded_model = 
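`contact_form` above calls `send_mail(subject, senders_email, message, 'onyedikachi1997@gmail.com', recipients_email)`, but Django's signature is `send_mail(subject, message, from_email, recipient_list, fail_silently=False, ...)` — so the sender's address lands in the body slot, the body in the from-address slot, and `recipient_list` receives a bare string instead of a list. A corrected call:

```python
from django.core.mail import send_mail

# cd = contact_form.cleaned_data, as in the view above
send_mail(
    subject=f"Mail from {cd['name']}",
    message=cd['message'],
    from_email=cd['email'],
    recipient_list=['onyedikachi1997@gmail.com'],
)
```

Many SMTP providers also reject an arbitrary user address as `from_email`; sending from `settings.DEFAULT_FROM_EMAIL` and carrying the user's address in `reply_to` (via `EmailMessage`) is the more robust pattern.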
mynet()\n\nmodel_path='./'\n\nfilename='6987CNN2pixel64kernel3inputC643216epoch200.pt'\n\nloaded_model.load_state_dict(torch.load(model_path + filename))\n\ndatpath='./pytorchdat/3'\n\n# images in classe 3: D269, D288, D322, D328, D329, D340, D353, D437, D540\nimgname='D540.BMP'\n\n# read image with cv2\n#image = cv2.imread(datpath + '/' + imgname, cv2.IMREAD_COLOR)\n#imagec = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# read the image with PIL\nimagec = Image.open(datpath + '/' + imgname)\n\nplt.imshow(imagec)\nplt.show()\n\nt2=transforms.ToTensor()\n\nimgt2=t2(imagec)\n\nprint(imgt2.shape)\n\nsizein=64\nt1=transforms.Resize((sizein,sizein))\n\nimgt1=t1(imgt2)\n\nprint(imgt1.shape)\n\nimg=imgt1.unsqueeze(0)\n\nprint(img.shape)\n\noutput=loaded_model(img)\n\nfsoftmax = nn.Softmax()\n\nproba=fsoftmax(output)\n\nprint(proba)\n\ntorch.max(proba)\n\n_, indice = torch.max(proba, dim=1) \n\nprint(indice.item())\n\n\n\n\n","sub_path":"testpythonflask.py","file_name":"testpythonflask.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"620506641","text":"import sys\n# Should be executed in MNE-Environment or base with MNE installed\nfrom functools import partial\n\nimport mne\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import (QAction, QApplication, QDesktopWidget, QDialog, QHBoxLayout, QLabel, QMainWindow,\n QPushButton, QStyle, QVBoxLayout, QWidget)\nfrom matplotlib import pyplot as plt\nfrom mayavi import mlab\n\n\nclass Testsecwin(QDialog):\n def __init__(self, parent):\n super().__init__(parent)\n self.layout = QHBoxLayout()\n self.layout.addWidget(QLabel('Huga'))\n self.setLayout(self.layout)\n self.open()\n # self.raise_()\n # self.activateWindow()\n\n\nclass TestWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('QT-Test')\n self.setCentralWidget(QWidget(self))\n self.main_layout = QVBoxLayout()\n self.addmenu()\n self.addlabel()\n self.addbuttons()\n self.centralWidget().setLayout(self.main_layout)\n\n # Necessary because frameGeometry is dependent on number of function-buttons\n newh = self.sizeHint().height()\n neww = self.sizeHint().width()\n self.setGeometry(0, 0, neww, newh)\n\n # This is also possible but does not center a widget with height < 480\n # self.layout().update()\n # self.layout().activate()\n self.center()\n\n def addmenu(self):\n first_menu = self.menuBar().addMenu(self.style().standardIcon(QStyle.SP_DialogCancelButton), '&First')\n first_action = QAction(icon=self.style().standardIcon(QStyle.SP_DialogCancelButton), text='Second', parent=self)\n first_action.triggered.connect(self.test_matplotlib)\n first_menu.addAction(first_action)\n\n def addbuttons(self):\n h_layout = QHBoxLayout()\n bt1 = QPushButton('Quit')\n bt2 = QPushButton('Matplotlib')\n bt3 = QPushButton('MNE-Coreg')\n bt4 = QPushButton('Mayavi')\n bt5 = QPushButton('Second-Window')\n icon = QIcon(':/correct_icon.svg')\n bt6 = QPushButton(icon=icon)\n h_layout.addWidget(bt1)\n h_layout.addWidget(bt2)\n h_layout.addWidget(bt3)\n h_layout.addWidget(bt4)\n h_layout.addWidget(bt5)\n h_layout.addWidget(bt6)\n self.setLayout(h_layout)\n\n bt1.clicked.connect(app.quit)\n bt2.clicked.connect(self.test_matplotlib)\n bt3.clicked.connect(self.test_mne_coreg)\n bt4.clicked.connect(self.test_mayavi)\n bt5.clicked.connect(partial(Testsecwin, self))\n\n self.main_layout.addLayout(h_layout)\n\n def addlabel(self):\n label = QLabel('Hello World!', self)\n 
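Two inference details the record above glosses over: the shape arithmetic (a 64x64 input through two 2x2 poolings gives the `16 * 16 * 32` flatten in `mynet`), and evaluation hygiene — `nn.Softmax()` without `dim` triggers an implicit-dimension warning, and running the forward pass under `torch.no_grad()` avoids building an autograd graph. Continuing with `loaded_model` and `img` from the record:

```python
import torch

loaded_model.eval()                       # no dropout/batchnorm here, but good hygiene
with torch.no_grad():                     # inference only: skip autograd bookkeeping
    logits = loaded_model(img)            # img has shape (1, 3, 64, 64)
    proba = torch.softmax(logits, dim=1)  # explicit dim, same as nn.Softmax(dim=1)
    confidence, predicted = torch.max(proba, dim=1)

print(predicted.item(), confidence.item())
```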
self.main_layout.addWidget(label)\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def test_matplotlib(self):\n plt.plot([1, 2, 3, 4])\n plt.show()\n\n def test_mne_coreg(self):\n mne.gui.coregistration()\n\n def test_mayavi(self):\n mlab.figure()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n view = TestWindow()\n # Program Mode\n view.show()\n sys.exit(app.exec_())\n # Debug Mode\n # app.lastWindowClosed.connect(app.quit)\n # app.exec_()\n # test.close()\n # del app, test, TestWindow\n# # Required, if to run repeatedly in Ipython\n# if app in locals():\n# del app\n\n# Necessary for Spyder\n# app.lastWindowClosed.connect(app.quit)\n","sub_path":"mne_pipeline_hd/development/qt_test.py","file_name":"qt_test.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"452878077","text":"from blackjack.chips import Chips\nfrom blackjack.deck import Deck\nfrom blackjack.hand import Hand\nfrom blackjack.card import Card\n\n\n\ndef take_bet(chips):\n while True:\n try:\n chips.bet = int(input('how many chips would you like to bet?'))\n except:\n print('sorry please provide an integer')\n else:\n if chips.bet > chips.total:\n print('sorry you do not have enough chips, you have:{}'.format(chips.total))\n else:\n break\n\ndef hit(deck, hand):\n single_card = deck.deal()\n hand.add_card(single_card)\n hand.adjust_for_ace()\n\ndef hit_or_stand(deck, hand):\n global playing\n\n while True:\n x = input('Hit or Stand? Enter h or s')\n if x[0].lower() == 'h':\n hit (deck, hand)\n elif x[0].lower() == 's':\n print(\"player stands dealer's turn\")\n playing = False\n else:\n print(\"sorry I did no undestand enter h or s\")\n continue\n break\n\ndef show_some(player, dealer):\n print(\"\\nDealer's Hand:\")\n print(\"