diff --git "a/4287.jsonl" "b/4287.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4287.jsonl"
@@ -0,0 +1,734 @@
+{"seq_id":"163917093","text":"\"\"\"Example script showing use of postproc.py module.\n* Input tables (Mat1 and Mat2) containing routing information \n have already been created using sfr_classes.py \n* A shapefile is supplied for model grid input instead of a MODFLOW DIS file, \n as the model grid is rotated in this case \n (the DIS file reader in SFRmaker does not support rotated grids)\n\"\"\"\n\nimport sys\nsys.path.insert(0, 'D:/ATLData/Documents/GitHub/SFR')\n#sys.path.append('D:\\JointBaseModel\\SFRMakerData\\TestModel')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom rasterstats import zonal_stats\nfrom postproc import SFRdata\n\npath = 'D:/ATLData/SFR_testing/TestModel/'\n\n# intantiate SFRdata object\nsfr = SFRdata(Mat1=path + 'SFR_GWVmat1.txt',\n Mat2=path + 'SFR_GWVmat2.txt',\n mfgridshp='ForsyGrd.shp',\n mfgridshp_node_field='CellNum', \n mfgridshp_row_field='ROW', mfgridshp_column_field='COLUMN')\n\n# create columns in Mat2 of Min and Max elevation for each segment\n# (these columns are not created by sfr_classes.py)\nsfr.update_Mat2_elevations()\n\n# update the reach elevations in Mat1 with minimum elevations from DEM\nsfr.reset_m1_streambed_top_from_dem(dem=path + 'forsy_lid')\n\n# trace routing from headwater segments to outlets; assign outlet to each segment\nsfr.map_outsegs()\n\n# creates table of segment confluences\nsfr.map_confluences()\n\n# smooth DEM elevations in segment interiors so that they decrease in downstream direction\nsfr.smooth_interior_elevations()\n\n# read in the DIS file (this is needed for some of the methods below;\n# e.g. model top elevations are added to the stream profiles by default)\nsfr.read_dis2(mfdis='Forsy.DIS', mfnam='ForsySFRMaker.nam')\n\n# plot profiles of streambed elevations in comparison to model top and DEM minimum\nsfr.plot_stream_profiles(add_profiles={'Minimum DEM elevation': 'landsurface'})\n\n# enforce only one SFR conductance for each model cell \n# (other reaches in cell assigned near-zero conductance)\nsfr.consolidate_conductance()\n\n# adjust model grid so that all SFR reaches are in layer 1\n# outputs a new DIS file for model\nsfr.reset_model_top_2streambed(outdisfile='Forsy_adjusted_to_streambed.dis')\n\n# run suite of diagnostics to test for common problems with SFR package\nsfr.run_diagnostics()\n\n# create shapefile for visualizing SFR package\nsfr.write_shapefile(outshp='Forsy.shp', prj='ForsyGrd.prj')\n\n# write updated tables\nsfr.write_tables(basename='Forsy')\n\n# write an SFR package file\nsfr.write_sfr_package(basename='Forsy')\n","sub_path":"Examples/Example_postproc_workflow2.py","file_name":"Example_postproc_workflow2.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"580314720","text":"import nltk\r\nfrom nltk.tokenize import RegexpTokenizer\r\nimport operator\r\nfrom nltk.tokenize import word_tokenize\r\n\r\n\r\n\r\ntokenizer = RegexpTokenizer(r'\\w+')\r\n\r\nfile = open(\"textrank_hotels_output2.txt\", \"r\")\r\ndata = dict()\r\ndictList = [];\r\ndictNumber = 0;\r\n\r\n# counts \r\nfor line in file:\r\n if line.strip():\r\n word = tokenizer.tokenize(line)\r\n if len(word) > 0:\r\n if (word[0].isupper()):\r\n dictList.append(data)\r\n data = dict()\r\n data[line] = 0\r\n continue\r\n continue\r\n## if word[0] not in data:\r\n## data[word[0]] = 1\r\n## else:\r\n## data[word[0]] = data[word[0]] + 1\r\n for single in word:\r\n if not single.isdigit():\r\n if single not in data:\r\n data[single] = 1\r\n else:\r\n data[single] = data[single] + 1\r\n \r\n else:\r\n dictList.append(data)\r\n data = dict()\r\n data[\"********* NAMELESS **********\"] = 0\r\n \r\n\r\n# print (dictList)\r\n\r\n\r\nfor dataset in dictList:\r\n dataset2 = dict(dataset);\r\n for item in dataset2:\r\n if len(item) == 1:\r\n del dataset[item]\r\n\r\n\r\n\r\nfor dataset in dictList:\r\n sorted_data = sorted(dataset.items(), key=operator.itemgetter(1))\r\n for line in sorted_data:\r\n if line[0].isupper():\r\n print (\"\\n\")\r\n print (line[0])\r\n print (\"\\n\")\r\n else:\r\n print(line)\r\n\r\n##data2 = dict(data)\r\n##\r\n### filtering:\r\n### 1. number of instances\r\n### 2. manual items\r\n### 3. parsing issue\r\n### 4. non-nouns\r\n##for item in data2:\r\n## if data[item] < 3:\r\n## del data[item]\r\n## elif \"hotel\" in item or \"stay\" in item or \"excellent\" in item or \"phoenix\" in item or \"tablet\" in item or \"android\" in item or \"free\" in item or \"book\" in item or \"mount\" in item or \"page\" in item:\r\n## del data[item]\r\n## elif len(item) == 1:\r\n## del data[item]\r\n## else:\r\n## text = word_tokenize(item)\r\n## sample_line = nltk.pos_tag(text)\r\n## if sample_line[0][1] not in 'NN':\r\n## del data[item]\r\n## \r\n### sorts dict for printing\r\n##sorted_data = sorted(data.items(), key=operator.itemgetter(1))\r\n##\r\n##for line in sorted_data:\r\n## print(line)\r\n","sub_path":"wordCountEditedv2.py","file_name":"wordCountEditedv2.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"548677601","text":"import os\nimport pickle\nimport shutil\nimport sys \nimport re\nimport time\nimport functools\nimport filecmp\nfrom subprocess import call\nfrom collections import namedtuple, OrderedDict, Hashable\n\nfrom geosoft_api import gxapi\n\nGXApiCollectionInfo = namedtuple('GXApiCollectionInfo', ['classes', 'known_classes', 'known_class_handles', 'known_methods', 'known_definitions', 'known_definition_values'])\nglobal_collection = 0\n\nclass memoized:\n '''Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n '''\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args):\n if not isinstance(args, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args)\n if args in self.cache:\n return self.cache[args]\n else:\n value = self.func(*args)\n self.cache[args] = value\n return value\n def __repr__(self):\n '''Return the function's docstring.'''\n return self.func.__doc__\n def __get__(self, obj, objtype):\n '''Support instance methods.'''\n return functools.partial(self.__call__, obj)\n\ndef convert_camel_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace(\"3_d\", \"_3d\")\n\ndef is_class(type_name):\n return type_name in global_collection.known_classes or type_name in global_collection.known_class_handles\n\ndef get_class(type_name):\n if type_name in global_collection.known_classes:\n return global_collection.known_classes[type_name]\n elif type_name in global_collection.known_class_handles:\n return global_collection.known_class_handles[type_name]\n else:\n return None\n\ndef get_python_defintion_value(defintion_value):\n if defintion_value.val in global_collection.known_definition_values:\n return global_collection.known_definition_values[defintion_value.val]['defined_value'].get_python_value()\n else:\n if defintion_value.type == 'System.String':\n return '\"' + defintion_value.val + '\"'\n else:\n return \"(\" + defintion_value.get_cpp_const_type() + \")\" + defintion_value.get_value_without_casts()\n\ndef parse_type(type_name):\n if type_name in global_collection.known_class_handles:\n return global_collection.known_class_handles[type_name].name\n else:\n return type_name\n\ndef get_cpp_return_cast(type_name):\n if type_name == \"bool\":\n return \"0 != \"\n elif type_name in global_collection.known_definitions:\n return \"(\" + type_name + \")\"\n else:\n return \"\"\n\ndef get_is_cpp_long_equivalent(type_name):\n if type_name == 'int32_t':\n return True\n elif type_name in global_collection.known_definitions:\n return not global_collection.known_definitions[type_name].constant\n return False\n\ndef get_cpp_type(type_name, no_pointer=False):\n if is_class(type_name):\n type_name = \"GX\" + parse_type(type_name)\n if not no_pointer:\n return type_name + \"Ptr\"\n else:\n return type_name\n else:\n return {\n 'real': 'double',\n 'int': 'int32_t',\n 'intval': 'int32_t',\n 'string': 'const gx_string_type&',\n 'var string': 'gx_string_type&',\n 'CRC': 'int32_t',\n 'WND': 'int32_t',\n 'PTMP': 'int32_t',\n 'FILTER': 'int32_t',\n 'DGW_OBJ': 'int32_t',\n 'TB_FIELD': 'int32_t',\n 'DB_SELECT': 'int32_t',\n 'DB_SYMB': 'int32_t',\n 'META_TOKEN': 'int32_t',\n 'HANDLE': 'int32_t',\n 'GEO_BOOL': 'bool'\n }.get(type_name, type_name)\n\n\ndef restructured_directive(start, contents):\n indent 
= '\\n' + ' ' * (len(start) + 1)\n return start + \" \" + indent.join(contents.strip().split('\\n')) + \"\\n\"\n\ndef word_to_ref(word, allow_classes):\n if allow_classes and word in global_collection.known_classes:\n return \"\\\\ :class:`geosoft.gxapi.GX\" + word + \"`\\\\ \"\n elif word in global_collection.known_methods:\n method_info = global_collection.known_methods[word]\n method = method_info['method']\n gxclass = method_info['gxclass']\n return \"\\\\ :func:`geosoft.gxapi.GX\" + gxclass.name + \".\" + gxclass.py_method_name(method) + \"`\\\\ \"\n else:\n return word\n\ndef all_refs_repl(matchobj):\n return word_to_ref(matchobj.group(0), True)\n\ndef subst_all_refs(description):\n return re.sub('\\w+', all_refs_repl, description)\n\ndef non_class_refs_repl(matchobj):\n return word_to_ref(matchobj.group(0), False)\n\ndef subst_non_class_refs(description):\n return re.sub('\\w+', non_class_refs_repl, description)\n\ndef define_refs_repl(matchobj):\n if matchobj.group(1) == \"GEO_BOOL\":\n return \"bool\"\n else:\n definition_name = matchobj.group(1)\n if definition_name in global_collection.known_definitions:\n definition = global_collection.known_definitions[definition_name]\n if definition.null_handle:\n return \"\\\\ :func:`geosoft.gxapi.GX\" + definition_name.replace(\"_NULL\", \"\") + \".null()`\\\\ \"\n return \"\\\\ :ref:`\" + matchobj.group(1) + \"`\\\\ \"\n\ndef subst_defines(description):\n return re.sub('(.+?)', define_refs_repl, description)\n\ndef docstring_fixes(description):\n description = description.replace(\"GS_TRUE\", \"``True``\")\n description = description.replace(\"GS_FALSE\", \"``False``\")\n description = subst_defines(description)\n description = description.replace(\"*\", \"\\\\ `*`\\\\ \")\n description = description.replace(\"|\", \"\\\\ `|`\\\\ \")\n return description\n\ndef docstring_literal_para(description, para_id='.. parsed-literal::', sub_all_refs=False):\n if sub_all_refs:\n description = subst_all_refs(description)\n else:\n description = subst_non_class_refs(description)\n description = docstring_fixes(description)\n return '\\n\"\\\\n' + para_id + '\\\\n\\\\n\"\\n\" ' + '\\\\n\"\\n\" '.join(description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\").split('\\n')) + '\\\\n\\\\n\"\\n'\n\ndef docstring_literal_note(description):\n return '\\n\"\\\\n\\\\n**Note:**\\\\n\\\\n\"\\n' + docstring_literal_para(description)\n\ndef docstring_literal_seealso(description):\n return docstring_literal_para(description, para_id = '.. seealso::', sub_all_refs=True)\n\ndef docstring_literal_version(version):\n return '\\n\"\\\\n.. 
versionadded:: ' + version + '\\\\n\\\\n\"\\n'\n\ndef multi_line_fixup(description):\n return description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\")\n\ndef docstring_multi_line(description):\n description = docstring_fixes(description)\n return '\\n\"' + '\\\\n\"\\n\"'.join(description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\").split('\\n')) + '\\\\n\"\\n'\n\ndef generate_sphinx_description(description):\n return (' '.join(description.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\").split('\\n'))).strip()\n\nclass defined_value_class(gxapi.defined_value.typeDefinition()):\n @memoized\n def get_value_without_casts(self):\n return self.val.replace(\"(unsigned long) \", \"\").replace(\"(__GS_INT64) \", \"\").replace(\"(__GS_UINT64) \", \"\")\n\n @memoized\n def get_cpp_value(self):\n if self.type == 'System.String':\n return 'gx_string_literal(\"' + self.val + '\")'\n else:\n return self.get_value_without_casts()\n\n @memoized\n def get_python_value(self):\n return get_python_defintion_value(self)\n\n @memoized\n def get_cpp_const_type(self):\n if self.cpp_type:\n return self.cpp_type\n if self.type == 'System.String':\n return \"gx_string_char_type*\"\n if self.type == 'System.Int32':\n return \"int32_t\"\n if self.type == 'System.Single':\n return \"float\"\n if self.type == 'System.Double':\n return \"double\"\n\n @memoized\n def get_spec_type(self):\n if self.cpp_type:\n return 'Type.{}'.format(self.cpp_type.upper())\n elif self.type == 'System.String':\n return \"Type.STRING\"\n elif self.type == 'System.Int32':\n return 'Type.INT32_T'\n elif self.type == 'System.Single':\n return 'Type.FLOAT'\n elif self.type == 'System.Double':\n return 'Type.DOUBLE'\n else:\n return '\"{}\"'.format(self.type)\n\n @memoized\n def get_sphinx_docstring(self):\n return docstring_fixes(subst_non_class_refs(self.description))\n\ngxapi.defined_value.typeDefinition()._SetSupersedingClass(defined_value_class)\t\t\t \n\nclass definition_class(gxapi.definition.typeDefinition()):\n @memoized\n def get_cpp_const_name(self, defined_value):\n if self.single_constant:\n return defined_value.name\n else:\n value_name = defined_value.name\n parts = self.name.split('_')\n for part in parts:\n if value_name.startswith(part + \"_\"):\n value_name = value_name[len(part) + 1:]\n return value_name\n\n @memoized\n def get_cpp_const_declaration(self, defined_value):\n return 'static const ' + defined_value.get_cpp_const_type() + \" \" + self.get_cpp_const_name(defined_value) + \" = \" + defined_value.get_cpp_value() + \";\"\n\n @memoized\n def get_cpp_defined_value_name(self, defined_value):\n if self.cpp_prefix:\n return self.cpp_prefix + defined_value.name\n else:\n return defined_value.name\n\n @memoized\n def get_sphinx_docstring(self):\n return docstring_fixes(subst_non_class_refs(self.description))\ngxapi.definition.typeDefinition()._SetSupersedingClass(definition_class)\t\t\t\n\ndef resolve_enum_type_from_description(type_name, description):\n if description and type_name == \"int\":\n defines = re.findall(\"(.+?)\", description)\n if len(defines) == 1:\n if not defines[0] in global_collection.known_definitions:\n raise Exception('Unknown definition indicated for parameter or return value: ' + defines[0])\n return defines[0]\n return type_name\n \nclass parameter_class(gxapi.parameter.typeDefinition()):\n @memoized\n def is_class(self):\n return is_class(self.type)\n\n @memoized\n def is_var(self):\n return self.type.startswith(\"var \")\n\n @memoized\n def 
is_var_type(self):\n return self.type != \"var string\" and self.is_var()\n\n @memoized\n def get_spec_type(self):\n type = self.type[4:] if self.is_var() else self.type\n if type == 'string':\n return \"Type.STRING\"\n elif type == 'int' or type == \"intval\":\n return 'Type.INT32_T'\n elif type == 'real':\n return 'Type.DOUBLE'\n else:\n return '\"{}\"'.format(self.type)\n\n @memoized\n def get_type(self):\n if self.is_var_type():\n return self.type[4:]\n else:\n return self.type\n\n @memoized\n def __cpp_type(self, no_pointer):\n return get_cpp_type(resolve_enum_type_from_description(self.get_type(), self.description), no_pointer)\n\n def cpp_type(self, no_pointer=False):\n return self.__cpp_type(no_pointer)\n\n @memoized\n def cpp_python_wrap_type(self):\n if self.is_cpp_long_equivalent():\n type_name = \"int32_t\"\n else:\n type_name = self.cpp_type()\n\n if type_name == \"gx_string_type&\":\n return \"str_ref&\"\n elif self.is_var_type():\n if type_name == \"int32_t\":\n return \"int_ref&\"\n elif type_name == \"double\":\n return \"float_ref&\"\n elif type_name == \"bool\":\n return \"bool_ref&\"\n else:\n raise Exception(\"Unexpected var type: \" + type_name)\n else:\n return type_name\n\n @memoized\n def cpp_python_docstring_type(self):\n if self.is_cpp_long_equivalent():\n type_name = \"int32_t\"\n else:\n type_name = self.cpp_type(no_pointer=True)\n if type_name == \"double\":\n type_name = \"float\"\n elif type_name == \"int32_t\":\n type_name = \"int\"\n elif type_name == \"const gx_string_type&\":\n type_name = \"str\"\n elif type_name == \"gx_string_type&\":\n type_name = \"str_ref\"\n if self.is_var_type():\n type_name = type_name + \"_ref\"\n return type_name\n\n @memoized\n def cpp_python_wrap_cast(self):\n type_name = self.cpp_type()\n\n if not type_name == \"int32_t\" and self.is_cpp_long_equivalent():\n if self.is_var_type():\n return \"(\" + type_name + \"&)\"\n else:\n return \"(\" + type_name + \")\"\n else:\n return \"\"\n\n @memoized\n def is_val_type(self):\n return self.get_type() == 'intval' or self.get_type() == 'HWND' or self.get_type() == 'HDC'\n\n @memoized\n def is_param_in_type(self):\n return self.get_type().startswith(\"void (\")\n\n @memoized\n def is_cpp_long_equivalent(self):\n if not self.is_val_type():\n return get_is_cpp_long_equivalent(self.cpp_type())\n else:\n return False\n\n @memoized\n def cpp_cast_start(self):\n if self.is_cpp_long_equivalent():\n if self.is_var_type():\n return \"reinterpret_cast(\"\n else:\n return \"reinterpret_cast(\"\n else:\n return \"\"\n\n @memoized\n def cpp_cast_end(self):\n if self.is_cpp_long_equivalent():\n return \")\"\n else:\n return \"\"\n\n @memoized\n def get_python_docstring(self):\n return docstring_literal_para(self.description)\ngxapi.parameter.typeDefinition()._SetSupersedingClass(parameter_class)\n\n\nclass method_class(gxapi.method.typeDefinition()):\n @memoized\n def external_name(self):\n if self.externalname:\n return self.externalname\n else:\n return self.name\n\n @memoized\n def returns_class(self):\n return is_class(self.returnval.type)\n\n @memoized\n def get_return_class(self):\n return get_class(self.returnval.type)\n\n @memoized\n def __cpp_return_type(self, no_pointer):\n return get_cpp_type(resolve_enum_type_from_description(self.returnval.type, self.returnval.description), no_pointer = no_pointer)\n\n def cpp_return_type(self, no_pointer=False):\n return self.__cpp_return_type(no_pointer)\n\n @memoized\n def __python_wrap_return_type(self, no_pointer):\n type_name = 
self.cpp_return_type(no_pointer = no_pointer)\n if get_is_cpp_long_equivalent(type_name):\n return \"int32_t\"\n else:\n return type_name\n\n def python_wrap_return_type(self, no_pointer=False):\n return self.__python_wrap_return_type(no_pointer)\n\n @memoized\n def cpp_return_cast(self):\n return get_cpp_return_cast(self.cpp_return_type())\n\n def get_spec_lic(self):\n if self.license.startswith('_public'):\n return 'Availability.PUBLIC'\n elif self.license.startswith('_license'):\n return 'Availability.LICENSED'\n elif self.license.startswith('_ext'):\n return 'Availability.EXTENSION'\n else:\n return 'Availability.UNKNOWN'\n\n def get_spec_ret_type(self):\n if self.returnval.type == 'int':\n return 'Type.INT32_T'\n elif self.returnval.type == 'real':\n return 'Type.DOUBLE'\n elif self.returnval.type == 'void':\n return 'Type.VOID'\n else:\n return '\"{}\"'.format(self.returnval.type)\n\n @memoized\n def is_app(self):\n return self.license.endswith('_app')\ngxapi.method.typeDefinition()._SetSupersedingClass(method_class)\n\nclass ext_parameter_info:\n def __init__(self, index=None, parameter=None, size_parameter=None, size_parameter_index=None, real_index=None):\n self.index = index\n self.parameter = parameter\n self.size_parameter_index = size_parameter_index\n self.size_parameter = size_parameter\n self.real_index = real_index\n\nclass int_parameter_info:\n def __init__(self, self_handle=False, ext_index=None, parameter=None, size_parameter=None, size_parameter_index=None,gxclass=None):\n self.self_handle = self_handle\n self.parameter = parameter\n self.size_parameter_index = size_parameter_index\n self.size_parameter = size_parameter\n self.ext_index = ext_index\n self.gxclass = gxclass\n\n\ndef get_rest_docstring_type_name(type_name):\n if type_name in [\"float\", \"bool\", \"int\", \"str\", \"None\"]:\n return type_name\n else:\n return \":class:`geosoft.gxapi.\" + type_name + \"`\"\n\nclass gx_class(gxapi.gxclass.typeDefinition()):\n @memoized\n def is_method_static(self, method):\n if len(method.parameters.parameter) > 0:\n return parse_type(method.parameters.parameter[0].type) != self.name\n else:\n return True\n\n @memoized\n def get_method_ext_parameter_infos(self, method):\n ext_parameter_infos = []\n static = self.is_method_static(method)\n size_of_parameters_set = set()\n for parameter in method.parameters.parameter:\n if parameter.size_of_param:\n size_of_parameters_set.add(parameter.size_of_param)\n\n index = 1\n for i, parameter in enumerate(method.parameters.parameter):\n if (i == 0 and not static) or i in size_of_parameters_set:\n continue\n size_parameter_index = parameter.size_of_param\n size_parameter = None\n if size_parameter_index:\n size_parameter = method.parameters.parameter[size_parameter_index]\n ext_parameter_infos.append(ext_parameter_info(index=index, parameter=parameter, size_parameter=size_parameter, size_parameter_index=size_parameter_index, real_index=i))\n index = index + 1\n return ext_parameter_infos\n\n @memoized\n def get_method_int_parameter_infos(self, method):\n int_parameter_infos = []\n static = self.is_method_static(method)\n size_of_parameters_set = set()\n for parameter in method.parameters.parameter:\n if parameter.size_of_param:\n size_of_parameters_set.add(parameter.size_of_param)\n\n index = 1\n for i, parameter in enumerate(method.parameters.parameter):\n self_handle = False\n if i in size_of_parameters_set:\n ext_index = None\n elif i == 0 and not static:\n ext_index = None\n self_handle = True\n else:\n ext_index = index\n index = 
index + 1\n size_parameter_index = parameter.size_of_param\n size_parameter = None\n if size_parameter_index:\n size_parameter = method.parameters.parameter[size_parameter_index]\n int_parameter_infos.append(int_parameter_info(self_handle=self_handle, ext_index=ext_index, parameter=parameter, size_parameter=size_parameter, size_parameter_index=size_parameter_index, gxclass=get_class(parameter.type)))\n return int_parameter_infos\n\n @memoized\n def is_static(self):\n for methodgroup in self.methodgroups.methodgroup:\n for method in methodgroup.method:\n if not self.is_method_static(method):\n return False\n return True\n\n def _ext_method_name_camel(self, method):\n if method.name == \"iCheckError_SYS\":\n return \"iCheckError\"\n method_postfix = \"_\" + self.name\n method_name = method.external_name()\n if method.name.endswith(method_postfix):\n method_name = method_name[0 : len(method_name) - len(method_postfix)]\n if method_name.startswith(\"_\") or (method_name.startswith(\"I\") and len(method_name) > 2 and (method_name[1] == 'i' or (method_name[1] >= 'A' and method_name[1] <= 'Z'))):\n return method_name[1:]\n return method_name\n\n def _ext_method_name_no_camel(self, method):\n return convert_camel_case(self._ext_method_name_camel(method))\n\n def _ext_method_name_real_to_double(self, method):\n return self._ext_method_name_no_camel((method)).replace(\"_real\", \"_double\")\n\n def _ext_method_name_no_polish(self, method):\n method_name = self._ext_method_name_real_to_double(method)\n return_type = method.cpp_return_type()\n if method_name.startswith(\"i_\") or method_name.startswith(\"r_\"):\n return method_name[2:]\n else:\n return method_name\n\n @memoized\n def ext_method_name(self, method):\n method_name = self._ext_method_name_no_polish(method)\n if method.cpp_pre:\n method_name = method.cpp_pre + method_name\n if method.cpp_post:\n method_name = method_name + method.cpp_post\n if self.name == \"MATH\":\n method_name = method_name + \"_\" # Stops keyword and macro collisions everywhere\n return method_name\n \n @memoized\n def py_method_name(self, method):\n method_name = self.ext_method_name(method)\n return method_name.strip(\"_\")\n\n def get_python_docstring(self):\n docstring = docstring_literal_para(self.description)\n if self.notes:\n docstring = docstring + docstring_literal_note(self.notes)\n return docstring\n\n def generate_sphinx_description(self):\n return generate_sphinx_description(self.description)\n\n def get_python_method_docstring(self, method):\n return_type = method.python_wrap_return_type(no_pointer = True)\n if return_type == \"void\":\n return_type = \"None\"\n elif return_type == \"double\":\n return_type = \"float\"\n elif return_type == \"int32_t\":\n return_type = \"int\"\n\n signature = self.py_method_name(method) + \"(\"\n restructured_text_params = \"\"\n\n first_parameter = False\n for parameter_info in self.get_method_ext_parameter_infos(method):\n if first_parameter:\n signature = signature + \", \"\n else:\n first_parameter = True\n type_name = parameter_info.parameter.cpp_python_docstring_type()\n arg_name = \"arg\" + str(parameter_info.index)\n\n signature = signature + \"(\" + type_name + \")\" + arg_name\n\n restructured_text_params = restructured_text_params + restructured_directive(\":param \" + arg_name + \":\", parameter_info.parameter.description)\n restructured_text_params = restructured_text_params + restructured_directive(\":type \" + arg_name + \":\", get_rest_docstring_type_name(type_name))\n\n signature = signature + \") -> \" 
+ return_type + \":\"\n if method.returnval.description:\n restructured_text_params = restructured_text_params + restructured_directive(\":returns:\", method.returnval.description)\n restructured_text_params = restructured_text_params + restructured_directive(\":rtype:\", get_rest_docstring_type_name(return_type))\n\n docstring = docstring_multi_line(signature) + docstring_literal_para(method.description) + docstring_multi_line(restructured_text_params) + docstring_literal_version(method.available)\n\n if method.notes:\n docstring = docstring + docstring_literal_note(method.notes)\n\n if method.see_also:\n docstring = docstring + docstring_literal_seealso(method.see_also)\n\n return docstring\n\ngxapi.gxclass.typeDefinition()._SetSupersedingClass(gx_class)\n\ndef object_from_pickled_file(pickled_file_path):\n with open(pickled_file_path, 'rb') as f:\n return pickle.load(f)\n\ndef pickle_object_with_makedir(object, pickled_file_path):\n pickled_file_dir = os.path.dirname(pickled_file_path)\n if not os.path.exists(pickled_file_dir):\n os.makedirs(pickled_file_dir)\n with open(pickled_file_path, 'wb') as f:\n pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)\n\ndef gxapi_pickle(api_file_path, pickled_file_path):\n pickle_object_with_makedir(gxapi.CreateFromDocument(open(api_file_path).read()), pickled_file_path)\n\n\ndef gxapi_api_collection_pickle(pickle_source_dirs, pickled_file_path):\n api_coll = GXApiCollectionInfo([], {}, {}, {}, {}, {})\n\n files_dict = {}\n for source_dir in pickle_source_dirs.split(';'):\n for root, subFolders, files in os.walk(source_dir):\n for file in files:\n files_dict[file] = os.path.join(root, file)\n\n for file in OrderedDict(sorted(files_dict.items())):\n gxclass = object_from_pickled_file(files_dict[file])\n api_coll.known_classes[gxclass.name] = gxclass\n if (gxclass.handlename):\n api_coll.known_class_handles[gxclass.handlename] = gxclass\n if gxclass.name == \"GEOSOFT\":\n api_coll.classes.insert(0, gxclass)\n else:\n api_coll.classes.append(gxclass)\n\n for methodgroup in gxclass.methodgroups.methodgroup:\n for method in methodgroup.method:\n api_coll.known_methods[method.name] = { 'gxclass': gxclass, 'method': method }\n\n for definition in gxclass.definitions.definition:\n api_coll.known_definitions[definition.name] = definition\n for defined_value in definition.defined_value:\n api_coll.known_definition_values[defined_value.name] = { 'gxclass': gxclass, 'definition': definition, 'defined_value': defined_value }\n\n pickle_object_with_makedir(api_coll, pickled_file_path)\n\ndef render_template(j2env, namespace_parts, build_version, output_dir, template_name, sort_classes=False):\n output_file = os.path.join(output_dir, template_name)\n print('Rendering: ' + output_file)\n template = j2env.get_template(template_name)\n open(output_file, 'w+').write(template.render(build_version=build_version, classes=sorted(global_collection.classes , key=lambda gxclass: gxclass.name) if sort_classes else global_collection.classes, namespace_parts=namespace_parts))\n\n\ndef render_python_imports(j2env, namespace_parts, output_dir):\n template = j2env.get_template('python_import.cpp')\n\n for gxclass in global_collection.classes:\n # TODO Expose CGEO::GetPtrVM and CGEO::GetPtrVV the way we do in C# (remove\n # comments from python_module.cpp when completed)\n # TODO expose void * and callback methods in PG class in a sensible way and\n # remove the nocpp=\"true\" atribute on them\n if not gxclass.name == \"GEO\":\n output_file = os.path.join(output_dir, \"python_import_\" 
+ gxclass.name + \".cpp\")\n print('Rendering: ' + output_file)\n open(output_file, 'w+').write(template.render(gxclass=gxclass, namespace_parts=namespace_parts))\n\n\ndef render_sphinx_rsts(j2env, namespace_parts, output_dir):\n template = j2env.get_template('class.rst')\n\n for gxclass in global_collection.classes:\n # TODO Expose CGEO::GetPtrVM and CGEO::GetPtrVV the way we do in C# (remove\n # comments from python_module.cpp when completed)\n # TODO expose void * and callback methods in PG class in a sensible way and\n # remove the nocpp=\"true\" atribute on them\n if not gxclass.name == \"GEO\":\n output_file = os.path.join(output_dir, \"GX\" + gxclass.name + \".rst\")\n print('Rendering: ' + output_file)\n open(output_file, 'w+').write(template.render(gxclass=gxclass, namespace_parts=namespace_parts))\n\ndef generate_code(pickled_collection_file, namespace, build_version, output_dir):\n from jinja2 import Environment, FileSystemLoader\n global global_collection\n global __j2env\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n namespace_parts = namespace.split('::')\n\n tools_root = os.getenv('TOOLS_ROOT')\n templates_dir = os.path.join(tools_root, 'msbuild', 'gx_python', 'gxapi_templates')\n astyle_tool = os.path.join(tools_root, 'utils', 'astyle.exe')\n\n j2env = Environment(loader=FileSystemLoader(templates_dir),\n trim_blocks = True,\n lstrip_blocks = True)\n\n global_collection = object_from_pickled_file(pickled_collection_file)\n\n start = time.perf_counter()\n render_template(j2env, namespace_parts, build_version, output_dir, 'gxcpp_geogx.h')\n render_template(j2env, namespace_parts, build_version, output_dir, 'python_ref_wrappers.h')\n render_template(j2env, namespace_parts, build_version, output_dir, 'python_module.cpp')\n render_template(j2env, namespace_parts, build_version, output_dir, 'index.rst', sort_classes=True)\n render_template(j2env, namespace_parts, build_version, output_dir, 'toc.rst', sort_classes=True)\n render_python_imports(j2env, namespace_parts, output_dir)\n render_sphinx_rsts(j2env, namespace_parts, output_dir)\n\n print('Formatting source code...')\n if not 0 == call([astyle_tool, '-n', '-N', '--style=allman', os.path.join(output_dir, '*.h')]):\n raise Exception(astyle_tool + \" error!\")\n if not 0 == call([astyle_tool, '-n', '-N', '--style=allman', os.path.join(output_dir, '*.cpp')]):\n raise Exception(astyle_tool + \" error!\")\n elapsed = time.perf_counter() - start\n print(\"Generation completed in %s seconds\" % elapsed)\n","sub_path":"docs/transform/gxgenutils.py","file_name":"gxgenutils.py","file_ext":"py","file_size_in_byte":29439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"449590914","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib64/python3.6/site-packages/ioflo/base/test/_testFraming.py\n# Compiled at: 2017-12-17 08:35:26\n# Size of source mod 2**32: 2613 bytes\n\n\ndef TestFrame():\n \"\"\"Module Common self test\n\n \"\"\"\n import acting, poking, needing, goaling, doing, traiting, fiating, wanting\n try:\n Frame.Clear()\n f1 = Frame(name='Primero')\n f2 = Frame()\n f3 = Frame()\n f1.attach(f2)\n f1.attach(f3)\n f4 = Frame()\n f5 = Frame()\n f2.attach(f4)\n f3.attach(f5)\n Act = acting.Act\n Transact = acting.Transact\n need = acting.need\n goal = acting.goal\n deed = acting.deed\n trait = acting.trait\n spec = acting.spec\n fiat = acting.fiat\n a = Act(action=need, act=(need.checkDepth), parms=dict(depth=5.0))\n f2.beacts.append(a)\n a = Act(action=goal, act=(goal.setDepth), parms=dict(depth=2.0))\n f2.enacts.append(a)\n a = Act(action=trait, act=(trait.useDepth), parms=dict(depth=3.0))\n f2.reacts.append(a)\n a = Act(action=deed, act=(deed.doDepth), parms=dict(depth=1.0))\n f2.reacts.append(a)\n a = Act(action=trait, act=(trait.useDepth), parms=dict(depth=6.0))\n f2.exacts.append(a)\n t = Transact()\n a = Act(action=need, act=(need.checkDepth), parms=dict(depth=4.0))\n t.needs.append(a)\n t.far = f5\n f2.preacts.append(t)\n a = Act(action=deed, act=(deed.doDepth), parms=dict(depth=1.0))\n f2.preacts.append(a)\n t = Transact()\n a = Act(action=need, act=(need.checkDepth), parms=dict(depth=1.5))\n t.needs.append(a)\n t.far = f4\n f5.preacts.append(t)\n f6 = Frame()\n a = Act(action=trait, act=(trait.useDepth), parms=dict(depth=10.0))\n f6.reacts.append(a)\n fr1 = Framer()\n fr1.first = f6\n f3.auxes.append(fr1)\n fr2 = Framer()\n fr2.first = f1\n fr2.runner.send(START)\n for i in xrange(3):\n status = fr2.runner.send(RUN)\n\n except excepting.ParameterError as ex:\n console.terse(ex)\n raise\n\n return f1\n\n\ndef Test():\n \"\"\"Module Common self test\n\n \"\"\"\n TestFrame()\n\n\nif __name__ == '__main__':\n Test()","sub_path":"pycfiles/ioflo-py3.6-dev-1.7.5.linux-x86_64.tar/_testFraming.cpython-36.py","file_name":"_testFraming.cpython-36.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"555527107","text":"import re\nfrom ..where_abstract import WhereAbstract\nfrom ...mapping.field import Field\nfrom ...common.common_define import CommonDefine\n\n\nclass Where(WhereAbstract):\n sql = \"\"\n param_dict = {}\n operator_dict = {\n \"eq\": \"=\",\n \"neq\": \"<>\",\n \"gt\": \">\",\n \"ge\": \">=\",\n \"lt\": \"<\",\n \"le\": \"<=\",\n \"in\": \"IN\",\n \"nin\": \"NOT IN\",\n \"like\": \"LIKE\",\n \"fis\": \"FIND_IN_SET\"\n }\n\n def add_and(self, *args):\n args = args + (self,)\n self.sql = self.__get_sql_expression(args, 1)\n return self\n\n def add_or(self, *args):\n args = args + (self,)\n self.sql = self.__get_sql_expression(args, 2)\n return self\n\n def get_and(self, *args):\n result = self.__get_sql_expression(args, 1)\n return result\n\n def get_or(self, *args):\n result = self.__get_sql_expression(args, 2)\n return result\n\n def or_and(self, *args):\n sql = self.__get_sql_expression(args, 1)\n return self.add_or(sql)\n\n def and_or(self, *args):\n sql = self.__get_sql_expression(args, 2)\n return self.add_and(sql)\n\n def __get_sql_expression(self, args, set_type):\n if args is None:\n return \"\"\n\n sql = \"\"\n for item in args:\n temp_sql = \"\"\n if type(item) == str and item != \"\":\n temp_sql = item\n elif isinstance(item, Where):\n temp_sql = item.sql\n self.param_dict = {**self.param_dict, **item.param_dict}\n else:\n expression_dict = self.get_expression(item, self.param_dict)\n temp_sql = expression_dict[\"sql\"]\n self.param_dict = expression_dict[\"param_dict\"]\n\n temp_str = re.sub(r\"\\(.*\\)\", \"\", temp_sql)\n if (\" AND \" in temp_str and set_type == 2) or (\" OR \" in temp_str and set_type == 1):\n temp_sql = \"(\" + temp_sql + \")\"\n\n if not temp_sql:\n continue\n\n if not sql:\n sql = temp_sql\n else:\n if set_type == 1:\n sql += \" AND \" + temp_sql\n else:\n sql += \" OR \" + temp_sql\n return sql\n\n def get_expression(self, condition, param_dict):\n sql = \"\"\n field_name = condition.field_name\n if condition.alias_table_name is not None:\n field_name = condition.alias_table_name + \".\" + field_name\n\n if isinstance(condition.value, Field):\n if condition.value.alias_table_name is not None:\n sql = field_name + self.operator_dict[\n condition.operator] + condition.value.alias_table_name + \".\" + condition.value.field_name\n else:\n sql = field_name + \\\n self.operator_dict[condition.operator] + \\\n condition.value.field_name\n else:\n\n if condition.value is None:\n if condition.operator == CommonDefine.OPERATOR_EQ:\n sql = field_name+\" IS NULL\"\n elif condition.operator == CommonDefine.OPERATOR_NEQ:\n sql = field_name + \" IS NOT NULL\"\n else:\n param_name = \"p\" + str(CommonDefine.SQL_PARAMETER_INDEX)\n CommonDefine.SQL_PARAMETER_INDEX += 1\n if condition.operator == CommonDefine.OPERATOR_IN or condition.operator == CommonDefine.OPERATOR_NIN:\n in_str = \"\"\n temp_value = []\n if isinstance(condition.value, str):\n temp_value = condition.value.split(',')\n if isinstance(temp_value, list) or isinstance(temp_value, tuple):\n in_index = 0\n for value in temp_value:\n if value:\n temp_name = param_name + \"_\" + in_index\n in_str = \":\"+temp_name + \",\"\n param_dict[temp_name] = value\n CommonDefine.SQL_PARAMETER_INDEX += 1\n\n in_str = in_str.strip(',')\n sql = field_name + \" \" + \\\n self.operator_dict[condition.operator] + \\\n \"(:\" + param_name + \")\"\n elif (condition.operator == CommonDefine.OPERATOR_FIND_IN_SET):\n param_dict[param_name] = condition.value\n sql = self.operator_dict[condition.operator] 
+ \\\n \"(:\" + param_name + \",\" + field_name + \")\"\n else:\n param_dict[param_name] = condition.value\n sql = field_name + \" \" + \\\n self.operator_dict[condition.operator] + \\\n \" :\" + param_name\n\n return {\"sql\": sql, \"param_dict\": param_dict}\n","sub_path":"lingorm/drivers/mysql/where.py","file_name":"where.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"344067628","text":"#!/usr/bin/env python3\n\n# Author: wwong3\n# Date: 2019-Jan-31\n# Purpose: 03-python Grad Grid Homework\n\n\n\"\"\"grid\"\"\"\n\nimport os\nimport sys\n\ndef main():\n num = sys.argv[1:]\n \n\n# Error message with usage\n if len(num) == 0:\n print('Usage: {} NUM'.format(os.path.basename(sys.argv[0])))\n sys.exit(1)\n\n# Error message if more than one arguments\n if len(num)>1:\n print('Usage: {} NUM'.format(os.path.basename(sys.argv[0])))\n sys.exit(1) \n\n# First if statement: Error if num is not between 2 and 9\n# Elif statement: If num is between 2 and 9, will display grid \n num=int(num[0]) #changes num from a list to an integer\n if not 2<= num <10:\n print('NUM ({}) must be between 1 and 9'.format(num))\n sys.exit(1)\n elif 1 < num < 10:\n last_num=num+1 # to include last number through indexing\n for j in range (1,last_num): # iterate through cols\n for i in range(1,last_num): # iterate through rows \n print('{:>3d}'.format(i+num*(j-1)),end='')\n # i+num*(j-1) is pattern for grid output\n # don't use {:>2d) with end=' ' > will give you extra space at end of line\n #{:>3d} is string formatting\n # 3=num of character field, d=decimal, > means right-aligned\n print('') #print new line\n \n exit(0)\nmain()\n","sub_path":"assignments/03-python-grad/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"134722792","text":"from bs4 import BeautifulSoup\nimport requests\n\nurl1 = 'https://webcams.nyctmc.org/multiview2.php'\nr1 = requests.get(url1)\nsoup1 = BeautifulSoup(r1.text, 'html.parser')\n\nhtmlTables = [[\"Manhattan\",\"tableCam\"],[\"Brooklyn\",\"tableCam2\"]]\nfor htmlTable in htmlTables:\n cameraList = soup1.find('table', id=htmlTable[1])\n\n cameraIDs = []\n cameraURLs = []\n cameraNames = []\n boroughName = htmlTable[0]\n\n for i in cameraList('tr'):\n idRow = i.find('input')\n try:\n if 'value' in idRow.attrs:\n cameraIDs.append(idRow.get('value'))\n except:\n pass\n\n for cameraID in cameraIDs: \n url2 = 'https://webcams.nyctmc.org/multiview2.php?listcam=' + cameraID\n r2 = requests.get(url2)\n soup2 = BeautifulSoup(r2.text, 'html.parser')\n cameraURL = soup2.find('img', id=\"repCamView__ct0_imgLink\").get('src')\n cameraURLs.append(cameraURL)\n cameraName = soup2.find('td', {\"class\": \"TitleCam\"}).getText()\n cameraNames.append(cameraName)\n\n for i in range(0,len(cameraIDs)):\n print(cameraNames[i] + \",\" + cameraIDs[i] + \",\" + cameraURLs[i] + \",\" + boroughName)\n","sub_path":"scraper/nycdotcams.py","file_name":"nycdotcams.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"233032055","text":"# Создать список и заполнить его элементами различных типов данных.\r\n# Реализовать скрипт проверки типа данных каждого элемента.\r\n# Использовать функцию type() для проверки типа.\r\n# Элементы списка можно не запрашивать у пользователя, а указать явно, в программе.\r\n\r\nmy_list = [\r\n # целое число (int)\r\n 12345,\r\n\r\n # дробное число (float)\r\n 12345.99,\r\n\r\n # строка (str)\r\n '12345',\r\n 'one two three four five',\r\n\r\n # список (list)\r\n [12345, '12345'],\r\n list('12345'),\r\n\r\n # кортеж (tuple)\r\n (12345, '12345'),\r\n tuple('12345'),\r\n\r\n # множество (set и frozenset)\r\n {1, 2, 3},\r\n frozenset({1, 2, 3}),\r\n set('123'),\r\n frozenset('123'),\r\n\r\n # словарь (dict)\r\n {'one': 1, 'two': 2},\r\n dict(one=1, two=2),\r\n\r\n # булеан (bool)\r\n True,\r\n False,\r\n\r\n # байты (bytes и bytearray)\r\n b'text',\r\n bytes(b'text'),\r\n bytearray(b'text'),\r\n\r\n # NoneType\r\n None,\r\n]\r\n\r\nfor el in my_list:\r\n print(f'Элемент = {el}, тип элемента = {type(el)}')\r\n","sub_path":"2. Lesson_2/les02_1.py","file_name":"les02_1.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"330247433","text":"\"\"\"\nYou are given an m x n integer matrix heights representing the height\nof each unit cell in a continent. The Pacific ocean touches the continent's\nleft and top edges, and the Atlantic ocean touches the continent's right and bottom edges.\n\nWater can only flow in four directions: up, down, left, and right. Water\nflows from a cell to an adjacent one with an equal or lower height.\n\nReturn a list of grid coordinates where water can flow to both the\nPacific and Atlantic oceans.\n\nStatus: Incomplete\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:\n n_rows, n_cols = len(heights), len(heights[0])\n visit = [[\"unvisited\"] * n_cols for _ in range(n_rows)]\n atlantic = [[False] * n_cols for _ in range(n_rows - 1)] + [[True] * n_cols]\n pacific = [[True] * n_cols] + [[False] * n_cols for _ in range(n_rows - 1)]\n res = []\n\n for row in range(n_rows):\n pacific[row][0] = atlantic[row][-1] = True\n\n print(atlantic)\n print(pacific)\n\n def get_valid_neighbours(row, col):\n res = [(row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)]\n res = [(row, col) for row, col in res\n if 0 <= row < n_rows and 0 <= col < n_cols] # and visit[row][col] == \"unvisited\"\n return [(r, c) for r, c in res if heights[r][c] <= heights[row][col]]\n\n def dfs(row, col):\n print(row, col, atlantic)\n if visit[row][col] == \"visited\":\n return\n visit[row][col] = \"visiting\"\n if not (atlantic[row][col] and pacific[row][col]):\n neighbours = get_valid_neighbours(row, col)\n print(neighbours)\n for neighbour_row, neighbour_col in neighbours:\n dfs(neighbour_row, neighbour_col)\n if atlantic[neighbour_row][neighbour_col]:\n atlantic[row][col] = True\n if pacific[neighbour_row][neighbour_col]:\n pacific[row][col] = True\n visit[row][col] = \"visited\"\n if atlantic[row][col] and pacific[row][col]:\n res.append([row,col])\n\n for row in range(n_rows):\n for col in range(n_cols):\n if visit[row][col] == \"unvisited\":\n dfs(row, col)\n print(\"---\")\n\n print(atlantic)\n print(pacific)\n\n return res\n\n\ns = Solution()\nprint(s.pacificAtlantic([[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]))\n# print(s.pacificAtlantic([[2, 1], [1, 2]]))\n","sub_path":"Pacific Atlantic Water Flow.py","file_name":"Pacific Atlantic Water Flow.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"298355270","text":"import csv\nfrom pygal.maps.world import COUNTRIES, World\nfrom pygal.style import RotateStyle as RS, LightColorizedStyle as LCS\n\nfrom country_codes import get_country_code\n\nfilename='inflation_csv.csv'\nwith open(filename) as f:\n\tdata=csv.reader(f)\n\theader_row=next(data)\n\theader_row=next(data)\n\theader_row=next(data)\n\theader_row=next(data)\n\theader_row=next(data)\n#countries={}\n\tcountries_list={}\n\t\n\tfor countries in data:\n\t\tcountry=countries[0]\n\t\tvalue=countries[51]\n\t\tprint(value)\n\t\tcode=get_country_code(country)\n\t\tif code:\n\t\t\tcountries_list[code]=value\n\n# Group the countries into 3 population levels.\ncc_pops_1, cc_pops_2, cc_pops_3 = {},{},{}\nfor cc, pop in countries_list.items():\n\tif pop < 1:\n\t\tcc_pops_1[cc] = pop\n\telif pop < 10:\n\t\tcc_pops_2[cc] = pop\n\telse:\n\t\tcc_pops_3[cc] = pop\n\t\t\n# Styling world maps in pygal\nwm_style = RS('#108080',base_style=LCS)\nwm= World(style=wm_style)\n\n# See how many countries are in each level.\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\n\n#wm=World()\nwm.title='World GDP in 2016, by Country'\nwm.add('0-10m', cc_pops_1)\nwm.add('10m-1b', cc_pops_2)\nwm.add('>1bn', cc_pops_3)\n\nwm.render_to_file('world_gdp_2016.svg')\n","sub_path":"chapter16/inflation.py","file_name":"inflation.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"403551638","text":"'''PSR Exercise sheet 5'''\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as optimize\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.interpolate import spline\nfrom sklearn.decomposition import PCA\nimport random \n\n#comment\n# known dimensions to load the data\nM = 50\nm = 677970\n\ndef load_data(fname, skipset_columns = {}):\n print (\"Data loading...\")\n # load from folder\n raw_array = np.fromfile(fname, dtype=np.float32)\n D = [[ raw_array[item_idx] for item_idx in range(row_idx*M, (row_idx+1)*M) if item_idx not in {x+row_idx*M for x in skipset_columns}] for row_idx in range(m) if row_idx%100==0]\n print(\"Data loaded.\")\n return D\n\ndef plotPCA2d(data):\n plt.title(\"PCA\")\n plt.xlabel(\"First dim of pca\")\n plt.ylabel(\"Second dim of pca\")\n s = [2 for n in range(len(data))]\n plt.scatter([val[0] for val in data], [ val[1] for val in data], s=s)\n plt.grid(True)\n plt.savefig('pca_result2d.png')\n plt.show()\n\ndef plotPCA3d(data):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n s = [1 for n in range(len(data))]\n ax.scatter([val[0] for val in data], [ val[1] for val in data], [ val[2] for val in data], s=s)\n plt.grid(True)\n plt.savefig('pca_result3d.png')\n plt.show()\n\ndef GMM(data, numberOfClusters):\n init()\n # expectation()\n # maximization()\n\n # return means, covarianceMatrices, priors\n\ndef random_points_mean(data):\n #choose 5 random values\n #ran_val = random.sample(data, 5)\n indices = [x for x in range(0, len(data))]\n ran_ind = random.sample(indices, 5)\n #print(\"random indices\", ran_ind)\n ran_val = []\n for i in ran_ind:\n ran_val.append(data[i])\n #print(\"random values\", ran_val)\n sum_x = 0\n sum_y = 0\n #print(\"type\", type(ran_val[0][1]))\n for i in range(0, 5):\n sum_x += ran_val[i][0]\n sum_y += ran_val[i][1]\n\n print(ran_val[i][0])\n mean_x = sum_x / 5\n mean_y = sum_y / 5\n mean = [mean_x, mean_y]\n #print(\"our mean\", mean_x, mean_y)\n return mean\n\ndef init(data, numberOfClusters):\n # means of 5 random points\n init_means = []\n\n #calculate means by 5 random points for each potential cluster:\n for i in range(0, numberOfClusters):\n init_means.append(random_points_mean(data))\n print(\"means:\", init_means)\n\n #Initializing the covariance matrices to identity matrices.\n covarianceMatrices = []\n for i in range(0, numberOfClusters):\n covarianceMatrices.append([[1,0],[0,1]])\n #print (first) two initial covariance matrices:\n print(\"first covariance matrix: \", covarianceMatrices[0])\n print(\"second covariance matrix: \", covarianceMatrices[1])\n\n\n priors = []\n\n # init step\n\n mixture_components = [1/numberOfClusters for index in range(numberOfClusters)]\n\n\n#ex5\ndef rmse(vec1, vec2):\n N = len(vec1)\n distance = 0\n if(len(vec1) == len(vec2)):\n for idx in range(N):\n distance+=(vec1[idx]-vec2[idx])*(vec1[idx]-vec2[idx])\n distance/=N\n return math.sqrt(distance)\n\n# ex6\ndef associate(data, initial_means):\n result_vector = []\n clusters_number = len(initial_means)\n if(clusters_number>0):\n for idx, value in enumerate(data):\n nearest_cluster_index = len(initial_means)-1\n max_value = np.inf\n for idx_mean, mean_vector in enumerate(initial_means):\n current_distance = rmse(mean_vector, data[idx] if type(data[idx]) is list else [data[idx]])\n if max_value >= current_distance:\n max_value = current_distance\n nearest_cluster_index = idx_mean\n result_vector.append(nearest_cluster_index)\n\n return result_vector\n\n \ndef add_vec(vec1, 
vec2):\n if type(vec1) is not list:\n vec1 = [vec1]\n if type(vec2) is not list:\n vec2 = [vec2] \n print (vec1, vec2)\n return [vec2[idx]+vec1[idx] for idx in range(len(vec1))]\n\n#7-8\ndef compute_means(corpus, association, k):\n cluster_centroids = [ [0] for row_idx in range(k)] # k*N matrix, k - number of clusters, N - data dim, e.g. 49\n number_of_data_points_per_cluster = [0 for x in range(k)]\n for idx, current_data_point in enumerate(corpus):\n number_of_data_points_per_cluster[association[idx]]+=1\n cluster_centroids[association[idx]] += current_data_point\n cluster_centroids = [[val/number_of_data_points_per_cluster[j] for val in cluster_centroids[j]] for j in range(k)]\n return cluster_centroids\n\ndef recompute(data_matrix, init_clusters_means):\n print(\"init_clusters_means\", init_clusters_means)\n old_association = associate(data_matrix, init_clusters_means)\n new_means = compute_means(data_matrix, old_association, len(init_clusters_means))\n print(\"new means\", new_means)\n new_association = associate(data_matrix, new_means)\n number_of_point_that_changed_clusters = 0\n for idx in range(len(new_association)):\n if new_association[idx]!=old_association[idx]:\n number_of_point_that_changed_clusters+=1\n return number_of_point_that_changed_clusters, new_means, new_association\n\n\ndef kmeans(data_matrix, init_clusters_means):\n number_of_point_that_changed_clusters, new_means, new_association = recompute(data_matrix, init_clusters_means)\n i = 0\n print(\"Amount of points that changed clusters: \", number_of_point_that_changed_clusters)\n while number_of_point_that_changed_clusters!=0:\n i+=1\n print (\"Iteration - \", i)\n number_of_point_that_changed_clusters, new_means, new_association = recompute(data_matrix, new_means)\n print(\"Amount of points that changed clusters: \", number_of_point_that_changed_clusters)\n return new_means, new_association\n\ndef get_mean_and_centered(X):\n mean_matrix = []\n set_vers_matrix_centered = X\n for index_av in range(len(X[0])):\n mean_matrix.append(np.mean(X[:,index_av])) \n for index_experiment in range(len(X)):\n set_vers_matrix_centered[index_experiment][index_av] = X[index_experiment][index_av]-mean_matrix[index_av]\n return mean_matrix, set_vers_matrix_centered\n\n\ncorpus = load_data(\"corpus\", {0})\nprint(len(corpus))\n# index_min = corpus.index(min(corpus))\n# index_max = corpus.index(max(corpus))\n\npca2 = PCA(n_components=2)\npca_result2 = pca2.fit_transform(corpus)\nprint(\"PCA result: \\n\", pca_result2)\n#print(\"PCA data type \", type(pca_result2))\n\npca3 = PCA(n_components=3)\npca_result3 = pca3.fit_transform(corpus)\nprint(\"PCA result: \\n\", pca_result3)\n# first_dim = [val[0] for val in pca_result]\n# print(\"First dimension of PCA result: \\n\", first_dim)\nplotPCA2d(pca_result2)\nplotPCA3d(pca_result3)\ninit(pca_result2, 2)\n# # trying to get most distinct points as initial values for clusters\n# init_clusters = [[-1.2], [1.0]] \n\n# print(\"Clusters initialized: \", init_clusters)\n# new_means, last_association = kmeans(first_dim, init_clusters)\n# print(\"new_means: \", new_means)\n# for cluster_idx in range(len(init_clusters)):\n# print(\"#12: mean vector \", cluster_idx, \" \\n\", new_means[cluster_idx])\n\n# #11\n# plotting(first_dim, new_means, init_clusters)\n\n# #computing wigths in a way as computing probabilities of point to be in each cluster\n# # for two clusters if K1 - amount of point from cluster1, and K2- cluster2,\n# # so weight(cluster1) = K1/(K1+K2) and weight(cluster2) = K2/(K1+K2),\n# # in 
common case we have weight(clusterN) = Kn/amount(data_point)\n\n# mean_matrix, centered = get_mean_and_centered(first_dim)\n\n# print('Mean vector: \\n',mean_matrix)\n# print('Centered matrix: \\n',centered)\n# print('Covariance matrix: \\n', np.cov(first_dim.T))\n# cluster_weights = [0 for i in range(len(init_clusters))] # initialize with zero weights\n# for idx in range(len(last_association)):\n# cluster_weights[last_association[idx]]+=1\n# cluster_weights = [value/len(first_dim) for value in cluster_weights]\n# print(\"weights of clusters: \", cluster_weights)\n","sub_path":"ex6/source/PSR6.py","file_name":"PSR6.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"460073198","text":"#pvprograms.weebly.com\n#Sums in Loop\n\ndef main():\n test = int(input()) #number of test cases\n nums = [] #List of lists\n for i in range(test):\n a = input()\n #split the string - separated with space\n a = a.split(' ')\n #turn them to int first before appending to the main lists\n a[0] = int(a[0])\n a[1] = int(a[1])\n #append a to the nums list\n nums.append(a)\n #print the sum of the sub list - a\n for a in nums:\n print(sum(a), end=\" \")\n\n#call main function\nmain()\n","sub_path":"PY/Sums in Loop.py","file_name":"Sums in Loop.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"347693283","text":"import json\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom ex import exceptions\n\ndynamodb = boto3.resource(\"dynamodb\")\ntable = dynamodb.Table(\"DemoServerless\")\n\ndef get(codigo):\n\tprint(codigo)\n\t\n\ttry:\n\t\tresponse = table.get_item(\n\t\t\tKey = {\n\t\t\t\t\"hk\": \"EMPLEADO\",\n\t\t\t\t\"sk\": codigo\n\t\t\t}\n\t\t)\n\t\tprint(response)\n\n\texcept ClientError as e:\n\t\traise exceptions.InternalServerError(e.response['Error']['Message'])\n\n\telse:\n\t\tif \"Item\" not in response:\n\t\t\traise exceptions.NotFound(\"No existe el código '{}'\".format(codigo))\n\n\t\tregistro = response['Item']\n\t\tregistro[\"codigo\"] = registro[\"sk\"]\n\t\tdel registro[\"hk\"]\n\t\tdel registro[\"sk\"]\n\t\tdel registro[\"busqueda\"]\n\t\t\n\t\treturn registro\n","sub_path":"api/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"551965830","text":"try:\r\n import argparse\r\nexcept ImportError:\r\n print(\"Please check if module 'argparse' is installed\")\r\n quit()\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--tab', type=argparse.FileType('r'), required=True)\r\nparser.add_argument('--kegg', type=str, required=True, help=\"The ID of the analyzed pathway in the KEGG.\\n\"\r\n \"For instance: ko04310 or ko04350\")\r\nparser.add_argument('--out', type=str, required=True, help=\"Prefix for output files\")\r\nargs = parser.parse_args()\r\n\r\n\r\ndef table_parsing(tab, contig_dict):\r\n header = tab.readline()\r\n for line in tab:\r\n description = line.strip().split(\"\\t\")\r\n contig, pathway, sites_significant, sites_head_cluster, sites_tail_cluster, head_significant, head_cluster, \\\r\n tail_significant, tail_cluster = description[0], description[12], description[23], description[24], \\\r\n description[25], description[26], description[27], description[28], \\\r\n description[29]\r\n contig_dict[contig] = {\"pathway\": pathway, \"sites_significant\": sites_significant,\r\n \"sites_head_cluster\": sites_head_cluster, \"sites_tail_cluster\": sites_tail_cluster,\r\n \"head_significant\": head_significant, \"head_cluster\": head_cluster,\r\n \"tail_significant\": tail_significant, \"tail_cluster\": tail_cluster}\r\n\r\n\r\ndef append_contig_to_significant(kegg_dict, contig, values, values_tag, kegg_tag):\r\n if values[values_tag] != '-':\r\n kegg_dict[kegg_tag].append(contig)\r\n\r\n\r\ndef append_contig_to_cluster(kegg_dict, contig, values, values_tag):\r\n if values[values_tag] != '-':\r\n if values[values_tag] not in kegg_dict.keys():\r\n kegg_dict[values[values_tag]] = []\r\n kegg_dict[values[values_tag]].append(contig)\r\n\r\n\r\ndef kegg_summary(contig_dict, kegg, kegg_dict):\r\n for contig, values in contig_dict.items():\r\n if kegg in values[\"pathway\"].split(\",\"):\r\n kegg_dict[\"total\"].append(contig)\r\n append_contig_to_significant(kegg_dict, contig, values, \"sites_significant\", \"sites\")\r\n append_contig_to_significant(kegg_dict, contig, values, \"head_significant\", \"head\")\r\n append_contig_to_significant(kegg_dict, contig, values, \"tail_significant\", \"tail\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"sites_head_cluster\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"sites_tail_cluster\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"head_cluster\")\r\n append_contig_to_cluster(kegg_dict, contig, values, \"tail_cluster\")\r\n\r\n\r\ndef output_writing(out, kegg, kegg_dict):\r\n cluster_keys = [cluster for cluster in kegg_dict.keys() if cluster not in [\"total\", \"sites\", \"head\", \"tail\"]]\r\n\r\n with open(\"{out}.{kegg}_summary.tsv\".format(out=out, kegg=kegg), 'a') as output:\r\n output.write(\"### In total, {count} sequences are assigned to {kegg} pathway\\n\"\r\n \"### Among them:\\n\"\r\n \"### {sites} were previously classified as sites-significant:\\t{sites_contigs}\\n\"\r\n \"### {head} were previously classified as head-significant:\\t{head_contigs}\\n\"\r\n \"### {tail} were previously classified as tail-significant:\\t{tail_contigs}\\n\"\r\n \"Cluster\\tNumber of 'pathway'-contigs in cluster\\tContigs (comma separated)\\n\".format(\r\n count=len(set(kegg_dict[\"total\"])), kegg=kegg,\r\n sites=len(set(kegg_dict[\"sites\"])), sites_contigs=\",\".join(set(kegg_dict[\"sites\"])),\r\n head=len(set(kegg_dict[\"head\"])), head_contigs=\",\".join(set(kegg_dict[\"head\"])),\r\n 
tail=len(set(kegg_dict[\"tail\"])), tail_contigs=\",\".join(set(kegg_dict[\"tail\"]))\r\n ))\r\n for cluster in cluster_keys:\r\n output.write(\"{cluster}\\t{length}\\t{contigs}\\n\".format(cluster=cluster, length=len(set(kegg_dict[cluster])),\r\n contigs=\",\".join(set(kegg_dict[cluster]))))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n contig_dict, kegg_dict = {}, {\"total\": [], \"sites\": [], \"head\": [], \"tail\": []}\r\n print(\"***** Input table parsing *****\")\r\n table_parsing(args.tab, contig_dict)\r\n print(\"***** Search for sequences related to {kegg} *****\".format(kegg=args.kegg))\r\n kegg_summary(contig_dict, args.kegg, kegg_dict)\r\n print(\"***** Output file writing *****\")\r\n output_writing(args.out, args.kegg, kegg_dict)","sub_path":"Pdum_KEGG_analysis.py","file_name":"Pdum_KEGG_analysis.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"255867402","text":"from aws_resources.dynamo import build_update_expression, build_update_attributes_dictionary, table\nfrom boto3.dynamodb.conditions import Key\nimport uuid\n\ndef resolve_hang(obj, info, id):\n hang = table().query(\n KeyConditionExpression=Key('id').eq(id)\n )['Items'][0]\n return hang\n\ndef resolve_hangs(obj, info):\n ids = obj['hangs']\n return map(lambda id: resolve_hang(obj, info, id), ids)\n\ndef create_hang(obj, info, hang):\n id = str(uuid.uuid4())\n hang['id'] = id\n table().put_item(Item=hang)\n return {\n 'hang': hang,\n 'message': 'success',\n 'code': 200,\n 'success': True\n }\n\ndef update_hang(obj, info, hang):\n attributes_to_update = build_update_attributes_dictionary(hang)\n update_expression = build_update_expression(hang)\n table().update_item(\n Key={\n 'id': hang['id']\n },\n UpdateExpression=update_expression,\n ExpressionAttributeValues=attributes_to_update,\n )\n updated_hang = table().query(\n KeyConditionExpression=Key('id').eq(hang['id'])\n )['Items'][0]\n\n return {\n 'hang': updated_hang,\n 'message': 'success',\n 'code': 200,\n 'success': True\n }","sub_path":"features/Hangs/hang.py","file_name":"hang.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"2681962","text":"REQUEST_HEADERS = {\n 'sec-fetch-dest': 'document',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-site': 'cross-site',\n 'sec-fetch-user': '?1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',\n}\n\nURL = 'https://overclockers.ru/lab?offset=-180&max=200'\n\nMAX_ARTICLES_ON_PAGE = 200\n\nRESULT_FILENAME = 'result.xlsx'\nNEW_SHEET = 'Нова сторінка'\nCOLUMN_SIZE = (150,\n 100,\n 20,\n 20,\n 20)\nTITLE_COLUMNS = ('Посилання',\n 'Назва',\n 'Автор',\n 'Дата створення',\n 'Категорія')\n\nDB_FILENAME = 'db.sqlite'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"235762876","text":"import sys\nimport tkinter as tk\nimport sqlite3\nfrom pathlib import Path\nfrom random import randrange\nimport time\n\nfrom logging import getLogger, StreamHandler, DEBUG\n\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nimport sqlite_manager\n\ndatabase_path = str(Path(__file__).parent.parent / Path(\"database/alias_snippets\"))\nsm = sqlite_manager.SqliteManager(db_path=database_path)\n\nclass TkinterManager():\n def __init__(self, title):\n self.title = title\n\n def remove_empty(self, text):\n return text.replace(\" \", \"\").replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\'\", \"\\\"\")\n\n def address_specific_letter(self, text):\n return text.replace(\"\\'\", \"\\\"\")\n\n def insert_alias_snippet(self):\n try:\n alias = self.remove_empty(entry1.get())\n snippet = self.address_specific_letter(entry2.get())\n\n query = \"\"\"\n insert into alias_snippets(alias_name,snippet) values('%s','%s')\n \"\"\" % (alias, snippet)\n\n messages = []\n loop_num = 1\n for i in range(loop_num):\n res_query = sm.execute_query(query,loop_num=5,sleep_time=0.2)\n messages.append(str(res_query))\n\n #sm.display_message(message=\"\\n\".join(messages))\n if res_query is None:\n sm.display_message(message=\"Failed\")\n else:\n sm.display_message(message=\"Ok\")\n \n # if res_query is not None:\n # sm.display_message(message=f\"Had success in Registering alias {alias}\")\n # else:\n # sm.display_message(message=f\"Failed to register {alias}\")\n # sm.display_message(message=f\"{res_query}\")\n\n except Exception as e:\n sm.display_message(message=e)\n finally:\n root.destroy()\n\n def register_snippet(self):\n global entry1, entry2, root\n root = tk.Tk()\n root.title(\"Productive Alias-Snippets\")\n\n w = 300\n h = 200\n\n ws = root.winfo_screenwidth()\n hs = root.winfo_screenheight()\n x = (ws / 2) - (w / 2)\n y = (hs / 2) - (h / 2) - 200\n\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n label1 = tk.Label(root, text=self.title, font=(\"\", 16), height=2)\n label1.pack(fill=\"x\")\n frame1 = tk.Frame(root, pady=10)\n frame1.pack()\n label2 = tk.Label(frame1, font=(\"\", 14), text=\" alias \")\n label2.pack(side=\"left\")\n entry1 = tk.Entry(frame1, font=(\"\", 14), justify=\"center\", width=15)\n entry1.pack(side=\"left\")\n frame2 = tk.Frame(root, pady=10)\n frame2.pack()\n label3 = tk.Label(frame2, font=(\"\", 14), text=\"snippet\")\n label3.pack(side=\"left\")\n entry2 = tk.Entry(frame2, font=(\"\", 14), justify=\"center\", width=15)\n entry2.pack(side=\"left\")\n button4 = tk.Button(root, text=\"Register\", font=(\"\", 16), width=20, bg=\"gray\",\n command=self.insert_alias_snippet)\n button4.pack()\n root.mainloop()\n\n def update_database(self):\n\n try:\n alias = self.remove_empty(entry1.get())\n snippet = self.address_specific_letter(entry2.get())\n\n message = list()\n query = \"\"\"\n update alias_snippets set deleted_at = CURRENT_TIMESTAMP where alias_name = '%s'\n \"\"\" % alias\n res = sm.execute_query(query, is_update=True)\n logger.debug(res)\n\n deleted_alias = alias + str(randrange(9999999999999999))\n query = \"\"\"\n update alias_snippets set alias_name = '%s' where alias_name = '%s'\n \"\"\" % (deleted_alias, alias)\n res = sm.execute_query(query, is_update=True)\n logger.debug(res)\n\n query = \"\"\"\n insert into alias_snippets(alias_name,snippet) values('%s','%s')\n \"\"\" % (alias, snippet)\n\n res = sm.execute_query(query, 
is_update=False)\n sm.display_message(message=res)\n except Exception as e:\n sm.display_message(message=e)\n finally:\n root.destroy()\n\n def update_snippet(self):\n global entry1, entry2, root\n root = tk.Tk()\n root.title(\"Productive Alias-Snippets\")\n\n w = 300\n h = 200\n\n ws = root.winfo_screenwidth()\n hs = root.winfo_screenheight()\n x = (ws / 2) - (w / 2)\n y = (hs / 2) - (h / 2) - 200\n\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n label1 = tk.Label(root, text=self.title, font=(\"\", 16), height=2)\n label1.pack(fill=\"x\")\n frame1 = tk.Frame(root, pady=10)\n frame1.pack()\n label2 = tk.Label(frame1, font=(\"\", 14), text=\" alias \")\n label2.pack(side=\"left\")\n entry1 = tk.Entry(frame1, font=(\"\", 14), justify=\"center\", width=15)\n entry1.pack(side=\"left\")\n frame2 = tk.Frame(root, pady=10)\n frame2.pack()\n label3 = tk.Label(frame2, font=(\"\", 14), text=\"snippet\")\n label3.pack(side=\"left\")\n entry2 = tk.Entry(frame2, font=(\"\", 14), justify=\"center\", width=15)\n entry2.pack(side=\"left\")\n button4 = tk.Button(root, text=\"Register\", font=(\"\", 16), width=20, bg=\"gray\",\n command=self.update_database)\n button4.pack()\n root.mainloop()\n","sub_path":"modules/tkinter_manager.py","file_name":"tkinter_manager.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"125490143","text":"from flask import Flask, Blueprint, render_template, request, flash, redirect, url_for\nfrom flask_login import login_required, current_user\nimport os, datetime, logging, json\n\nimport utils\nfrom models import Problem, Submission\nfrom exts import db\n\nfrom judger import judge, manage\n\nlog = logging.getLogger('Judger')\n\nsubmit_page = Blueprint('submit_page',\n\t\t\t\t\t\t__name__,\n\t\t\t\t\t\ttemplate_folder=os.path.join(utils.cur_path(__file__), 'templates'))\n\n@submit_page.route('/submit', methods=['GET','POST'])\n@login_required\ndef submit_handle():\n\tif request.method == 'POST':\n\t\tpid = int(request.form['probID'])\n\t\tlang = request.form['lang']\n\t\tcode = request.form['code']\n\n\t\tprob = Problem.query.get(pid)\n\t\tif prob:\n\t\t\tinfo = json.loads(prob.info)\n\t\t\tnum_td = int(info['td_num'])\n\n\t\t\tdate_time = datetime.datetime.now()\n\t\t\tsub = Submission(result='Wait'\n\t\t\t\t\t, resTime=-1.0, resMem=-1.0\n\t\t\t\t\t, code=code, lang=lang, rank=-1, time=date_time\n\t\t\t\t\t, account=current_user, problem=prob)\n\t\t\tdb.session.add(sub)\n\t\t\tdb.session.commit()\n\n\t\t\tlog.debug('Add problem pid={} subid={}'.format(prob.problem_id, sub.submit_id))\n\n\t\t\tmanage.add_judger(sub.submit_id, prob.problem_id, judge.JUDGE_CPP, code, 3.0, 65536, num_td)\n\n\t\treturn redirect(url_for('submissions_page.submissions_handle'))\n\t# pid\n\tpid = ''\n\tif 'pid' in request.args:\n\t\tpid = request.args['pid']\n\t# not if\n\treturn render_template('submit.html', problem_id=pid)\n","sub_path":"page/submit/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"594873954","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/core/tango/util/formatter.py\n# Compiled at: 2019-08-19 15:09:29\n__all__ = [\n 'tangoFormatter']\nfrom taurus.core.units import Quantity\n\ndef tangoFormatter(dtype=None, **kwargs):\n \"\"\"\n The tango formatter callable. Returns a format string based on\n the `format` Tango Attribute configuration (Display.Format in Tango DB)\n\n :param dtype: (type) type of the value object\n :param kwargs: other keyword arguments (ignored)\n\n :return: the string formatting\n \"\"\"\n if dtype is Quantity:\n fmt = '{:~{bc.modelObj.format_spec}}'\n else:\n fmt = '{:{bc.modelObj.format_spec}}'\n return fmt","sub_path":"pycfiles/taurus-4.6.1-py2.7/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"69205884","text":"#!/usr/bin/python\n\nfrom scipy.spatial import KDTree\nimport matplotlib.pyplot as plt\nplt.rc(\"savefig\", dpi=150)\nimport numpy as np\n\npoints = np.array([[1,1],[1,-1],[-1,-1],[2,-2]])\ntree = KDTree(points)\nx = np.linspace(-2.5, 2.5, 100)\ny = np.linspace(-2.5, 2.5, 100)\nxx, yy = np.meshgrid(x, y)\nxy = np.c_[xx.ravel(), yy.ravel()]\nplt.pcolor(x, y, tree.query(xy)[1].reshape(100, 100))\nplt.plot(points[:,0], points[:,1], 'ko')\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\n#plt.savefig(\"3b_Voronoi.png\")\n#plt.savefig(\"3b_Voronoi.ps\")\nplt.show()\n","sub_path":"CS6350_Machine-Learning/HW1/images/3a_voronoi.py","file_name":"3a_voronoi.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"423315526","text":"# pylint: disable=arguments-differ, redefined-builtin, missing-docstring, no-member, invalid-name, line-too-long, not-callable\nimport torch\n\nfrom e3nn import rs\nfrom e3nn.non_linearities import GatedBlock\nfrom e3nn.non_linearities.rescaled_act import swish, sigmoid\nfrom e3nn.linear import Linear\n\n\nclass DepthwiseConvolution(torch.nn.Module):\n def __init__(self, Rs_in, Rs_out, Rs_mid1, Rs_mid2, groups, convolution, linear=Linear, scalar_activation=swish, gate_activation=sigmoid):\n super().__init__()\n\n act_in = GatedBlock(groups * Rs_mid1, scalar_activation, gate_activation)\n self.lin_in = linear(Rs_in, act_in.Rs_in)\n self.act_in = act_in\n\n act_mid = GatedBlock(Rs_mid2, scalar_activation, gate_activation)\n self.conv = convolution(Rs_mid1, act_mid.Rs_in)\n self.act_mid = act_mid\n\n act_out = GatedBlock(Rs_out, scalar_activation, gate_activation)\n self.lin_out = linear(groups * Rs_mid2, act_out.Rs_in)\n self.act_out = act_out\n\n self.groups = groups\n\n def forward(self, features, *args, **kwargs):\n \"\"\"\n :param features: tensor [..., point, channel]\n :return: tensor [..., point, channel]\n \"\"\"\n features = self.lin_in(features)\n features = self.act_in(features)\n\n features = self.conv(features, *args, **kwargs, groups=self.groups)\n features = self.act_mid(features.reshape(-1, rs.dim(self.act_mid.Rs_in))).reshape(*features.shape[:-1], -1)\n\n features = self.lin_out(features)\n features = self.act_out(features)\n\n return features\n","sub_path":"e3nn/point/depthwise.py","file_name":"depthwise.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"519476528","text":"# -*- coding: utf-8 -*-\n##\n##\n## This file is part of Indico.\n## Copyright (C) 2002 - 2013 European Organization for Nuclear Research (CERN).\n##\n## Indico is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 3 of the\n## License, or (at your option) any later version.\n##\n## Indico is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Indico;if not, see .\n\nfrom MaKaC.conference import ConferenceHolder\nfrom indico.ext.search.repozer.repozeIndexer import RepozeCatalog\nfrom indico.ext.search.repozer.options import confCatalog, contribCatalog, matCatalog\n\nfrom datetime import datetime\nimport time\nfrom pytz import timezone\nimport MaKaC.common.info as info\n\nfrom repoze.catalog.query import *\n\n\nclass RepozerQueryManager(): \n \n def __init__(self, params):\n self.query = None\n self.params = params\n \n\n\n def getQuery(self): \n if not self.params:\n return \n self.checkParams() \n return self.query\n\n\n def setQuery(self, query):\n self.query = query\n \n\n def addQuery(self, elem):\n if not self.query:\n self.query = elem\n else:\n self.query = self.query & elem\n return\n \n \n def getResults(self, query=None):\n res = []\n params = self.params\n\n if params.get('id',None):\n event = ch.getById(params['id'])\n res.append(event)\n return 1, res\n \n if not query:\n query = self.getQuery() \n \n if not query:\n return 0, []\n \n collections = params.get('collections', 'Conference')\n rc = RepozeCatalog()\n if collections == 'Material':\n rc = RepozeCatalog(matCatalog)\n if collections == 'Contribution':\n rc = RepozeCatalog(contribCatalog)\n \n catalog = rc.catalog \n ch = ConferenceHolder() \n desc = params.get('desc',False) \n sort_field = params.get('sort_field','startDate') \n \n numdocs, results = catalog.query(query, sort_index=sort_field, reverse=desc, limit=params.get('limit',5000)) \n results = [catalog.document_map.address_for_docid(result) for result in results]\n \n if params.get('onlyFids', False):\n return numdocs, results\n else:\n for obj in results:\n try:\n confId = str(obj).split(\"|\")[0]\n event = ch.getById(confId)\n res.append(event)\n except:\n pass \n \n return numdocs, res\n \n \n def checkParams(self): \n params = self.params \n #print params\n if params.has_key('text'):\n text = params.get('text', None)\n \n # Ictp: custom case\n if text.lower().startswith('smr'):\n self.setQuery( Any('keywords', text.replace(\" \", \"\")) )\n return\n \n # WHERE: specify where to search \n where = params.get('where', 'title_description')\n if where == 'title_description':\n self.addQuery( Eq('title', text.decode('utf8')) | Eq('description', text.decode('utf8')) )\n\n if where == 'title':\n self.addQuery( Eq('title', text.decode('utf8')) )\n \n if where == 'roles':\n val = unicode(text, \"UTF-8\").encode('ascii', 'xmlcharrefreplace')\n self.addQuery( Contains('rolesVals', val) )\n \n if where == 'persons':\n self.addQuery( Contains('persons', text.decode('utf8')) )\n\n if where == 'all':\n val = unicode(text, \"UTF-8\").encode('ascii', 'xmlcharrefreplace')\n textDecoded = text.decode('utf8')\n self.addQuery( Eq('description', textDecoded) | 
Eq('title', textDecoded) | Contains('persons', text) | Contains('rolesVals', val) )\n \n\n # START_DATE, END_DATE, STARTED\n startDate_ts = None\n endDate_ts = None\n datesAvailable = False\n localTimezone = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()\n \n if params.has_key('start_date'):\n sdate = params['start_date'].split('/')\n if 1:\n #try:\n startDate_ts = timezone(localTimezone).localize(datetime(int(sdate[0]), int(sdate[1]), int(sdate[2]), 0, 0))\n datesAvailable = True\n #except:\n # self.setQuery(None)\n # return\n \n if params.has_key('end_date'):\n edate = params['end_date'].split('/')\n try:\n endDate_ts = timezone(localTimezone).localize(datetime(int(edate[0]), int(edate[1]), int(edate[2]), 23, 59))\n datesAvailable = True\n except:\n self.setQuery(None)\n return \n \n if params.has_key('started'):\n ssdate = params['started'].split('/')\n try:\n started_ts = timezone(localTimezone).localize(datetime(int(ssdate[0]), int(ssdate[1]), int(ssdate[2]), 0, 0))\n self.addQuery( Ge('startDate',started_ts) )\n except:\n self.setQuery(None)\n return\n \n elif params.has_key('today'):\n if params['today'] == '':\n td = time.strftime(\"%Y/%m/%d\").split('/')\n else: \n td = params['today'].split('/')\n try:\n today_ts = timezone(localTimezone).localize(datetime(int(td[0]), int(td[1]), int(td[2]), 23, 59))\n end_today_ts = timezone(localTimezone).localize(datetime(int(td[0]), int(td[1]), int(td[2]), 00, 00))\n except:\n self.setQuery(None)\n return \n self.addQuery( Le('startDate',today_ts) & Ge('endDate',end_today_ts) )\n \n elif params.has_key('todaybeyond'):\n if params['todaybeyond'] == '' or params['todaybeyond'] == '1':\n td = time.strftime(\"%Y/%m/%d\").split('/')\n else: \n td = params['todaybeyond'].split('/')\n try:\n today_ts = timezone(localTimezone).localize(datetime(int(td[0]), int(td[1]), int(td[2]), 23, 59))\n except:\n self.setQuery(None)\n return \n self.addQuery( Le('startDate',today_ts) & Ge('endDate',today_ts) | Ge('startDate',today_ts) ) \n \n elif datesAvailable:\n self.addQuery( Not(Lt('endDate',startDate_ts) | Gt('startDate',endDate_ts)) | (InRange('startDate',startDate_ts, endDate_ts)) ) \n \n if params.has_key('keywords'):\n k = params['keywords']\n if k.find(',') > -1:\n kw = k.split(',')\n else:\n kw = [k] \n self.addQuery( Any('keywords', kw) )\n \n if params.has_key('keywordsAnd'):\n kw = params['keywordsAnd'].split(',')\n self.addQuery( All('keywords', kw) )\n \n if params.has_key('category'):\n kw = params['category'].split(',')\n self.addQuery( Any('category', kw) )\n \n # ICTP SPECIFIC\n if params.has_key('valid_deadline'):\n today = datetime.now()\n self.addQuery( Gt('deadlineDate', today) & NotEq('deadlineDate', datetime.strptime('01/01/1970', '%d/%m/%Y')) )\n\n # ICTP SPECIFIC: do not add Conference with keyword = NOSCIAL \n if params.get('collections', 'Conference') == 'Conference': \n self.addQuery( Not(Any('keywords', 'NOSCICAL')) ) \n\n return \n \n \n\n\n \n ","sub_path":"repozerQueryManager.py","file_name":"repozerQueryManager.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"637199590","text":"import yaml\nimport json\nimport paramiko\nimport requests\nimport ssl\nimport socket\nimport hashlib\nimport os\nimport pickle\nimport time\nfrom pprint import pprint\nfrom bravado.client import SwaggerClient\nfrom bravado.requests_client import RequestsClient\n\nPYNSXTOBJFILE = '.pynsxt'\nSPEC_PATH = \"/tmp/nsx_api.json\"\n\n\ndef load_configfile(args):\n with open(args.config_file, 'r') as f:\n config = yaml.load(f)\n return config\n\n\ndef connect_cli(config):\n if config.has_key('cli'):\n return config['cli']\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(config['ip'], username=config['user'],\n password=config['password'], port=22, timeout=15.0, look_for_keys=False)\n config['cli'] = ssh\n return ssh\n\n\ndef exec_command(cli, cmd, display=False):\n output = \"\"\n if display:\n print(\"# %s\" % cmd)\n stdin, stdout, stderr = cli.exec_command(cmd)\n for line in stdout:\n output += line\n if display:\n print(output)\n return output\n\n\ndef load_spec(manager):\n raw_spec = requests.get(\"https://%s/api/v1/spec/openapi/nsx_api.json\" %\n manager['ip'], auth=(manager['user'], manager['password']), verify=False).json()\n\n\ndef api_request(args, method, uri, data=\"\"):\n config = load_configfile(args)\n uri = \"https://%s/\" % config['nsxManager']['ip'] + uri\n # headers = {'Content-Type': 'application/json'}\n headers = {'Content-Type': 'application/json',\n 'Accept': 'application/json'}\n auth = (config['nsxManager']['user'], config['nsxManager']['password'])\n if method == 'get':\n res = requests.get(uri, auth=auth, headers=headers, verify=False)\n elif method == 'post':\n res = requests.post(uri, auth=auth, headers=headers,\n data=data, verify=False)\n elif method == 'delete':\n res = requests.delete(uri, auth=auth, headers=headers,\n data=data, verify=False)\n return (res.status_code, res.json())\n\n\ndef get_api_client(config, validation=False):\n if config.has_key('client'):\n return config['client']\n raw_spec = json.load(open(SPEC_PATH))\n raw_spec['host'] = config['nsxManager']['ip']\n http_client = RequestsClient()\n http_client.session.verify = False\n http_client.set_basic_auth(\n config['nsxManager']['ip'], config['nsxManager']['user'], config['nsxManager']['password'])\n config = {\n 'also_return_response': True,\n 'validate_swagger_spec': validation,\n 'validate_responses': False,\n 'validate_requests': False,\n 'use_models': False\n }\n client = SwaggerClient.from_spec(\n raw_spec, http_client=http_client, config=config)\n config['client'] = client\n return client\n\n\ndef get_thumbprint(ip):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n wrappedSocket = ssl.wrap_socket(sock)\n\n try:\n wrappedSocket.connect((ip, 443))\n except:\n response = False\n else:\n der_cert_bin = wrappedSocket.getpeercert(True)\n pem_cert = ssl.DER_cert_to_PEM_cert(wrappedSocket.getpeercert(True))\n # Thumbprint\n thumb_sha256 = hashlib.sha256(der_cert_bin).hexdigest()\n wrappedSocket.close()\n return ':'.join(map(''.join, zip(*[iter(thumb_sha256)] * 2)))\n\n\ndef convert_to_dict(model):\n try:\n model = model.__dict__['_Model__dict']\n for k, v in model.items():\n model[k] = convert_to_dict(v)\n except AttributeError:\n if isinstance(model, dict):\n for k, v in model.items():\n model[k] = convert_to_dict(v)\n if isinstance(model, list):\n for i, v in enumerate(model):\n model[i] = convert_to_dict(v)\n return model\n\n\ndef main():\n args = get_args()\n if args.debug:\n 
basicConfig(level=DEBUG)\n else:\n basicConfig(level=INFO)\n handler = StreamHandler()\n logger.addHandler(handler)\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pynsxt/pynsxt_utils.py","file_name":"pynsxt_utils.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"527033444","text":"import boto3\nimport datetime\nimport gzip\n\nfrom src.data_models.SmartsAlertsDataModel import SmartsAlertsDataModel\nfrom src.utility.Configuration import Configuration\n\n\ndef update_alerts_to_s3(evaluation_date):\n alerts = SmartsAlertsDataModel().initialize(evaluation_date=evaluation_date).evaluate()\n alerts_compress_str = alerts.to_csv(compression='gzip', index=False)\n alerts_gzip_file = gzip.compress(bytes(alerts_compress_str, 'utf-8'))\n\n config = Configuration().get()['aws_s3']\n access_key_id = [key_id['access_key_id'] for key_id in config if list(key_id.keys())[0] == 'access_key_id'][0]\n secret_access_key = [secret_key['secret_access_key'] for secret_key in config if list(secret_key.keys())[0] == 'secret_access_key'][0]\n bucket = [bucket['bucket_name'] for bucket in config if list(bucket.keys())[0] == 'bucket_name'][0]\n\n session = boto3.Session(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)\n s3 = session.client('s3')\n\n s3.put_object(Body=alerts_gzip_file, Key='smarts_alerts_{}.csv.gz'.format(evaluation_date), Bucket=bucket)\n\n\nif __name__ == '__main__':\n update_alerts_to_s3(datetime.date.today() - datetime.timedelta(days=1))\n","sub_path":"src/aws_s3/UpdateAlertsToS3.py","file_name":"UpdateAlertsToS3.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"647301416","text":"#!/usr/bin/env python3\n\nimport json\nimport argparse\nimport http.client\n\nfrom homekit import find_device_ip_and_port, HapStatusCodes\n\n\ndef setup_args_parser():\n parser = argparse.ArgumentParser(description='HomeKit identify app - performs identify on given HomeKit device')\n parser.add_argument('-d', action='store', required=True, dest='device')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = setup_args_parser()\n\n connection_data = find_device_ip_and_port(args.device)\n\n conn = http.client.HTTPConnection(connection_data['ip'], port=connection_data['port'])\n\n conn.request('POST', '/identify')\n\n resp = conn.getresponse()\n if resp.code == 400:\n data = json.loads(resp.read().decode())\n code = data['status']\n print('identify failed because: {reason} ({code}). Is it paired?'.format(reason=HapStatusCodes[code], code=code))\n elif resp.code == 200:\n print('identify succeeded.')\n conn.close()\n","sub_path":"homekit/identify.py","file_name":"identify.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"397917439","text":"from typing import Optional\nfrom termcolor import cprint\nimport sys\n\nBLACKJACK = 21\n_LINE_WIDTH = 60\n_CTRL_C = \"\\x03\"\n_LOG_COLOR = \"green\"\n\nif sys.platform == \"win32\":\n import msvcrt\n\n getch = lambda: msvcrt.getch()\n\nelse:\n import tty, termios\n\n def _getch():\n fd = sys.stdin.fileno()\n original_attributes = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, original_attributes)\n if ch == _CTRL_C:\n sys.tracebacklimit = 0\n raise KeyboardInterrupt\n return ch\n\n getch = _getch\n\n\ndef choose(action_1: str, action_2: str, other: Optional[str] = None):\n def choose_two_or(action_1: str, action_2: str, other: str):\n try:\n return [action_1, action_2][int(getch()) - 1]\n except ValueError:\n return other\n\n def choose_two(action_1: str, action_2: str):\n while True:\n result = choose_two_or(action_1, action_2, \"retry\")\n if result != \"retry\":\n return result\n print(\"잘못 입력하셨습니다. 1과 2 중 하나를 선택해주십시오.\")\n\n print(f\"1: {action_1}, 2: {action_2}\")\n if other:\n print(f\"다른 키: {other}\")\n return choose_two_or(action_1, action_2, other)\n\n return choose_two(action_1, action_2)\n\n\ndef log(line: str, *lines: str):\n cprint(\"=\" * _LINE_WIDTH, _LOG_COLOR)\n\n for line in [line, *lines]:\n cprint(f\"{line:>30}\", _LOG_COLOR)\n\n cprint(\"=\" * _LINE_WIDTH, _LOG_COLOR)\n\n\ndef how_to_play():\n log(\n \"블랙잭은 21에 가까운 수를 만들면 이기는 게임입니다.\",\n \"J, Q, K는 10으로, A는 1과 11 어느쪽으로든 계산할 수 있습니다.\",\n \"시작하며 카드 두장을 기본으로 지급받습니다.\",\n \"카드를 더 뽑으면 Hit, 뽑지 않고 차례를 마치면 Stay.\",\n \"숫자의 합이 21을 넘어가면 Bust로 즉시 패배합니다.\",\n \"플레이어의 차례가 끝나면 상대의 차례입니다.\",\n \"딜러는 숫자의 합이 17 이상이 될때까지 무조건 히트를 합니다.\",\n \"상대보다 합이 높거나, 상대가 Bust되면 플레이어의 승리입니다.\",\n )\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"307631411","text":"# two-sum 다른 해결 법\n\n\nclass Solution:\n # 1. Brute Force : 가장 느림. O(n^2)\n def brute_force(self, nums, target: int):\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n if nums[i] + nums[j] == target:\n return [i, j]\n\n # 2. in => 내가 사용한 방법 : O(n^2)으로 시간복잡도는 같지만 in이 더 빠름\n def using_in(self, nums, target: int):\n for i, n in enumerate(nums):\n complement = target - n\n if complement in nums[i + 1]:\n return [i, nums[i + 1 :].index(complement) + (i + 1)]\n\n # 3. 첫번째 수를 뺀 결과 키 조회 : 평균 O(1), 최악 O(n)\n def find_key_except_first_num_1(self, nums, target: int):\n # key <> value -> dict\n nums_map = {}\n for i, num in enumerate(nums):\n nums_map[num] = i\n\n # 타겟에서 첫번째 수를 뺀 결과를 키로 조회\n for i, num in enumerate(nums):\n if target - num in nums_map and i != nums_map[target - num]:\n return [i, nums_map[target - num]]\n\n # 4. 3 구조 개선 : 성능의 차이는 없지만 코드가 간결해짐\n def find_key_except_first_num_2(self, nums, target: int):\n nums_map = {}\n for i, num in enumerate(nums):\n if target - num in nums_map:\n return [nums_map[target - num], i]\n nums_map[num] = i\n\n # 만약 정렬된 리스트라면 투 포인터 방식을 이용해도 됨\n # 합이 타겟보다 작으면 왼쪽 포인터를 오른쪽으로\n # 합이 타겟보다 크면 오른쪽 포인터를 왼쪽으로\n # 합이 타겟과 같으면 return\n","sub_path":"python-algorithm-interview/3_linear_data_structures/07_array/7-1.py","file_name":"7-1.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"498240693","text":"\"\"\"\nThis program is beginning of the piping GUI, it creates 2 Axis frames, a Scan Frame, and a Fault Frame\n\nIt communicates with the Aerotech Controller and sets up a queue thread for Control Commands Enable and Jog only\n\nCurrently the program is able to enable axes and jog them *NOTE PROGRAM IS IN MM, PARAMETER FILE MAY BE IN INCHES*\n\nNo Fault conditions are handled, be cautious while running\n\nAbility to read in text file to set to last used tool configuration\n\nScan button logic complete - No scan commands\n\nUpdated to allow for Jog, GOTO, Move Inc, Set To\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter import messagebox\nimport serial\nimport threading\nimport time\nimport queue\n\n\n# This class sets Frame parameters for the Circ Axis\nclass SetupAxis1Frame:\n def __init__(self):\n self.axisName = \"CIRC\"\n self.axisUnits = \"mm\"\n self.jogText1 = \"CCW\"\n self.jogText2 = \"CW\"\n self.speedMin = 0.1\n self.speedMax = 25.0\n self.speedRes = .5\n self.queue_name = \"CTRL\"\n\n\n# This class sets Frame parameters for the Trans Axis\nclass SetupAxis2Frame:\n def __init__(self):\n self.axisName = \"TRANSLATOR\"\n self.axisUnits = \"mm\"\n self.jogText1 = \"IN\"\n self.jogText2 = \"OUT\"\n self.speedMin = 0.1\n self.speedMax = 50.0\n self.speedRes = .5\n self.queue_name = \"CTRL\"\n\n\n# This class places a button on the top of the GUI to select between LAPIS and NOVA\nclass ToolFrame:\n def __init__(self, master, params):\n self.master = master\n self.tool = params\n topFrame = Frame(master, relief=SUNKEN, border=2)\n topFrame.pack(fill=X, padx=10, pady=10)\n self.toolButton = Button(topFrame, text=\"NOVA\", fg=\"Black\", bg=\"Sky Blue\", font=(\"Helvetica\", 14),\n command=lambda: self.toggle_tool())\n\n # Read in first parameter from TIMC setup file list to determine last configuration\n if self.tool[0] == \"LPS-1000\\n\":\n self.toolButton.config(text=\"LPS-1000\", fg=\"Black\", bg=\"Goldenrod\")\n elif self.tool[0] == \"NOVA\\n\":\n self.toolButton.config(text=\"NOVA\", fg=\"Black\", bg=\"Sky Blue\")\n self.toolButton.pack(fill=X)\n\n # Toggle tool between LAPIS and NOVA, overwrite setup file list\n def toggle_tool(self):\n if self.toolButton[\"text\"] == \"NOVA\":\n self.toolButton.config(text=\"LPS-1000\", fg=\"Black\", bg=\"Goldenrod\")\n self.tool[0] = \"LPS-1000\\n\"\n TIMC.params = self.tool\n else:\n self.toolButton.config(text=\"NOVA\", fg=\"Black\", bg=\"Sky Blue\")\n self.tool[0] = \"NOVA\\n\"\n TIMC.params = self.tool\n\n\n# This class pulls parameters from the specific axis and puts them in the GUI\nclass AxisFrame:\n def __init__(self, master, parameters):\n self.axisName = parameters.axisName\n self.axisUnits = parameters.axisUnits\n self.jogText1 = parameters.jogText1\n self.jogText2 = parameters.jogText2\n self.speedMin = parameters.speedMin\n self.speedMax = parameters.speedMax\n self.speedRes = parameters.speedRes\n self.queue = parameters.queue_name\n self.current_limit = 5 # (A) pull this in eventually\n self.pos_err_limit = 1.5 # (mm)\n\n self.state = 0 # Flag for Enabled/Disabled Axis\n\n self.frame = Frame(master, relief=SUNKEN, border=2)\n self.frame.pack(fill=X, padx=10, pady=5)\n\n self.position = float(0)\n self.current = float(0)\n self.velocity = float(0)\n self.setToText = StringVar(master, value=\"0\")\n self.GoToText = StringVar(master, value=\"0\")\n self.moveIncText = StringVar(master, value=\"0\")\n\n # Create Widgets\n # Frames\n self.pos_frame = Frame(self.frame, bg=\"White\", relief=SUNKEN, border=2)\n 
self.button_frame = Frame(self.frame)\n self.error_frame = Frame(self.frame)\n self.pos_err_graph = Canvas(self.error_frame, bg=\"white\", height=100, width=20)\n self.current_graph = Canvas(self.error_frame, bg=\"white\", height=100, width=20)\n\n # Labels\n self.label_0 = Label(self.frame, text=self.axisName, font=\"Helvetica, 14 bold\")\n self.label_1 = Label(self.pos_frame, text=self.axisUnits, fg=\"Gray\", bg=\"White\", font=\"Helvetica, 20 bold\")\n self.label_2 = Label(self.pos_frame, text=\"Velocity\", font=(\"Helvetica\", 8), bg=\"White\")\n self.label_3 = Label(self.pos_frame, text=(self.axisUnits + \"/s\"), font=(\"Helvetica\", 8), bg=\"White\")\n self.label_4 = Label(self.button_frame, text=(\"Speed (\" + self.axisUnits + \"/s)\"), font=(\"Helvetica\", 10))\n self.label_5 = Label(self.error_frame, text=\"Pos Err\", font=(\"Helvetica\", 8))\n self.label_6 = Label(self.error_frame, text=\"Current\", font=(\"Helvetica\", 8))\n\n # Buttons\n self.enableButton = Button(self.frame, text=\"OFF\", fg=\"Red\", bg=\"Light Grey\", height=1, width=8,\n command=lambda: self.toggle_axis(), font=\"Helvetica, 12 bold\")\n self.setToButton = Button(self.button_frame, text=\"SET TO:\", fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n state=\"disabled\", command=lambda: self.setTo(1))\n self.GoToButton = Button(self.button_frame, text=\"GO TO:\", fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n state=\"disabled\",\n command=lambda: self.GoTo(1))\n self.moveIncButton = Button(self.button_frame, text=\"MOVE INC:\", fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n state=\"disabled\", command=lambda: self.moveInc())\n self.setToZero = Button(self.button_frame, text=\"Set To 0\", fg=\"Gray\", bg=\"Light Grey\", height=1,\n state=\"disabled\", command=lambda: self.setTo(0))\n self.GoToZero = Button(self.button_frame, text=\"Go To 0\", fg=\"Gray\", bg=\"Light Grey\", height=1,\n state=\"disabled\", command=lambda: self.GoTo(0))\n self.jogButtonFWD = Button(self.frame, text=\"Jog \" + self.jogText1, fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n font=\"Helvetica, 12 bold\", state=\"disabled\")\n self.jogButtonREV = Button(self.frame, text=\"Jog \" + self.jogText2, fg=\"Gray\", bg=\"Light Grey\", height=1, width=10,\n font=\"Helvetica, 12 bold\", state=\"disabled\")\n\n # Feedback Labels and Entry Boxes\n self.position_box = Label(self.pos_frame, text=(\"%.2f\" % self.position), fg=\"Gray\", bg=\"White\", width=6, anchor=E, font=\"Helvetica, 26 bold\")\n self.velocity_box = Label(self.pos_frame, text=(\"%.1f\" % self.velocity), bg=\"White\", width=8, state=\"disabled\", font=(\"Helvetica\", 8))\n self.setToEntry = Entry(self.button_frame, textvariable=self.setToText, width=10, font=(\"Helvetica\", 10), justify=\"center\")\n self.GoToEntry = Entry(self.button_frame, textvariable=self.GoToText, width=10, font=(\"Helvetica\", 10), justify=\"center\")\n self.moveIncEntry = Entry(self.button_frame, textvariable=self.moveIncText, width=10, font=(\"Helvetica\", 10),\n justify=\"center\")\n\n # Velocity Scale\n self.vel = Scale(self.frame, from_=self.speedMin, to=self.speedMax, orient=HORIZONTAL, length=200, resolution=self.speedRes, troughcolor=\"White\")\n self.vel.set((self.speedMax - self.speedMin) * 0.5)\n\n # Jog Button Actions\n self.jogButtonFWD.bind('', lambda event: self.jogFWD())\n self.jogButtonFWD.bind('', lambda event: self.stopjog())\n self.jogButtonREV.bind('', lambda event: self.jogREV())\n self.jogButtonREV.bind('', lambda event: self.stopjog())\n\n # Grid Widgets\n 
self.label_0.grid(column=0, row=0, columnspan=2, pady=5, sticky=W)\n self.pos_frame.grid(column=0, row=1, rowspan=3, padx=5, sticky=N)\n self.position_box.grid(column=0, row=0, columnspan=2, pady=5)\n self.label_1.grid(column=2, row=0, padx=2, pady=5, sticky=W) # Units\n self.label_2.grid(column=0, row=1, pady=2, sticky=E) # Velocity\n self.velocity_box.grid(column=1, row=1)\n self.label_3.grid(column=2, row=1, pady=2, sticky=W) # Units/s\n self.enableButton.grid(column=0, row=4, rowspan=1, padx=5, sticky=N)\n\n self.button_frame.grid(column=1, row=0, rowspan=3, columnspan=3)\n self.setToButton.grid(column=0, row=0, padx=10, sticky=S)\n self.setToEntry.grid(column=0, row=1, sticky=N)\n self.setToZero.grid(column=0, row=2, pady=5, sticky=N)\n self.moveIncButton.grid(column=1, row=0, padx=10, sticky=S)\n self.moveIncEntry.grid(column=1, row=1, sticky=N)\n self.label_4.grid(column=1, row=2, sticky=S) # Units/s\n self.GoToButton.grid(column=2, row=0, padx=10, sticky=S)\n self.GoToEntry.grid(column=2, row=1, sticky=N)\n self.GoToZero.grid(column=2, row=2, pady=5, sticky=N)\n\n self.vel.grid(column=1, row=2, rowspan=2, columnspan=3, sticky=S)\n self.vel.lower()\n self.jogButtonFWD.grid(column=1, row=4, rowspan=2, padx=10, pady=5, sticky=SW)\n self.jogButtonREV.grid(column=3, row=4, rowspan=2, padx=10, pady=5, sticky=SE)\n\n self.error_frame.grid(column=4, row=0, rowspan=5, padx=10)\n self.pos_err_graph.grid(column=0, row=0)\n self.current_graph.grid(column=1, row=0)\n self.label_5.grid(column=0, row=1, sticky=S) # Current\n self.label_6.grid(column=1, row=1, sticky=S) # Pos Err\n self.pos_err_rect = self.pos_err_graph.create_rectangle(2, 98, 21, 100, fill=\"red\", outline=\"red\")\n self.current_rect = self.current_graph.create_rectangle(2, 98, 21, 100, fill=\"red\", outline=\"red\")\n\n # This function toggles the button between OFF and ON\n def toggle_axis(self):\n if self.state == 0:\n self.enable_axis()\n else:\n self.disable_axis()\n\n # This function enables the axis\n def enable_axis(self):\n self.activate_axis_btns()\n self.enableButton.config(text=\"ON\")\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"ENABLE \" + self.axisName) # Aerotech command to enable axis\n\n if TIMC.axis1.state & TIMC.axis2.state == 1:\n TIMC.scan.activate_scan_btns() # Once both axes are enabled, scan button is active\n\n if TIMC.scan.scan_state == 1:\n TIMC.axis1.enableButton.config(state=\"disabled\") # If scan is active, disable axis 1 enable button\n TIMC.axis2.enableButton.config(state=\"disabled\") # If scan is active, disable axis 2 enable button\n TIMC.scan.start.config(state=\"disabled\") # If scan is active, disable start scan button\n TIMC.scan.stop.config(state=\"normal\", fg=\"Black\", bg=\"Indian Red\")\n TIMC.scan.resume.config(state=\"normal\", fg=\"Black\", bg=\"Dodger Blue\")\n\n # This function disables the axis\n def disable_axis(self):\n self.deactivate_axis_btns()\n self.enableButton.config(text=\"OFF\", fg=\"Red\", bg=\"Light Grey\")\n TIMC.scan.deactivate_scan_btns()\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"DISABLE \" + self.axisName) # Aerotech command to disable axis\n\n def activate_axis_btns(self):\n self.state = 1\n self.enableButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.setToButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.GoToButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.moveIncButton.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.setToZero.config(state=\"normal\", fg=\"Black\", 
bg=\"Lawn Green\")\n self.GoToZero.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n self.jogButtonFWD.config(state=\"normal\", fg=\"Black\", bg=\"SteelBlue2\")\n self.jogButtonREV.config(state=\"normal\", fg=\"Black\", bg=\"SteelBlue2\")\n self.position_box.config(fg=\"Black\")\n self.label_1.config(fg=\"Black\")\n self.velocity_box.config(state=\"normal\")\n\n def deactivate_axis_btns(self):\n self.state = 0\n self.setToButton.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.GoToButton.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.moveIncButton.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.setToZero.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.GoToZero.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.jogButtonFWD.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.jogButtonREV.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Grey\")\n self.position_box.config(fg=\"Gray\")\n self.label_1.config(fg=\"Gray\")\n self.velocity_box.config(state=\"disabled\")\n\n # This function starts Jogging in the FORWARD Direction\n def jogFWD(self):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"ABORT \" + self.axisName)\n speed = str(self.vel.get())\n TIMC.acmd(self.queue, \"FREERUN \" + self.axisName + \" \" + speed)\n\n # This function starts Jogging in the REVERSE Direction\n def jogREV(self):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"ABORT \" + self.axisName)\n speed = str(-1 * self.vel.get())\n TIMC.acmd(self.queue, \"FREERUN \" + self.axisName + \" \" + speed)\n\n # This function stops Jogging\n def stopjog(self):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"FREERUN \" + self.axisName + \" 0\")\n\n # This function sets the position in the position label based on Set To Entry Box\n def setTo(self, zero):\n if zero == 0:\n position = \"0\"\n else:\n position = str(self.setToEntry.get())\n if checkIsDigit(position):\n if TIMC.online == 1:\n TIMC.acmd(self.queue, \"POSOFFSET SET \" + self.axisName + \", \" + position)\n else:\n self.position = float(position)\n self.position_box.config(text=(\"%.2f\" % self.position))\n\n def GoTo(self, zero):\n if zero == 0:\n position = \"0\"\n else:\n position = str(self.GoToEntry.get())\n if checkIsDigit(position):\n if TIMC.online == 1:\n speed = str(self.vel.get())\n TIMC.acmd(self.queue, \"MOVEABS \" + self.axisName + \" \" + position + \" F \" + speed)\n else:\n self.position = float(position)\n self.position_box.config(text=(\"%.2f\" % self.position))\n\n def moveInc(self):\n distance = self.moveIncEntry.get()\n if checkIsDigit(distance):\n if TIMC.online == 1:\n speed = str(self.vel.get())\n TIMC.acmd(self.queue, \"MOVEINC \" + self.axisName + \" \" + distance + \" F \" + speed)\n else:\n self.position = self.position + float(distance)\n self.position_box.config(text=(\"%.2f\" % self.position))\n\n def updateCurrent(self, cur):\n # Max Error for x1 = 401+61 = 462\n cur = abs((cur/self.current_limit)*100)\n # Delete the old representation of current\n self.current_graph.delete(self.current_rect)\n # Draw the new position error box\n self.current_rect = self.current_graph.create_rectangle(2, 98-cur, 21, 100, fill=\"red\", outline=\"red\")\n\n def updatePosErr(self, pos_err):\n # Max Error for x1 = 401+61 = 462\n pos_err = abs((pos_err/self.pos_err_limit)*100)\n # Delete the old representation of current\n self.pos_err_graph.delete(self.pos_err_rect)\n # Draw the new position error box\n self.pos_err_rect = 
self.pos_err_graph.create_rectangle(2, 98-pos_err, 21, 100, fill=\"red\", outline=\"red\")\n\n\n# This class creates a Scan Window in the GUI\nclass ScanFrame:\n def __init__(self, master, parameters1, parameters2):\n self.master = master\n self.axis1 = parameters1\n self.axis2 = parameters2\n self.scanConfig = IntVar()\n self.scanType = IntVar()\n self.scan_setup = [\"0.0\", \"20.0\", \"0.0\", \"10.0\", \"1.0\"]\n self.scan_state = 0\n\n scan_frame = Frame(master, relief=SUNKEN, border=2)\n left_frame = Frame(scan_frame)\n right_frame = Frame(scan_frame)\n bottom_frame = Frame(master, relief=RAISED, border=2)\n scan_frame.pack(fill=X, padx=10)\n\n left_frame.grid(column=0, row=0, sticky=W)\n right_frame.grid(column=1, row=0, padx=20)\n bottom_frame.pack(fill=X, padx=10)\n\n # Labels and Axis Names\n self.label_0 = Label(left_frame, text=\"SCAN WINDOW\", font=(\"Helvetica\", 14))\n self.axis_label_1 = Label(left_frame, text=self.axis1.axisName, font=(\"Helvetica\", 12))\n self.axis_label_2 = Label(left_frame, text=self.axis2.axisName, font=(\"Helvetica\", 12))\n\n # Start, Stop, Pause, and Resume Buttons\n self.start = Button(bottom_frame, text=\"START\", font=\"Helvetica, 12 bold\", fg=\"Gray\", bg=\"Light Green\", height=2,\n width=10, command=lambda: self.start_scan(), state=\"disabled\")\n self.stop = Button(bottom_frame, text=\"STOP\", font=\"Helvetica, 12 bold\", fg=\"Gray\", bg=\"Light Coral\", height=2,\n width=10, command=lambda: self.stop_scan(), state=\"disabled\")\n self.pause = Button(bottom_frame, text=\"PAUSE\", font=\"Helvetica, 10 bold\", fg=\"Gray\", bg=\"Light Yellow\", height=1, width=10,\n command=lambda: self.pause_scan(), state=\"disabled\")\n self.resume = Button(bottom_frame, text=\"RESUME\", font=\"Helvetica, 10 bold\", fg=\"Gray\", bg=\"Light Blue\", height=1, width=10,\n command=lambda: self.resume_scan(), state=\"disabled\")\n\n # Speed slider bars\n self.vel1 = Scale(left_frame, from_=self.axis1.speedMin, to=self.axis1.speedMax, orient=HORIZONTAL, length=100,\n label=\"Speed \" + self.axis1.axisUnits + \"/sec\", font=(\"Helvetica\", 10),\n resolution=self.axis1.speedRes)\n self.vel1.set((self.axis1.speedMax - self.axis1.speedMin) * 0.5)\n self.vel2 = Scale(left_frame, from_=self.axis2.speedMin, to=self.axis2.speedMax, orient=HORIZONTAL, length=100,\n label=\"Speed \" + self.axis2.axisUnits + \"/sec\", font=(\"Helvetica\", 10),\n resolution=self.axis2.speedRes)\n self.vel2.set((self.axis2.speedMax - self.axis2.speedMin) * 0.5)\n\n # Radio buttons to select scan configuration\n self.axis1_radio_0 = Radiobutton(left_frame, text=\"Scan\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=0)\n self.axis1_radio_1 = Radiobutton(left_frame, text=\"Index\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=1)\n self.axis2_radio_0 = Radiobutton(left_frame, text=\"Scan\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=1)\n self.axis2_radio_1 = Radiobutton(left_frame, text=\"Index\", font=(\"Helvetica\", 10), variable=self.scanConfig, value=0)\n self.bidirectional_radio = Radiobutton(right_frame, text=\"Bidirectional\", font=(\"Helvetica\", 10),\n variable=self.scanType, value=1)\n self.unidirectional_radio = Radiobutton(right_frame, text=\"Unidirectional\", font=(\"Helvetica\", 10),\n variable=self.scanType, value=0)\n\n # Scan Entry Boxes\n self.label_1 = Label(right_frame, text=\"Scan Start\", font=(\"Helvetica\", 10))\n self.label_2 = Label(right_frame, text=\"Scan Stop\", font=(\"Helvetica\", 10))\n self.label_3 = Label(right_frame, 
text=\"Index Start\", font=(\"Helvetica\", 10))\n self.label_4 = Label(right_frame, text=\"Index Stop\", font=(\"Helvetica\", 10))\n self.label_5 = Label(right_frame, text=\"Index Size\", font=(\"Helvetica\", 10))\n self.label_6 = Label(right_frame, text=\"Remaining Time\", font=(\"Helvetica\", 10))\n self.e_scanStart = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_scanStart.insert(0, self.scan_setup[0])\n self.e_scanStop = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_scanStop.insert(0, self.scan_setup[1])\n self.e_indexStart = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_indexStart.insert(0, self.scan_setup[2])\n self.e_indexStop = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_indexStop.insert(0, self.scan_setup[3])\n self.e_indexSize = Entry(right_frame, width=8, font=(\"Helvetica\", 10), justify=\"center\")\n self.e_indexSize.insert(0, self.scan_setup[4])\n self.label_rem_time = Label(right_frame, text=\"00:00:00\", font=(\"Helvetica\", 12))\n\n # Place widgets in frames\n self.label_0.grid(column=0, row=0, columnspan=2, sticky=W)\n self.axis_label_1.grid(column=0, row=2, rowspan=2, sticky=E, pady=5)\n self.axis_label_2.grid(column=0, row=4, rowspan=2, sticky=E, pady=5)\n self.vel1.grid(column=1, row=2, rowspan=2, columnspan=2, pady=5)\n self.vel2.grid(column=1, row=4, rowspan=2, columnspan=2, pady=5)\n\n self.axis1_radio_0.grid(column=3, row=2, padx=5, sticky=S)\n self.axis1_radio_1.grid(column=3, row=3, padx=5, sticky=N)\n self.axis2_radio_0.grid(column=3, row=4, padx=5, sticky=S)\n self.axis2_radio_1.grid(column=3, row=5, padx=5, sticky=N)\n\n self.bidirectional_radio.grid(column=0, row=0, columnspan=2, padx=20, sticky=W)\n self.unidirectional_radio.grid(column=0, row=1, columnspan=2, padx=20, sticky=W)\n self.label_1.grid(column=2, row=0, sticky=E)\n self.label_2.grid(column=2, row=1, sticky=E)\n self.label_3.grid(column=2, row=2, sticky=E)\n self.label_4.grid(column=2, row=3, sticky=E)\n self.label_5.grid(column=2, row=4, sticky=E)\n self.label_6.grid(column=0, row=5, sticky=E)\n self.e_scanStart.grid(column=3, row=0, padx=5, pady=2)\n self.e_scanStop.grid(column=3, row=1, padx=5, pady=2)\n self.e_indexStart.grid(column=3, row=2, padx=5, pady=2)\n self.e_indexStop.grid(column=3, row=3, padx=5, pady=2)\n self.e_indexSize.grid(column=3, row=4, padx=5, pady=2)\n self.label_rem_time.grid(column=1, row=5, columnspan=2, pady=5, padx=5, sticky=W)\n self.start.pack(side=LEFT, padx=22, pady=5)\n self.stop.pack(side=LEFT, padx=22, pady=5)\n self.resume.pack(side=RIGHT, padx=22, pady=5)\n self.pause.pack(side=RIGHT, padx=22, pady=5)\n\n # Create a scan thread which will command movements to each scan point in self.scan_points\n self.process_scan = ScanThread(self.scan_setup)\n\n def start_scan(self):\n print(\"Start Scan\")\n # Get values from Entry Boxes\n self.scan_setup[0] = self.e_scanStart.get()\n self.scan_setup[1] = self.e_scanStop.get()\n self.scan_setup[2] = self.e_indexStart.get()\n self.scan_setup[3] = self.e_indexStop.get()\n self.scan_setup[4] = self.e_indexSize.get()\n\n # Deactivate Start Scan Button and Axis Buttons During Scan\n self.start.config(state=\"disabled\", fg=\"Gray\")\n TIMC.axis1.deactivate_axis_btns()\n TIMC.axis2.deactivate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"disabled\")\n TIMC.axis2.enableButton.config(state=\"disabled\")\n\n # Activate Stop and Pause Buttons\n self.stop.config(state=\"normal\", 
fg=\"Black\", bg=\"Indian Red\")\n self.pause.config(state=\"normal\", fg=\"Black\", bg=\"Gold\")\n\n \"\"\"\n Check values here to make sure scan is okay to proceed\n \"\"\"\n # Is Start Scan Less Than End Scan\n if self.scan_setup[0] >= self.scan_setup[1] or self.scan_setup[2] >= self.scan_setup[3]:\n messagebox.showinfo(\"Bad Scan Inputs\", \"Start/Stop Values are Same or In Wrong Direction\")\n self.stop_scan()\n else:\n self.scan_state = 1\n self.process_scan = ScanThread(self.scan_setup)\n self.process_scan.start()\n\n def stop_scan(self):\n self.process_scan.stop()\n self.scan_state = 0\n\n # Activate Start Scan Button and Axis Buttons\n self.start.config(state=\"normal\", fg=\"Black\")\n TIMC.axis1.activate_axis_btns()\n TIMC.axis2.activate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"normal\")\n TIMC.axis2.enableButton.config(state=\"normal\")\n\n # Deactivate Stop, Pause and Resume\n self.stop.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Coral\")\n self.pause.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Yellow\")\n self.resume.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Blue\")\n\n def pause_scan(self):\n self.process_scan.pause()\n\n # Activate Axis Buttons and Resume Button\n TIMC.axis1.activate_axis_btns()\n TIMC.axis2.activate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"normal\")\n TIMC.axis2.enableButton.config(state=\"normal\")\n self.resume.config(state=\"normal\", fg=\"Black\", bg=\"Dodger Blue\")\n self.pause.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Yellow\")\n\n def resume_scan(self):\n self.process_scan.resume()\n\n # Deactivate Axis Buttons and Resume Button\n TIMC.axis1.deactivate_axis_btns()\n TIMC.axis2.deactivate_axis_btns()\n TIMC.axis1.enableButton.config(state=\"disabled\")\n TIMC.axis2.enableButton.config(state=\"disabled\")\n self.resume.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Blue\")\n self.pause.config(state=\"normal\", fg=\"Black\", bg=\"Gold\")\n\n def activate_scan_btns(self):\n self.start.config(state=\"normal\", fg=\"Black\", bg=\"Lawn Green\")\n\n def deactivate_scan_btns(self):\n self.start.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Green\")\n self.stop.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Coral\")\n self.pause.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Yellow\")\n self.resume.config(state=\"disabled\", fg=\"Gray\", bg=\"Light Blue\")\n\n\n# This class creates a Fault Frame in the GUI\nclass FaultFrame:\n def __init__(self, master):\n self.frame = Frame(master, borderwidth=2, relief=SUNKEN)\n self.canvas = Canvas(self.frame, highlightthickness=0)\n self.canvas.pack(fill=X)\n self.frame.pack(fill=X, padx=10, pady=5)\n self.frame.pack()\n self.status_text = StringVar()\n\n self.label_0 = Label(self.canvas, text=\"FAULT STATUS\", height=1, font=(\"Helvetica\", 14))\n self.button = Button(self.canvas, text=\"FAULT\\nRESET\", font=\"Helvetica, 12\", fg=\"black\", bg=\"#d3d3d3\", height=2,\n width=6, command=lambda: self.fault_ack())\n self.entry = Entry(self.canvas, width=50, textvariable=self.status_text, font=\"Helvetica, 12\", justify=\"center\")\n self.label_0.grid(row=0, column=0, columnspan=2, sticky=W)\n self.entry.grid(row=1, column=0, columnspan=2, padx=30)\n self.button.grid(row=0, column=2, rowspan=2, pady=10, padx=5)\n\n # Method to display the fault text and change the background color to red\n def fault_status(self, text):\n self.canvas.config(bg=\"red\")\n self.label_0.config(bg=\"red\")\n self.status_text.set(text)\n\n # Method to display information 
text and keep the background color default\n def update_status(self, text):\n self.canvas.config(bg=\"SystemButtonFace\")\n self.label_0.config(bg=\"SystemButtonFace\")\n self.entry.config(bg=\"Yellow\")\n self.status_text.set(text)\n\n # Method to reset the fault and change background color back to default\n def fault_ack(self):\n if TIMC.online:\n TIMC.acmd(\"CTRL\", \"ACKNOWLEDGEALL\")\n self.canvas.config(bg=\"SystemButtonFace\")\n self.label_0.config(bg=\"SystemButtonFace\")\n self.entry.config(bg=\"White\")\n self.status_text.set(\"\")\n\n\n# This class starts a thread that opens communication and puts queued commands to the Ensemble\nclass SerialThread(threading.Thread):\n def __init__(self, baud, qControl_read, qControl_write, qScan_read, qScan_write, qFBK_read, qFBK_write):\n threading.Thread.__init__(self)\n self.qControl_read = qControl_read\n self.qControl_write = qControl_write\n self.qScan_read = qScan_read\n self.qScan_write = qScan_write\n self.qFBK_read = qFBK_read\n self.qFBK_write = qFBK_write\n\n self._is_running = 1\n self.port_open = 0\n self.baud = baud\n\n def run(self):\n # Open the serial port\n ports = ['COM%s' % (i + 1) for i in range(100)]\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n if len(result) == 1:\n self.s = serial.Serial(result[0], self.baud, timeout=0.05)\n\n # Send a command to check if communication has been established\n self.s.write(\"ACKNOWLEDGEALL\".encode('ascii') + b' \\n')\n data = self.s.readline().decode('ascii')\n if '%' in data:\n self.port_open = 1\n self.s.write(\"WAIT MODE NOWAIT\".encode('ascii') + b' \\n')\n # Throw away second response\n data = self.s.readline().decode('ascii')\n elif len(result) > 1:\n self.port_open = 0\n self._is_running = 0\n else:\n self._is_running = 0\n\n # Serial Thread Main Loop - Check Queue for Commands to Send to Aerotech Drive\n while self._is_running:\n time.sleep(.0001)\n # Check if control queue has commands in the queue to send to the Aerotech drive\n if self.qControl_write.qsize():\n command = self.qControl_write.get().encode('ascii') + b' \\n'\n self.s.write(command)\n data = self.s.readline().decode('ascii')\n self.qControl_read.put(data)\n # Check if scan queue has commands in the queue to send to the Aerotech drive\n elif self.qScan_write.qsize():\n command = self.qScan_write.get().encode('ascii') + b' \\n'\n self.s.write(command)\n data = self.s.readline().decode('ascii')\n self.qScan_read.put(data)\n # Check if feedback queue has commands in the queue. 
This is the least priority\n elif self.qFBK_write.qsize():\n command = self.qFBK_write.get().encode('ascii') + b' \\n'\n self.s.write(command)\n data = self.s.readline().decode('ascii')\n self.qFBK_read.put(data)\n\n # Stop the thread from running\n def stop(self):\n self._is_running = 0\n try:\n self.s.close()\n print(\"Serial Port Closed\")\n except:\n print(\"No Serial Port to Close\")\n\n\n# This class starts a thread for automated scanning\nclass ScanThread(threading.Thread):\n def __init__(self, scan_setup):\n threading.Thread.__init__(self)\n self._is_running = 1\n self._is_paused = 0\n self.scan_setup = scan_setup\n self.setDaemon(True)\n\n \"\"\"\n Calculate scan points\n \"\"\"\n\n def run(self):\n print(\"Running Scan\")\n while self._is_running:\n time.sleep(.25)\n if self._is_paused != 1:\n print(\"Scanning...\")\n\n \"\"\"\n Scan profile\n \"\"\"\n\n def stop(self):\n self._is_running = 0\n self._is_paused = 1\n print(\"Stopping Scan\")\n\n def pause(self):\n self._is_paused = 1\n print(\"Pause Scan\")\n\n def resume(self):\n self._is_paused = 0\n print(\"Resume Scan\")\n\n\n# Thread to update the feedback on the GUI. The thread always loads commands into the feedback write queue\n# and the feedback read queue is synced to the write queue to update the appropriate feedback variable on the GUI\nclass UpdateFeedback(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self._is_running = 1\n self.write_index = 0 # Variable to sync read and write queue\n self.read_index = 0 # Variable to sync read and write queue\n\n # Array of text which are ASCII commands compatible with the Aerotech drive\n self.write_cmd = [\"PFBKPROG(\" + TIMC.axis1.axisName + \")\", \"IFBK(\" + TIMC.axis1.axisName + \")\",\n \"VFBK(\" + TIMC.axis1.axisName + \")\", \"PERR(\" + TIMC.axis1.axisName + \")\",\n \"PFBKPROG(\" + TIMC.axis2.axisName + \")\", \"IFBK(\" + TIMC.axis2.axisName + \")\",\n \"VFBK(\" + TIMC.axis2.axisName + \")\", \"PERR(\" + TIMC.axis2.axisName + \")\"]\n\n def run(self):\n while self._is_running:\n time.sleep(.02)\n # If there is something in the read queue, update the correct variable\n if TIMC.qFBK_read.qsize():\n data = TIMC.qFBK_read.get()\n data = data.replace(\"%\", \"\")\n if self.read_index == 0:\n pos = round(float(data), 2)\n TIMC.axis1.position_box.config(text=(\"%.2f\" % pos))\n self.read_index += 1\n elif self.read_index == 1:\n cur = float(data)\n TIMC.axis1.updateCurrent(cur)\n self.read_index += 1\n elif self.read_index == 2:\n vel = round(float(data), 1)\n TIMC.axis1.velocity_box.config(text=(\"%.2f\" % vel))\n self.read_index += 1\n elif self.read_index == 3:\n pos_err = float(data)\n TIMC.axis1.updatePosErr(pos_err)\n self.read_index += 1\n elif self.read_index == 4:\n pos = round(float(data), 2)\n TIMC.axis2.position_box.config(text=(\"%.2f\" % pos))\n self.read_index += 1\n elif self.read_index == 5:\n cur = float(data)\n TIMC.axis2.updateCurrent(cur)\n self.read_index += 1\n elif self.read_index == 6:\n vel = round(float(data), 1)\n TIMC.axis2.velocity_box.config(text=(\"%.2f\" % vel))\n self.read_index += 1\n elif self.read_index == 7:\n pos_err = float(data)\n TIMC.axis2.updatePosErr(pos_err)\n self.read_index = 0\n\n # Auto-populate the feedback write queue with commands so the queue is never empty\n if TIMC.qFBK_write.qsize() == 0:\n TIMC.qFBK_write.put(self.write_cmd[self.write_index])\n if self.write_index < len(self.write_cmd) - 1:\n self.write_index += 1\n else:\n self.write_index = 0\n\n def stop(self):\n 
self._is_running = 0\n\n\n# This class is starts Main GUI Window and starts communication with controller\nclass Main:\n def __init__(self, master):\n self.master = master\n master.geometry(\"650x750\")\n master.title(\"R0: TIMC - Piping\")\n master.resizable(width=False, height=False)\n\n # Open Setup File\n self.params = str()\n self.filename = \"TIMC-P-SETUP.txt\"\n try:\n f = open(self.filename, \"r+\")\n self.params = f.readlines()\n f.close()\n print(\"Setup File Parameters \" + str(self.params))\n except FileNotFoundError:\n print(\"Cannot find file: \" + self.filename)\n print(\"Creating Default \" + self.filename)\n f = open(self.filename, \"w+\")\n self.params = [\"NOVA\\n\", \"0\\n\", \"20\\n\"] # Default TIMC Parameters\n f.writelines(self.params)\n f.close()\n print(self.params)\n\n self.baud = 115200\n self.online = 0 # If communication is successful with the controller this value will be set to 1\n self.qControl_read = queue.Queue() # Results of the qControl_write commands recorded here\n self.qControl_write = queue.Queue() # Jog, GoTo, Index, and Set button press commands are sent to this queue\n self.qScan_read = queue.Queue() # Results of the qScan_write commands recorded here\n self.qScan_write = queue.Queue() # Commands from the scan thread are written to this queue\n self.qFBK_read = queue.Queue() # Results of the qFBK_write commands recorded here\n self.qFBK_write = queue.Queue() # Commands to update the feedback are sent to this queue\n self.write_queue = queue.Queue()\n self.read_queue = queue.Queue()\n\n # Start serial thread\n self.process_serial = SerialThread(self.baud,\n self.qControl_read, self.qControl_write,\n self.qScan_read, self.qScan_write,\n self.qFBK_read, self.qFBK_write)\n self.process_serial.start()\n\n # Wait for serial thread to establish communication\n time.sleep(1.0)\n\n # Setup GUI Frames\n self.tool = ToolFrame(self.master, self.params)\n self.axis1 = AxisFrame(self.master, SetupAxis1Frame())\n self.axis2 = AxisFrame(self.master, SetupAxis2Frame())\n self.scan = ScanFrame(self.master, self.axis1, self.axis2)\n self.fault = FaultFrame(self.master)\n\n # Determine if GUI should be started offline\n self.is_offline()\n\n # Main method for sending commands to TIMC, command syntax specified by Aerotech: ASCII Commands\n def acmd(self, queue_name, text):\n if queue_name == \"CTRL\":\n self.write_queue = self.qControl_write\n self.read_queue = self.qControl_read\n elif queue_name == \"SCAN\":\n self.write_queue = self.qScan_write\n self.read_queue = self.qScan_read\n elif queue_name == \"STATUS\": # Not ready yet\n print(\"Status\")\n # self.write_queue = self.qStatus_write\n # self.read_queue = self.qStatus_read\n elif queue_name == \"FBK\": # Doesn't ever get called\n print(\"FBK\")\n # self.write_queue = self.qFBK_write\n # self.read_queue = self.qFBK_read\n\n # Put command on the queue, process_serial sends the command and returns the result in the read queue\n print(text) # For now, print all Aerotech commands\n self.write_queue.put(text)\n data = self.read_queue.get()\n\n # Aerotech drive sends back special characters in response to the command given\n if \"!\" in data:\n print(\"(!) 
Bad Execution, Queue: \" + queue_name + \" CMD: \" + text)\n return 0\n elif \"#\" in data:\n print(\"(#) ACK but cannot execute, Queue:\", queue_name, \"CMD:\", text)\n return 0\n elif \"$\" in data:\n print(\"($) CMD timeout, Queue:\", queue_name, \"CMD:\", text)\n return 0\n elif data == \"\":\n print(\"No data returned, check serial connection, Queue:\", queue_name, \"CMD:\", text)\n return 0\n elif \"%\" in data:\n data = data.replace(\"%\", \"\")\n return data\n else:\n print(\"Error\")\n\n # if communication is not successful then use Offline Mode\n def is_offline(self):\n if self.process_serial.port_open == 0: # OFFLINE\n self.fault.update_status(\"OFFLINE MODE\")\n self.process_serial.stop()\n self.online = 0\n elif self.process_serial.port_open == 1: # ONLINE\n self.online = 1\n\n\ndef on_closing():\n print(\"Closing...\")\n exception_flag = 0\n\n if TIMC.online:\n print(\"Disconnecting...\")\n try:\n TIMC.axis1.disable_axis() # Disable Axis 1\n except :\n exception_flag = 1\n try:\n TIMC.axis2.disable_axis() # Disable Axis 2\n except:\n exception_flag = 1\n try:\n process_feedback.stop()\n except:\n exception_flag = 1\n try:\n time.sleep(0.5)\n TIMC.process_serial.stop() # Close Serial Port Communication\n except:\n exception_flag = 1\n\n if exception_flag == 1:\n print(\"ERROR CLOSING A THREAD\")\n\n # Overwrite setup file with parameters to include any changes during program execution\n try:\n new_f = open(TIMC.filename, \"r+\")\n new_f.close()\n print(\"Setup File Parameters \" + str(TIMC.params))\n new_f = open(TIMC.filename, \"w+\")\n new_f.writelines(TIMC.params)\n new_f.close()\n except FileExistsError:\n print(\"No File to Overwrite\")\n\n root.destroy()\n\n\ndef checkIsDigit(text):\n if \"-\" in text:\n text = text.replace(\"-\", \"0\")\n if \".\" in text:\n text = text.replace(\".\", \"0\")\n if text.isdigit():\n return True\n else:\n messagebox.showinfo(\"Bad Input\", \"Value is not a number\")\n return False\n\n\nroot = Tk()\nTIMC = Main(root)\n\nif TIMC.online:\n # Start thread to updated position, current and error feedback for each axis\n process_feedback = UpdateFeedback()\n process_feedback.start()\n\n # Start thread to monitor for ESTOP and faults etc.\n\n\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\nroot.mainloop()\n","sub_path":"Development/Scantest3.py","file_name":"Scantest3.py","file_ext":"py","file_size_in_byte":41602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"250836410","text":"from agent.TradingAgent import TradingAgent\nimport pandas as pd\nimport numpy as np\n\n\nclass MeanReversionAgent(TradingAgent):\n\n \"\"\"\n Simple Trading Agent that compares the \"n\" past mid-price observations with\n the \"m\" past observations and places a buy limit order if the\n \"n\" mid-price average <= \"m\" mid-price average minus margin, or a sell\n limit order if the \"n\" mid-price average >= \"m\" mid-price average plus\n margin\n \n THIS HAS TO BE A MARKET ORDER, NOT A LIMIT ORDER\n \"\"\"\n\n def __init__(self, id, name, type, symbol='IBM', starting_cash=100000,\n min_size=50, max_size=100, lambda_a=0.05,\n log_orders=False, random_state=None, short_duration=20,\n long_duration=40, margin=0):\n\n super().__init__(id, name, type, starting_cash=starting_cash,\n log_orders=log_orders, random_state=random_state)\n\n # received information\n self.symbol = symbol\n self.min_size = min_size # Minimum order size\n self.max_size = max_size # Maximum order size\n self.short_duration = short_duration\n self.long_duration = long_duration\n self.margin = margin\n self.lambda_a = lambda_a\n self.log_orders = log_orders\n\n # initialise setup\n self.order_size = self.random_state.randint(self.min_size, self.max_size)\n self.mid_list = []\n self.ma_short_list = []\n self.ma_long_list = []\n self.state = \"AWAITING_WAKEUP\"\n\n def kernelStarting(self, startTime):\n super().kernelStarting(startTime)\n\n def kernelStopping(self):\n # Always call parent method to be safe.\n super().kernelStopping()\n\n def wakeup(self, currentTime):\n\n \"\"\" Agent wakeup is determined by self.wake_up_freq \"\"\"\n\n can_trade = super().wakeup(currentTime)\n\n if not can_trade:\n return\n\n self.getCurrentSpread(self.symbol)\n self.state = 'AWAITING_SPREAD'\n\n\n def receiveMessage(self, currentTime, msg):\n\n \"\"\"\n Mean reversion agent actions are determined after obtaining the best\n bid and ask in the LOB\n \"\"\"\n\n super().receiveMessage(currentTime, msg)\n if (self.state == 'AWAITING_SPREAD' and\n msg.body['msg'] == 'QUERY_SPREAD'):\n\n # query bid/ask price\n bid, bidvolume, ask, askvolume = self.getKnownBidAsk(self.symbol)\n self.bidvol = bidvolume\n self.askvol = askvolume\n\n if bid and ask:\n \n mid = (bid + ask) / 2\n self.mid_list.append(mid)\n\n # determine mid-price\n\n if len(self.mid_list) > self.long_duration:\n\n self.mid_list.pop(0)\n\n\n # Determine Moving Average \"n\" after n datapoints\n self.ma_short = MeanReversionAgent.ma(self.mid_list, n=self.short_duration)[-1].round(0)\n self.ma_long = MeanReversionAgent.ma(self.mid_list, n=self.long_duration)[-1].round(0)\n\n # Only start comparing once both MAs become available\n if self.ma_short and self.ma_long:\n \n # 20210513 Chris Cho: Query new order size\n buyorder = np.round(self.askvol**0.35)\n sellorder = np.round(self.bidvol**0.35)\n # 20200928 Chris Cho: Added the margin function\n if (self.ma_short > self.ma_long - self.margin):\n\n self.placeMarketOrder(self.symbol, quantity=sellorder,\n is_buy_order=False)\n\n\n elif (self.ma_short < self.ma_long + self.margin):\n\n self.placeMarketOrder(self.symbol, quantity=buyorder,\n is_buy_order=True)\n \n\n # set wakeup time\n self.setWakeup(currentTime + self.getWakeFrequency())\n self.state = 'AWAITING_WAKEUP'\n\n def getWakeFrequency(self):\n\n delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)\n return pd.Timedelta('{}ns'.format(int(round(delta_time))))\n\n # 20201026 Chris Cho: function to query order size\n def 
getOrderSize(self):\n\n # round up the order size to prevent orders of size 0\n order_size = np.ceil(70/np.random.power(3.5))\n\n # select random number\n i = self.random_state.rand()\n\n # with a chance, submit order as it is\n if i < 0.8:\n self.order_size = order_size\n\n # otherwise, round to nearest 10 orders\n else:\n\n # quick hack to prevent orders rounding to 0\n if order_size < 5:\n order_size += 5\n\n # round to nearest 10\n self.order_size = np.round(order_size, -1)\n\n return None\n\n @staticmethod\n def ma(a, n=20):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n","sub_path":"agent/MeanReversionAgent.py","file_name":"MeanReversionAgent.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"151684745","text":"from enum import Enum\n\nfrom rv.controller import Controller\nfrom rv.modules import Behavior as B, Module\n\n\nclass Reverb(Module):\n\n name = mtype = \"Reverb\"\n mgroup = \"Effect\"\n flags = 0x000051\n\n behaviors = {B.receives_audio, B.sends_audio}\n\n class Mode(Enum):\n hq = 0\n hq_mono = 1\n lq = 2\n lq_mono = 3\n\n dry = Controller((0, 256), 256)\n wet = Controller((0, 256), 64)\n feedback = Controller((0, 256), 256)\n damp = Controller((0, 256), 128)\n stereo_width = Controller((0, 256), 256)\n freeze = Controller(bool, False)\n mode = Controller(Mode, Mode.hq)\n all_pass_filter = Controller(bool, True)\n room_size = Controller((0, 128), 16)\n random_seed = Controller((0, 32768), 0)\n","sub_path":"rv/modules/reverb.py","file_name":"reverb.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"517500026","text":"#! /usr/bin/python\n# -*- coding: latin1 -*-\n\n\"\"\"\nConverts a GRADS readable dataset to the data model.\n\nModule for reading a GRADS compatible raster file and exporting it to the data model\n(or netCDF-file by using GRADS functions) that is consisting of the following files:\nfiles numpy data array,coordinate metadata xml file and NCML NetCDF XML file.\nData is considered as grid, therefore the shape of the output numpy array is:\n(variable, time, z, lat, lon). This program was particularly written to convert\nGRAPES GRIB raster files. Find more information in the documentation.\n\"\"\"\n\n__author__= \"Nicolai Holzer\"\n__author_email__ = \"first-name dot last-name @ mailbox.tu-dresden.de\"\n__date__ =\"2011-03-28\"\n__version__ = \"v0.1.3\" #MajorVersion(backward_incompatible).MinorVersion(backward_compatible).Patch(Bug_fixes)\n\n\n#Changelog\n#-------------------------------------------------------------------------------\n#2011-01-14: v0.1.3 logging implemented, functionalities changed\n#2010-12-14: v0.1.2 parser added, functionalities changed\n#2010-11-24: v0.1.1 comments and docstrings added\n#2010-11-15: v0.1.0 first version\n\n\n#Imported libraries\n#-------------------------------------------------------------------------------\n#standard libraries\nimport sys\nimport time\nfrom optparse import OptionParser #Parser\nimport logging\n\n#related libraries\nimport numpy\n\n#Importing GRADS\n#Extends the GrADS client class GaCore, providing methods for exchanging\n#n-dimensional NumPy array data between Python and GrADS.\nimport grads.ganum as ganum\n\n#This module extends the GrADS client class by providing methods for\n#exchanging n-dimensional NumPy array data between Python and GrADS\n#import grads.numtypes as numtypes\n\n#A simple container class to collect output for query() operations.\n#import grads.gahandle as gahandle\n\n\n#local applications / library specific import\nfrom interface_Settings import *\nfrom interface_ProcessingTools import *\nfrom etc.progressBar import * #needs empty '__init__.py' file in directory\n\n#===============================================================================\n\n#Module constants (Parser)\n#-------------------------------------------------------------------------------\nUSAGE = \"%prog [options] operation data\\\n \\n[options]:\\\n \\n type '--help' for more information\\\n \\n\\\n \\noperation:\\\n \\n - grads2Model Convert GRADS raster image file (here GRAPES GRIB data) to data model\\\n \\n - printGrads Read GRADS file and print it on screen\\\n \\n - testGrads Test GRADS functionalities\\\n \\n\\\n \\ndata:\\\n \\n Raster data file that is readable by GRADS library\"\n\nDESCRIPTION= \"Conversion tool of CEOP-AEGIS data model for GRADS readable raster data\"\nEPILOG = \"Author: \"+__author__+\" (E-mail: \"+__author_email__+\")\"\n\nVERSION = \"%prog version \"+__version__+\" from \"+__date__\n\n\n#Module default values / constants, may be overwritten by OptionParser\n#-------------------------------------------------------------------------------\nNUMPYDATA_DTYPE = 'float32' #Default data type of output numpy array\nNODATA = 0 #Default nodata value of output numpy array\n\n#Multiplicator for each time value, should be same unit as of reference time\n#Value can't yet be extracted of Grib Metadata automatically. 
See Grib Metadata file for finding this value\nDATATIMESTEP = 0.5 \n\nMODULE_LOGGER_ROOT = 'grads' #Logger root name\n\n#_______________________________________________________________________________\n\nclass ControlModelGrads:\n \"\"\"Control class for model 'ModelGradsRead'. This class is providing all available functions for reading data\"\"\"\n\n def __init__(self, infile_, option_):\n \"\"\"\n Constructor for new control instance of specific file.\n\n INPUT_PARAMETERS:\n infile - name of data file with filename extension (string)\n option - Parser.options arguments\n\n COMMENTS:\n Suffixes will be automatically assigned and must respect the declarations\n in the module 'interface_Settings'.\n \"\"\"\n \n infile = str(infile_).rsplit('__',1)\n self.inputFile = infile[0]\n self.pModelGradsRead = ModelGradsRead(self.inputFile)\n\n self.pParserOptions = option_\n self.pLogger = logging.getLogger(MODULE_LOGGER_ROOT+\".\"+__name__+\".\"+self.__class__.__name__)\n self.pLogger.info(\"Open project '\" + self.inputFile + \"':\")\n \n\n #def __del__(self):\n #\"\"\"Desctructor\"\"\"\n \n\n def writeGradsNumpyData(self):\n \"\"\"Read GRADS file and save data as numpy data array according to the specifications\n of the data interface\"\"\"\n\n #Make a copy of the GRADS-file as numpy file\n pGradsData = self.pModelGradsRead.readGradsFile(self.pParserOptions.dataType)\n \n #Optional to select specific data from time stamp\n if not self.pParserOptions.specificData is None: #specificData is choosen\n pGradsData = self.pModelGradsRead.choseSpecificData(pGradsData, self.pParserOptions.specificData)\n\n #Export data as new numpy file\n self.pModelGradsRead.writeNumpyData(pGradsData)\n return\n\n\n def writeGradsMetadata(self):\n \"\"\"Get metadata from a GRADS readable file and write metadata to coordinate metadata file and\n NCML XML file according to the specifications of the data interface\"\"\"\n\n self.pModelGradsRead.writeMetadataNcml(self.pParserOptions.nodataValue)\n self.pModelGradsRead.writeMetadataNumpymeta(self.pParserOptions.specificData)\n return\n\n\n #optional\n def completeDataModelManually(self):\n \"\"\"Complete missing data and metadata manually\"\"\"\n\n self.pModelGradsRead.completeDataVariables()\n self.pModelGradsRead.completeMetadataNcml()\n self.pModelGradsRead.completeMetadataNumpymeta() #not implemented\n return\n\n\n #optional\n def printGradsMetadata(self):\n \"\"\"Read GRADS readable file and print metadata on screen\"\"\"\n\n self.pModelGradsRead.printGradsMetadata()\n return\n\n\n #optional\n def testGradsFunctionality(self):\n \"\"\"Test GRADS functionality by testing its functions and creating a NetCDF\n file automatically\"\"\"\n\n self.pModelGradsRead.grib2NetCdf_gradsTest()\n return\n\n\n#_______________________________________________________________________________\n\nclass ModelGradsRead:\n \"\"\"This class contains functions to handle read operations on GRADS data and is controlled by\n the class 'ControlModelGrads'.\n This class was in particularly written to handle GRAPES GRIB data.\"\"\"\n\n\n def __init__(self, infile_):\n \"\"\"\n Constructor.\n\n INPUT_PARAMETERS:\n infile - name of GRADS file name with filename extension (string)\n \"\"\"\n self.pDefaultSettings = DefaultSettings()\n \n self.gradsFileName = infile_ #With file name extension\n\n #infile = self.gradsFileName.rsplit('.',1) #without file name extension\n self.numpyDataName = infile_+FILENAME_SUFFIX_NUMPYDATA\n self.ncmlName = infile_+FILENAME_SUFFIX_NCML\n self.numpymetaName = 
infile_+FILENAME_SUFFIX_NUMPYXML\n\n #Use Processing Tools\n self.pProcessingTool = ProcessingTool()\n self.pProcessNcml = ProcessNcml(self.ncmlName)\n self.pProcessNumpymeta = ProcessNumpymeta(self.numpymetaName)\n\n self.pLogger = logging.getLogger(MODULE_LOGGER_ROOT+\".\"+__name__+\".\"+self.__class__.__name__)\n\n #Read GRADS file\n #Start the GRADS application, creating new instance\n #Depending on GRADS version, 'Bin' is telling which GRADS executable to start\n #For 2.0a7 this is 'grads' and 'gradsdap'\n try:\n self.pGa = ganum.GaNum(Bin='grads', Echo=False, Window=False)\n self.pGa.open(self.gradsFileName)\n except:\n raise Exception (\"Opening of file '\" + str(self.gradsFileName) + \"' failed. Check if it exists and if filename suffix is set.\")\n \n\n def __del__(self):\n \"\"\"Destructor\"\"\"\n #Close GRADS instance\n del self.pGa\n\n\n def readGradsFile(self, dataType_):\n \"\"\"Reads a GRADS file and returns GRADS data as numpy array.\n Argument 'dataType' defines the data type of the resulting numpy array.\"\"\"\n\n pGa = self.pGa\n \n #Get file information via GRADS\n #-------------------------------------------------------------------------------\n # Query dataset information, command available for \"file\" and \"dims\"\n pGa_queryFile = pGa.query(\"file\")\n pGa_queryDims = pGa.query(\"dims\")\n\n #Get dimension values and set dimensions\n dimX = pGa_queryFile.nx #number of longitude points\n dimY = pGa_queryFile.ny #number of latitude points\n #dimZ = pGa_queryFile.nz #z-dimension not used in GRAPES data, level = 1\n dimT = pGa_queryFile.nt #number of time values in file\n dimVar = pGa_queryFile.nvars #numbers of variables in file\n\n varsNames = pGa_queryFile.vars #names of variables in file\n\n pGa(\"set z 1\") #GRADS command to set dimensions\n pGa(\"set t 1 last\") #Get all time values; define timestamp later in python\n\n #Define progress bar settings\n widgetsBar = ['Import status: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=dimVar).start()\n\n #Print Dataset information on the screen\n #print \"\\nCoordinate information: \\n\", pGa.coords()\n #print \"\\nFile information: \\n\", pGa_queryFile\n #print \"\\nDimension information: \\n\", pGa_queryDims\n\n\n #Writing numpy file\n #-------------------------------------------------------------------------------\n pDataType = self.pProcessingTool.dataType_2Numpy(dataType_)\n pGradsData = numpy.zeros((dimVar,dimT,dimY,dimX), dtype = pDataType)# All data\n \n #Reading all variables in GRADS file\n for i_var in range(0,dimVar,1): # otherwise returns list of ints from >= start and < end: 0 .. 
10\n\n self.pLogger.info(\"Reading GRADS variable ID '\" + str(i_var) + \"' with name '\" + str(varsNames[i_var]) + \"'...\")\n pDataArray = pGa.expr(varsNames[i_var]) #Export GRADS field of specific variable as numpy-like array\n \n #Create a numpy file per file, so for all variables per file\n pGradsData[i_var,:,:,:] = numpy.asarray(pDataArray.astype(pDataType))\n\n progressBar.update(i_var+1)# Progress bar\n\n\n #Change dimensions of numpy array so that it gets conform with the data model specifications\n #-------------------------------------------------------------------------------\n dimVar = pGradsData.shape[0] #Number of variables in array\n dimT = pGradsData.shape[1] #Time Dimension\n dimZ = int(1) #Level Dimensions\n dimY = pGradsData.shape[2] #Last but one axis top to bottom: lat -> row\n dimX = pGradsData.shape[3] #Last axis left to right: lon -> col\n\n pBuffer = numpy.zeros((dimY,dimX), dtype = pGradsData.dtype) #Buffer for calculation\n pGradsDataNorm = numpy.zeros((dimVar,dimT,dimZ,dimY, dimX), dtype = pGradsData.dtype) #Normed numpy data array\n\n #Change dimension order that is \"var,time,y,x' and is to be 'var,time,level,y,x'.\n #This is neccessary so that the time variables dimension can be set to unlimited (only possible for first variable).\n #Define progress bar settings\n widgetsBar = ['Making data conform for data model: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=dimVar).start()\n\n for i_var in range(0,dimVar,1):\n for i_time in range(0, dimT, 1):\n pBuffer[:,:] = pGradsData[i_var,i_time,:,:] #Extract data to buffer\n pGradsDataNorm[i_var,i_time,0,:,:] = pBuffer[:,:] #Write Data in output numpy array\n\n progressBar.update(i_var+1)# Progress bar\n\n return pGradsDataNorm\n\n\n def choseSpecificData(self, pGradsData_, dataTime_):\n \"\"\"Optional: Extract those datasets that fall within the wanted timestamp\n\n Define time stamp in list dataTime. dataTime[0] is start value, dataTime[1]\n end value, as time units since reference time. \n Example: nt = 97 values; first (1st) value first day 0h00, half hour steps,\n 96th value: second day 23h30, 97th value third day 0h00\n Time intervall has for example to consist of 24 hours, so 47 values!\n position numbers (start value = 1, not 0!!!), not index numbers of arrays; needed for dimension setting\n DATASTART = 25 #12h00 first day\n DATASTOP = 72 #11h30 second day\n \"\"\"\n\n self.pLogger.info(\"Extract specific data as implemented in function 'choseSpecificData'...\")\n\n pGradsData = pGradsData_\n dataStart = int(dataTime_[0])\n dataStop = int(dataTime_[1])\n\n #Define progress bar settings\n widgetsBar = ['Extracting specific data: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=(dataStop-(dataStart-1))).start()\n\n\n #Get all datasets from wanted time intervall. Index of time value is defined in\n #global variables. Export data as numpy file\n\n #Number of time values for wanted time stamp\n grapesTimeStamp = dataStop - dataStart + 1#dimension value, not index value for array!!! E.g. (48-1)+1=48\n\n #Only GRADS data of wanted time stamp\n pGradsDataTS = numpy.zeros((pGradsData.shape[0],grapesTimeStamp,pGradsData.shape[2],\\\n pGradsData.shape[3], pGradsData.shape[4]), dtype = pGradsData.dtype)\n\n #!Range: Last value not taken for iteration! 
So don't use DATASTOP-1, but DATASTOP!\n i = 0\n for j in range(dataStart-1, dataStop, 1): #array index numbers, not position numbers\n pGradsDataTS[:,i,:,:,:] = pGradsData[:,j,:,:,:]\n i = i+1\n\n progressBar.update(j-(dataStart-1)+1)# Progress bar\n \n return pGradsDataTS\n\n\n def writeNumpyData(self, pNumpyData_):\n \"\"\"Export numpy data array to file\"\"\"\n\n self.pLogger.info(\"Numpy output will be file saved as '\"+ str(self.numpyDataName) + \"'...\")\n numpy.save(str(self.numpyDataName), pNumpyData_) #Better as 'tofile'. Also possible: 'dump'\n self.pLogger.info(\"Done. Shape of resulting numpy file: '\" + str(pNumpyData_.shape) + \"'; Data type: '\" + str(pNumpyData_.dtype) + \"'.\")\n\n return\n\n\n def writeMetadataNcml(self, nodata_):\n \"\"\"Create new NCML XML file according to the specifications of the data model and\n complete this file by the metadata that can be extracted out of the GRADS file\"\"\"\n\n #Get metadata information from file \n #-------------------------------------------------------------------------------\n pGa = self.pGa\n pGa_queryFile = pGa.query(\"file\") # Query dataset information, command available for \"file\" and \"dims\"\n\n pNumpyData = numpy.load(self.numpyDataName)\n\n dimVar = pNumpyData.shape[0] #Number of variables in array\n varsNames = pGa_queryFile.vars #names of variables on file\n varsTitles = pGa_queryFile.var_titles #var_titles are equivalent to long_name\n\n #Define progress bar settings\n widgetsBar = ['Creating Ncml metadata file: ', Percentage(), ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(), ' ', FileTransferSpeed()]\n progressBar = ProgressBar(widgets=widgetsBar, maxval=dimVar).start()\n \n #Write metadata NCML file\n #-------------------------------------------------------------------------------\n self.pProcessNcml.createMacroNcmlFile()\n self.pProcessNcml.fillNcmlMacroWithNumpy(pNumpyData) \n\n #Correct and complete entries\n self.pProcessNcml.changeGlobalAttribute('title', 'value', pGa_queryFile.title)\n\n for i_var in range(0,dimVar,1): # otherwise returns list of ints from >= start and < end: 0 .. 
10\n varName = 'variable #'+str(i_var)\n varsDescriptions = varsTitles[i_var].rsplit('0 ') #To get rid of weird values at beginning\n \n self.pProcessNcml.changeVariable(varName, 'name', varsNames[i_var])\n self.pProcessNcml.changeLocalAttribute(varsNames[i_var], 'long_name', 'value', varsDescriptions[1])\n self.pProcessNcml.changeLocalAttribute(varsNames[i_var], '_FillValue', 'value', str(nodata_))\n\n progressBar.update(i_var+1)# Progress bar\n\n return\n\n\n def writeMetadataNumpymeta(self, dataTime_):\n \"\"\"Create new metadata coordinate XML file according to the specifications of the data model and\n complete this file by the metadata that can be extracted out of the GRADS file\"\"\"\n\n pGa = self.pGa\n\n #Get metadata information from file by the use of GRADS\n #-------------------------------------------------------------------------------\n #Query dataset information, command available for \"file\" and \"dims\"\n pGa_queryDims = pGa.query(\"dims\")\n pGa_queryFile = pGa.query(\"file\")\n\n #Get latitude / longitude values\n latMin = pGa_queryDims.lat[0]#ymin\n latMax = pGa_queryDims.lat[1]#ymax\n lonMin = pGa_queryDims.lon[0]#xmin\n lonMax = pGa_queryDims.lon[1]#xmax\n\n #Get time values\n #Number of time values for wanted time stamp, otherwise DimT\n if not dataTime_ is None: #specificData is choosen\n dataStart = int(dataTime_[0])\n dataStop = int(dataTime_[1])\n else:\n dataStart = 1\n dataStop = pGa_queryFile.nt #dimT, number of time values in file\n grapesTimeStamp = dataStop - dataStart + 1 #Dimension value, not index value for array!!! E.g. (48-1)+1=48\n\n referenceTimeGrib = pGa_queryDims.time[0] #Reference time of data in grib metadata format\n referenceTimeNetCdf = self.__timeUnitGrib2NetCdf(referenceTimeGrib, dataStart) #Reference time of data translated to NetCDF metadata format\n pTimes = self.pProcessingTool.createTimeValuesNumpy(referenceTimeNetCdf, grapesTimeStamp, DATATIMESTEP) #Calculate time values\n\n\n #Write coordinate metadata file\n #-------------------------------------------------------------------------------\n self.pProcessNumpymeta.createMacroNumpymetaFile()\n\n self.pProcessNumpymeta.writeNumpyMetadataValues(pTimes, 'time') #Either time values or min/max\n\n self.pProcessNumpymeta.setAttribute('numpymeta', 'latitude', 'min', str(latMin))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'latitude', 'max', str(latMax))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'longitude', 'min', str(lonMin))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'longitude', 'max', str(lonMax))\n\n self.pProcessNumpymeta.setAttribute('numpymeta', 'height', 'values', str(1))\n self.pProcessNumpymeta.setAttribute('numpymeta', 'height', 'separator', str(','))\n\n return\n\n\n #Other functions\n #-------------------------------------------------------------------------------\n\n def printGradsMetadata(self):\n \"\"\"Read GRADS file and print metadata on screen\"\"\"\n\n infile = self.gradsFileName\n\n ga = ganum.GaNum(Bin='grads', Echo=False, Window=True)#Starts the GRADS application\n #Depending on GRADS version, bin is telling which GRADS executable to start\n #For 2.0a7 this is 'grads' and 'gradsdap'\n try:\n fh = ga.open(infile)\n except:\n raise Exception(\"Error: GRADS file does not exist: '\" + str(infile) + \"'.\")\n exit()\n\n #Query metadata information\n qh_file = ga.query(\"file\")\n qh_dims = ga.query(\"dims\")\n\n #Print metadata information on screen\n self.pLogger.info(\"---------------------------------------------------------------------\")\n 
self.pLogger.info(\"File information:\")\n self.pLogger.info(qh_file)\n self.pLogger.info(\"Dimension information:\")\n self.pLogger.info(qh_dims)\n self.pLogger.info(\"---------------------------------------------------------------------\")\n\n return\n\n\n def __timeUnitGrib2NetCdf(self, timeGribStart_, dataStart_):\n \"\"\"Transforms reference time from Grib metadata format to time format of\n NetCDF time units attribute. timeGribStart is reference time, dataStart\n is offset time.\"\"\"\n\n #Get time values from infofile and adapt data\n #-------------------------------------------------------------------------------\n fileTimeStart = timeGribStart_ #Start value of data\n\n if len(fileTimeStart) == 12: #Data format for example 00Z11JAN2008\n timestart_time = fileTimeStart[0:2]+':00:0.0'\n timestart_day = fileTimeStart[3:5]\n timestart_month = fileTimeStart[5:8]\n timestart_year = fileTimeStart[8:12]\n #print timestart_year, timestart_month, timestart_day, timestart_time\n elif len(fileTimeStart) == 15: #Data format for example 00:30Z11JAN2008\n timestart_time = fileTimeStart[0:5]+':0.0'\n timestart_day = fileTimeStart[6:8]\n timestart_month = fileTimeStart[8:11]\n timestart_year = fileTimeStart[11:15]\n #print timestart_year, timestart_month, timestart_day, timestart_time\n else:\n raise Exception(\"Error in function 'timeUnitGrib2NetCdf': Time specification in infofile can't be read, process aborted...\")\n \n #Change month from word statement to number\n if timestart_month == 'JAN':\n timestart_month_nr = '01'\n elif timestart_month == 'FEB':\n timestart_month_nr = '02'\n elif timestart_month == 'MAR':\n timestart_month_nr = '03'\n elif timestart_month == 'APR':\n timestart_month_nr = '04'\n elif timestart_month == 'MAI':\n timestart_month_nr = '05'\n elif timestart_month == 'JUN':\n timestart_month_nr = '06'\n elif timestart_month == 'JUL':\n timestart_month_nr = '07'\n elif timestart_month == 'AUG':\n timestart_month_nr = '08'\n elif timestart_month == 'SEP':\n timestart_month_nr = '09'\n elif timestart_month == 'OCT':\n timestart_month_nr = '10'\n elif timestart_month == 'NOV':\n timestart_month_nr = '11'\n elif timestart_month == 'DEC':\n timestart_month_nr = '12'\n else:\n raise Exception(\"Error in function 'timeUnitGrib2NetCdf': Month specification in infofile corrupt, process aborted...\")\n \n########### Hack to change timeUnitNetCdf in case that not all GRAPES data is used (like here the timestamp)\n offsetTime = (dataStart_ - 1) * DATATIMESTEP #(25-1)*0.5=12h00\n if (offsetTime < 24 and offsetTime % 2 == 0): #must be full hours\n timestart_time_hours = int(timestart_time[0:1])+int(offsetTime)\n timestart_time = str(timestart_time_hours)+str(timestart_time[2:9])\n else:\n raise Exception (\"Error in function 'timeUnitGrib2NetCdf': time offset >= 24 is not implemented yet! DATATIMESTEP unless full hours is not implemented yet!\")\n\n\n #Set NetCDF time unit, e.g. 
\"hours since 2008-01-11 00:00:0.0\"\n #-------------------------------------------------------------------------------\n timeUnitNetCdf = 'hours since '+ str(timestart_year)+ '-'+ str(timestart_month_nr)+\\\n '-'+ str(timestart_day)+' '+ str(timestart_time)\n\n return timeUnitNetCdf\n\n\n def grib2NetCdf_gradsTest(self):\n \"\"\"Test GRADS functionality by testing functions and creating a NetCdf file\"\"\"\n\n #Open file\n infile = self.gradsFileName\n\n ga = ganum.GaNum(Bin='grads', Echo=False, Window=True)#Starts the grads application\n #Depending on Grads version, bin is telling which grads executable to start\n #For 2.0a7 this is 'grads' and 'gradsdap'\n try:\n fh = ga.open(infile)\n except:\n raise Exception(\"Error: GRADS file does not exist: '\" + str(infile) + \"'.\")\n exit()\n\n #Printing metadata on screen\n qh_file = ga.query(\"file\")\n qh_dims = ga.query(\"dims\")\n\n self.pLogger.info(\"---------------------------------------------------------------------\")\n self.pLogger.info(\"File information:\")\n self.pLogger.info(qh_file)\n self.pLogger.info(\"Dimension information:\")\n self.pLogger.info(qh_dims)\n self.pLogger.info(\"---------------------------------------------------------------------\")\n\n\n #Create one netCDF-file of specific variable by using GRADS commands\n ga(\"set z 1\")\n ga(\"set t 1 last\")\n\n ga(\"display gsw\")\n ga(\"define out = gsw\")\n ga(\"set sdfwrite output_file_GRADS_gsw.nc\")\n ga(\"sdfwrite out\")\n\n raw_input(\"Press Enter to terminate.\") #Wait\n\n del ga\n\n return\n\n\n #Data specific functions\n #-------------------------------------------------------------------------------\n\n def completeDataVariables(self):\n \"\"\"Complete missing data variable value modification manually\n\n Example: Scale data values in case that units prefix have to be changed\n (e.g. from hPa to Pa) due to defined unit in standard_name entry.\"\"\"\n\n pGradsData = numpy.load(self.numpyDataName)\n\n #Scale of data. Here: data is in hPa, must be in Pa\n pGradsData = self.pProcessingTool.scaleNumpyDataVariable(pGradsData, 5, 100.0) #p_pbl\n pGradsData = self.pProcessingTool.scaleNumpyDataVariable(pGradsData, 7, 100.0) #ps\n pGradsData = self.pProcessingTool.scaleNumpyDataVariable(pGradsData, 8, 100.0) #psl\n\n numpy.save(self.numpyDataName, pGradsData) #Better then 'tofile'. 
Also possible: 'dump'\n\n return\n\n\n def completeMetadataNcml(self):\n \"Complete missing data in NCML XML file manually\"\n\n self.pProcessNcml.changeGlobalAttribute('source', 'value', 'No information available')\n self.pProcessNcml.changeGlobalAttribute('references', 'value', 'No information available')\n self.pProcessNcml.changeGlobalAttribute('comment', 'value', 'No information available')\n\n self.pProcessNcml.changeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'units', 'value', '1') #'Level' is not conform to udunits!\n self.pProcessNcml.changeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'long_name', 'value', 'level')\n###############Define Standard Name!\n #self.pProcessNcml.changeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'standard_name', 'value', '???')\n self.pProcessNcml.removeLocalAttribute(str(self.pDefaultSettings.axisHeightName), 'standard_name')\n\n self.pProcessNcml.changeLocalAttribute('pblh', 'units', 'value', 'm')\n self.pProcessNcml.changeLocalAttribute('pblh', 'standard_name', 'value', 'atmosphere_boundary_layer_thickness')\n \n self.pProcessNcml.changeLocalAttribute('tpbl', 'units', 'value', 'K')\n self.pProcessNcml.changeLocalAttribute('tpbl', 'standard_name', 'value', 'tropopause_air_temperature')\n \n self.pProcessNcml.changeLocalAttribute('qpbl', 'units', 'value', 'kg kg-1')\n self.pProcessNcml.changeLocalAttribute('qpbl', 'standard_name', 'value', 'specific_humidity')\n \n self.pProcessNcml.changeLocalAttribute('upbl', 'units', 'value', 'm s-1')\n self.pProcessNcml.changeLocalAttribute('upbl', 'standard_name', 'value', 'x_wind')\n \n self.pProcessNcml.changeLocalAttribute('vpbl', 'units', 'value', 'm s-1')\n self.pProcessNcml.changeLocalAttribute('vpbl', 'standard_name', 'value', 'y_wind')\n \n self.pProcessNcml.changeLocalAttribute('p_pbl', 'units', 'value', 'Pa')\n self.pProcessNcml.changeLocalAttribute('p_pbl', 'standard_name', 'value', 'tropopause_air_pressure')\n \n self.pProcessNcml.changeLocalAttribute('q2', 'units', 'value', 'kg kg-1')\n self.pProcessNcml.changeLocalAttribute('q2', 'standard_name', 'value', 'surface_specific_humidity')\n \n self.pProcessNcml.changeLocalAttribute('ps', 'units', 'value', 'Pa')\n self.pProcessNcml.changeLocalAttribute('ps', 'standard_name', 'value', 'surface_air_pressure')\n \n self.pProcessNcml.changeLocalAttribute('psl', 'units', 'value', 'Pa')\n self.pProcessNcml.changeLocalAttribute('psl', 'standard_name', 'value', 'air_pressure_at_sea_level')\n \n self.pProcessNcml.changeLocalAttribute('glw', 'units', 'value', 'W m-2')\n self.pProcessNcml.changeLocalAttribute('glw', 'standard_name', 'value', 'atmosphere_net_rate_of_absorption_of_longwave_energy')\n \n self.pProcessNcml.changeLocalAttribute('gsw', 'units', 'value', 'W m-2')\n self.pProcessNcml.changeLocalAttribute('gsw', 'standard_name', 'value', 'atmosphere_net_rate_of_absorption_of_shortwave_energy')\n \n return\n\n\n def completeMetadataNumpymeta(self):\n \"Complete missing data in metadata coordinate XML file manually\"\n #--> Nothing to complete at the moment\n return\n\n\n\n#_______________________________________________________________________________\n\ndef main():\n \"\"\"\n Main function.\n\n This function represents the user interface and is called when the\n program is executed. 
Start the program by executing it with the following\n statement in your shell: grads_2Interface.py --help\n \"\"\"\n\n startTime = time.time()\n pDefaultSettings = DefaultSettings()\n\n #Parser definition\n #-------------------------------------------------------------------------------\n pParser = OptionParser(usage=USAGE, version = VERSION, description = DESCRIPTION, epilog = EPILOG)\n\n pParser.set_defaults(completeModel = False)\n pParser.set_defaults(isDoc = False)\n pParser.set_defaults(logLevel = pDefaultSettings.loggerLevelConsole)\n pParser.set_defaults(nodataValue = NODATA)\n pParser.set_defaults(dataPath = pDefaultSettings.dataDirectory) \n pParser.set_defaults(dataType = NUMPYDATA_DTYPE)\n\n \n pParser.add_option(\"-c\", \"--complModel\", action=\"store_true\", dest='completeModel', help=\"Complete data model by functions particularly written for specific data (default = %default)\")\n pParser.add_option(\"-d\", \"--doc\", action=\"store_true\", dest='isDoc', help=\"Give more information by printing docstrings (default = %default)\")\n pParser.add_option('-l', '--log', action = 'store', dest='logLevel', choices = ['debug','info','warning','error','critical'], nargs = 1, help=\"Minimum level for printing information to the console (default = %default)\")\n pParser.add_option('-n', '--nodata', action = 'store', dest='nodataValue', nargs = 1, help=\"Set nodata value (default = %default)\")\n pParser.add_option('-p', '--path', action = 'store', type ='string', dest='dataPath', nargs = 1, help=\"Directory for input / output files (default = %default)\")\n pParser.add_option('-s', '--specData', action = 'store', dest='specificData', nargs = 2, help=\"Only extract specific data as implemented in function 'choseSpecificData' \\\n between DATASTART (arg1) and DATASTOP (arg2)\") #(default = %default)\")\n pParser.add_option('-t', '--dtype', action = 'store', dest='dataType', choices = [''] + NUMPY_DTYPES, nargs = 1, help=\"Define output data type of numpy array (default = %default)\")\n \n (options, args) = pParser.parse_args()\n\n\n #Initialize logger\n #-------------------------------------------------------------------------------\n pLog = LoggingInterface(MODULE_LOGGER_ROOT, options.logLevel, pDefaultSettings.loggerLevelFile) #Instance is necessary although if not used.\n pLogger = logging.getLogger(MODULE_LOGGER_ROOT+\".\"+__name__)\n pLogger.info(\"_____________________________________________________________________________________________\")\n pLogger.info(\"Starting program 'GRADS2INTERFACE' version '\" + str(__version__) + \"' from '\" + str(__date__) + \"':\")\n\n\n try:\n\n #Parse command line arguments and options\n #-------------------------------------------------------------------------------\n if len(args) != 2:\n pLogger.error(\"Parser error occured. See error messages on the screen.\")\n pParser.error(\"Incorrect number of arguments. Two arguments 'operation' and 'data' are nedded. \" \\\n +str(len(args))+\" arguments are given. 
Execute '%prog --help' for more information\")\n else:\n #args = sys.argv[1:]#sys.argv[0] is name of program being executed\n operation_ = args[0]\n infile_ = args[1]\n\n\n #Process parser options\n #-------------------------------------------------------------------------------\n if options.isDoc:\n pLogger.info(__doc__)\n sys.exit(0)\n\n dataPath = options.dataPath\n if not dataPath.endswith('/') and dataPath != '': #Adds '/' to path in case that this is not the case\n dataPath = dataPath+'/'\n infileName = dataPath+infile_ #Add path of data directory to filename\n\n\n #Run program\n #-------------------------------------------------------------------------------\n pControlModelGrads = ControlModelGrads(infileName, options)\n\n if operation_ == 'grads2Model':\n pLogger.info(\"Operation: Convert GRADS to data model\")\n pControlModelGrads.writeGradsNumpyData() #Write numpy data array\n pControlModelGrads.writeGradsMetadata() #Write metadata\n\n if options.completeModel:#optional\n pControlModelGrads.completeDataModelManually() #Complete data model manually\n\n elif operation_ == 'printGrads':\n pLogger.info(\"Operation: Print GRADS data on the screen\")\n pControlModelGrads.printGradsMetadata()\n\n elif operation_ == 'testGrads':\n pLogger.info(\"Operation: Test GRADS functionalities\")\n pControlModelGrads.testGradsFunctionality()\n\n else:\n pLogger.error(\"Parser error: Operation '\" + str(operation_) + \"' is unknown.\")\n pParser.error(\"Operation '\" + str(operation_) + \"' is unknown.\") #System exit code 2\n\n\n except Exception: #If Exceptiation occured in this module or all connected sub-modules\n pLogger.exception('Exception Error occured: ')\n raise\n\n finally:\n pLogger.info(\"Finished. Total processing time [s]: '\" + str(time.time() - startTime) + \"'.\")\n pLogger.info(\"_____________________________________________________________________________________________\")\n pLog.__del__()\n \n #pControlModelGrads.__del__()\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"samples/grads_2Interface.py","file_name":"grads_2Interface.py","file_ext":"py","file_size_in_byte":35013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"356548249","text":"\"\"\"\n* user: VR432075\n* fname: BUSATTO\n* lname: ALESSANDRO\n* task: biancaneve\n* score: 2.0\n* date: 2019-06-25 10:43:37.634220\n\"\"\"\nfrom __future__ import print_function\nimport sys\nif sys.version_info<(3,0):\n input=raw_input\n\n\ndef scambia(nani,p1,p2):\n x=nani[p1-1]\n nani[p1-1]=nani[p2-1]\n nani[p2-1]=x\n\ndef check(nani, h1, h2):\n num_nani=h2-h1+1\n total=0\n y=len(nani)+1\n prefix_sum=[0]*y\n \n for i in range(0,y-1):\n prefix_sum[i+1]=prefix_sum[i]+nani[i]\n\n for i in range(h1,h2+1):\n total += i\n i=len(prefix_sum)-1\n while i-num_nani >= 0 and prefix_sum[i]>=total:\n if(prefix_sum[i]-prefix_sum[i-num_nani]==total):\n return 1\n i=i-1\n return 0\n\n\ndef main():\n #r1=input()\n #split=r1.split()\n #n=int(split[0])\n #m=int(split[1])\n \n #disp_nani=input()\n #nani=int(disp_nani.split())\n n, m = map(int, input().split()) \n nani = map(int, input().split())\n for i in range(0,m):\n t, p1, p2 = map(int, input().split())\n #r=input()\n #r_split=r.split()\n #t=int(r_split[0])\n #p1=int(r_split[1])\n #p2=int(r_split[2])\n if t==1:\n scambia(nani,p1,p2)\n else:\n res=check(nani,p1,p2)\n if res==1:\n print(\"YES\")\n else:\n print(\"NO\")\n\nif __name__ == '__main__':\n main()","sub_path":"Algoritmi/2019-06-25/all-CMS-submissions/2019-06-25.10:43:37.634220.VR432075.biancaneve.py","file_name":"2019-06-25.10:43:37.634220.VR432075.biancaneve.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"337441097","text":"import random\ndef di_demo(myfile):\n print(myfile)\n fin = open(myfile,\"r\")\n mydi = dict()\n for fi in fin:\n mydi[fi] = random.randint(0,999999)\n return mydi\n\nx = input(\"Enter file name to create dictionary:\")\nprint(di_demo(x))\n","sub_path":"LAB3TASK9.py","file_name":"LAB3TASK9.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"114195257","text":"# credit to: http://zxi.mytechroad.com/blog/dynamic-programming/688-knight-probability-in-chessboard/\nclass Solution:\n def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n dp0 = [[0] * N for _ in range(N)]\n dp0[r][c] = 1\n for k in range(1, K+1):\n dp1 = [[0] * N for _ in range(N)]\n for i in range(N):\n for j in range(N):\n sum_ = 0\n for di, dj in zip([-2,-1,1,2,2,1,-1,-2], [-1,-2,-2,-1,1,2,2,1]):\n new_i = di + i\n new_j = dj + j\n if new_i in range(N) and new_j in range(N):\n dp1[i][j] += dp0[new_j][new_i]\n dp1, dp0 = dp0, dp1\n\n total = 0\n for i in range(N):\n for j in range(N):\n total += dp0[i][j]\n return total / 8 ** K\n \nclass Solution(object):\n def knightProbability(self, N, K, r, c):\n \"\"\"\n :type N: int\n :type K: int\n :type r: int\n :type c: int\n :rtype: float\n \"\"\"\n \n dp = [[[0] * N for _ in range(N)] for _ in range(K+1)]\n \n for row in range(N):\n for col in range(N):\n dp[0][row][col] = 1\n \n for k in range(1, K+1):\n for row in range(N):\n for col in range(N):\n \n for dr, dc in zip([-2, -1, 1, 2, 2, 1, -1, -2], [-1, -2, -2, -1, 1, 2, 2, 1]):\n nrow = row + dr\n ncol = col + dc\n if nrow in range(0, N) and ncol in range(0, N):\n dp[k][row][col] += dp[k-1][nrow][ncol]\n\n return float(dp[K][r][c]) / 8 ** K\n \n \n","sub_path":"python/688 Knight Probability in Chessboard.py","file_name":"688 Knight Probability in Chessboard.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"26384254","text":"\nclass Stack:\n def __init__(self):\n self.elements = []\n\n def push(self, ele):\n self.elements.append(ele)\n\n def pop(self):\n if self.empty():\n return None\n\n return self.elements.pop(len(self.elements) - 1)\n\n def empty(self):\n return len(self.elements) == 0\n\n\n# Processed vertex uses 0 indexed scheme\nclass Vertex:\n def __init__(self, id, adjVertices):\n self.id = id\n self.adjVertices = adjVertices\n\n def __str__(self):\n vertexStr = str(self.id) + \":\"\n for vert in self.adjVertices:\n vertexStr += \" \" + str(vert)\n return vertexStr\n\n\n# Raw vertex data uses 1 indexed scheme\ndef readGraphData(fileName, numVertices):\n graphData = [Vertex(i, []) for i in range(numVertices)]\n revGraphData = [Vertex(i, []) for i in range(numVertices)]\n with open(fileName) as f:\n for line in f:\n edge = line.split()\n start = int(edge[0]) - 1\n end = int(edge[1]) - 1\n # Ignore self connected edges\n if start == end:\n continue\n\n graphData[start].adjVertices.append(end)\n revGraphData[end].adjVertices.append(start)\n\n f.close()\n\n return graphData, revGraphData\n\n\ndef findStronglyConnectedGraph(graphData, revGraphData):\n numVertices = len(graphData)\n visited = {}\n for i in range(numVertices):\n visited[i] = False\n\n # First pass of DFS to do a topological sort to figure out\n # meta-graph info, which contains post order of each vertex\n # and topological ordering of each SCC.\n subDFSOrderStack = Stack()\n for i in range(numVertices):\n subDFSOrder = dfsI(revGraphData, i, visited)\n if len(subDFSOrder) > 0:\n subDFSOrderStack.push(subDFSOrder)\n\n # Reconstruct first DFS order of the first pass DFS\n insertionId = 0\n firstDFSOrder = [None] * numVertices\n while not subDFSOrderStack.empty():\n subDFSOrder = subDFSOrderStack.pop()\n newInsertionId = insertionId + len(subDFSOrder)\n firstDFSOrder[insertionId:newInsertionId] = subDFSOrder\n insertionId = newInsertionId\n\n # Reset visited record\n for i in range(numVertices):\n visited[i] = False\n\n sccSizes = []\n # Second pass of DFS to find out all SCCs\n for vertId in firstDFSOrder:\n size = dfsII(graphData, vertId, visited)\n if size > 0:\n sccSizes.append(size)\n\n return sccSizes\n\n\ndef dfsI(revGraphData, initialVertId, visited):\n if visited[initialVertId]:\n return []\n\n path = []\n stack = Stack()\n stack.push(initialVertId)\n while not stack.empty():\n vertId = stack.pop()\n # Important!!! It is possible traversing a cyclic graph\n # may result in pushing duplicated nodes into the stack/queue.\n if visited[vertId]:\n continue\n\n path.append(vertId)\n visited[vertId] = True\n vertex = revGraphData[vertId]\n for adjVertId in vertex.adjVertices:\n if not visited[adjVertId]:\n stack.push(adjVertId)\n\n return path\n\n\ndef dfsII(graphData, initialVertId, visited):\n if visited[initialVertId]:\n return 0\n\n nodeCounter = 0\n stack = Stack()\n stack.push(initialVertId)\n while not stack.empty():\n vertId = stack.pop()\n # Important!!! 
It is possible traversing a cyclic graph\n # may result in pushing duplicated nodes into the stack/queue.\n if visited[vertId]:\n continue\n\n visited[vertId] = True\n vertex = graphData[vertId]\n for adjVertId in vertex.adjVertices:\n if not visited[adjVertId]:\n stack.push(adjVertId)\n\n nodeCounter += 1\n\n return nodeCounter\n\n\ngraphData, revGraphData = readGraphData(\"./SCC2\", 9)\nsizes = findStronglyConnectedGraph(graphData, revGraphData)\nsizes.sort(reverse=True)\nprint(sizes[:5])\n","sub_path":"Py_solution/StrongConnectedGraph.py","file_name":"StrongConnectedGraph.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"463483317","text":"import datetime\n\nfrom django.contrib.auth.models import Group\n\nfrom api.models import *\nfrom rest_framework import serializers\nfrom django.template.defaultfilters import slugify\n\n\nclass SkillSerializer(serializers.ModelSerializer):\n class Meta:\n model = Skill\n fields = ('id', 'label')\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = '__all__'\n\nclass PortfolioSerializer(serializers.ModelSerializer):\n skills = serializers.PrimaryKeyRelatedField(queryset=Skill.objects.all(), many=True)\n class Meta:\n model = Portfolio\n fields = ('user', 'about', 'education', 'experience', 'phone', 'skills')\n extra_kwargs = {'user': {'required': False}}\n\n def create(self, validated_data):\n user = self.context['request'].user\n validated_data['user'] = user\n return super(PortfolioSerializer, self).create(validated_data)\n\n# this should work (but i'm not sure)\nclass ProjectSerializer(serializers.ModelSerializer):\n creator = UserSerializer(read_only=True)\n class Meta:\n model = Project\n fields = ('id', 'name', 'description', 'stage',\n 'chatRoom', 'creator', 'endDate', 'startDate')\n extra_kwargs = {\n 'chatRoom': {\n 'read_only': True\n }\n }\n def create(self, validated_data):\n return Project.objects.create(\n creator=self.context['request'].user, **validated_data,\n chatRoom=slugify(str(self.context['request'].user)+'-'+validated_data['name'])\n )\n\nclass TaskSerializer(serializers.ModelSerializer):\n developers = serializers.PrimaryKeyRelatedField(many=True, queryset=User.objects.all())\n class Meta:\n model = Task\n fields = ('id', 'description', 'deadline', 'stage', 'project', 'developers')\n\nclass CommentSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n class Meta:\n model = Comment\n fields = '__all__'\n def create(self, validated_data):\n return Comment.objects.create(\n user = self.context['request'].user, **validated_data\n )\n\nclass ApplicationSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n class Meta:\n model = Application\n fields = '__all__'\n\n def create(self, validated_data):\n return Application.objects.create(\n user=self.context['request'].user, **validated_data\n )\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = (\"password\", \"email\", \"first_name\", \"last_name\")\n extra_kwargs = {'password': {'write_only': True}, }\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n user = UserSerializer()\n\n class Meta:\n model= Profile\n fields = '__all__'\n\n def create(self, validated_data):\n user_data = validated_data.pop('user')\n username = user_data[\"email\"]\n role = validated_data.pop('role')\n if role not in Profile.RoleValues.values:\n raise str(\"Unsupported role exeption. Only %s supported. You set: '%s'.\", Profile.RoleValues.values, role)\n user = User(username=username, **user_data)\n user.set_password(user_data[\"password\"])\n user.save()\n profile = Profile.objects.create(user=user, role=role)\n group, created = Group.objects.get_or_create(name=role)\n user.groups.add(group)\n return profile\n\n\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"489238266","text":"# !/usr/bin/python\r\n\r\nimport re, os\r\n\r\ndef words_between():\r\n file_path = os.path.join(os.getcwd(),\"dataset.sentences\")\r\n labelsf = open('dataset.labels', 'r+')\r\n \r\n outputf = open('dataset.words_between.csv', 'w')\r\n outputf.write(\"words\\tlabel\\n\")\r\n \r\n with open(file_path,'r+') as sentencesf:\r\n for line in sentencesf:\r\n # Find all the words between PROTX1 and PROTX2 and vice versa\r\n result12=re.findall(\"(?<=PROTX1).*(?=PROTX2)\", line)\r\n result21=re.findall(\"(?<=PROTX2).*(?=PROTX1)\", line)\r\n # Read the label of the sentence\r\n label=labelsf.readline()\r\n # Write the words in between and the label separated by a tab\r\n if result12:\r\n outputf.write(\"\\\"\"+result12[0]+\"\\\"\\t\"+label)\r\n else:\r\n if result21:\r\n outputf.write(\"\\\"\"+result21[0]+\"\\\"\\t\"+label)\r\n else:\r\n outputf.write(\"\\\"\\\"\\t\"+label)\r\n \r\n sentencesf.close()\r\n labelsf.close()\r\n outputf.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n words_between()","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"91399915","text":"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Interactive prompt to run advanced commands and sub-processes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\ntry:\n unicode\nexcept NameError:\n unicode = str\n unichr = chr\n\nimport os\nimport re\nimport subprocess\n\nimport app.controller\n\n\ndef functionTestEq(a, b):\n assert a == b, u\"%r != %r\" % (a, b)\n\n\nif 1:\n # Break up a command line, separate by |.\n kRePipeChain = re.compile(\n #r'''\\|\\|?|&&|((?:\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|[^\\s|&]+)+)''')\n r'''((?:\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|\\|\\||[^|]+)+)''')\n functionTestEq(\n kRePipeChain.findall(''' date \"a b\" 'c d ' | sort '''),\n [\"\"\" date \"a b\" 'c d ' \"\"\", ' sort '])\n functionTestEq(kRePipeChain.findall('date'), ['date'])\n functionTestEq(kRePipeChain.findall('d-a.te'), ['d-a.te'])\n functionTestEq(kRePipeChain.findall('date | wc'), ['date ', ' wc'])\n functionTestEq(kRePipeChain.findall('date|wc'), ['date', 'wc'])\n functionTestEq(kRePipeChain.findall('date && sort'), ['date && sort'])\n functionTestEq(kRePipeChain.findall('date || sort'), ['date || sort'])\n functionTestEq(\n kRePipeChain.findall('''date \"a b\" 'c d ' || sort'''),\n [\"\"\"date \"a b\" 'c d ' || sort\"\"\"])\n\n# Break up a command line, separate by &&.\nkReLogicChain = re.compile(\n r'''\\s*(\\|\\|?|&&|\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|[^\\s|&]+)''')\nfunctionTestEq(kReLogicChain.findall('date'), ['date'])\nfunctionTestEq(kReLogicChain.findall('d-a.te'), ['d-a.te'])\nfunctionTestEq(kReLogicChain.findall('date | wc'), ['date', '|', 'wc'])\nfunctionTestEq(kReLogicChain.findall('date|wc'), ['date', '|', 'wc'])\nfunctionTestEq(kReLogicChain.findall('date && sort'), ['date', '&&', 'sort'])\nfunctionTestEq(kReLogicChain.findall('date || sort'), ['date', '||', 'sort'])\nfunctionTestEq(\n kReLogicChain.findall(''' date \"a\\\\\" b\" 'c d ' || sort '''),\n ['date', '\"a\\\\\" b\"', \"'c d '\", '||', 'sort'])\n\n# Break up a command line, separate by \\\\s.\nkReArgChain = re.compile(r'''\\s*(\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|[^\\s]+)''')\nfunctionTestEq(kReArgChain.findall('date'), ['date'])\nfunctionTestEq(kReArgChain.findall('d-a.te'), ['d-a.te'])\nfunctionTestEq(\n kReArgChain.findall(''' date \"a b\" 'c d ' \"a\\\\\" b\" 'c\\\\' d ' '''),\n ['date', '\"a b\"', \"'c d '\", '\"a\\\\\" b\"', \"'c\\\\' d '\"])\nfunctionTestEq(kReArgChain.findall('''bm +'''), ['bm', '+'])\n\n# Break up a command line, separate by \\w (non-word chars will be separated).\nkReSplitCmdLine = re.compile(\n r\"\"\"\\s*(\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'|\\w+|[^\\s]+)\\s*\"\"\")\nfunctionTestEq(kReSplitCmdLine.findall('''bm ab'''), ['bm', 'ab'])\nfunctionTestEq(kReSplitCmdLine.findall('''bm+'''), ['bm', '+'])\nfunctionTestEq(kReSplitCmdLine.findall('''bm \"one two\"'''), ['bm', '\"one two\"'])\nfunctionTestEq(\n kReSplitCmdLine.findall('''bm \"o\\\\\"ne 
two\"'''), ['bm', '\"o\\\\\"ne two\"'])\n\n# Unquote text.\nkReUnquote = re.compile(r'''([\"'])([^\\1]*)\\1''')\nfunctionTestEq(kReUnquote.sub('\\\\2', 'date'), 'date')\nfunctionTestEq(kReUnquote.sub('\\\\2', '\"date\"'), 'date')\nfunctionTestEq(kReUnquote.sub('\\\\2', \"'date'\"), 'date')\nfunctionTestEq(kReUnquote.sub('\\\\2', \"'da\\\\'te'\"), \"da\\\\'te\")\nfunctionTestEq(kReUnquote.sub('\\\\2', '\"da\\\\\"te\"'), 'da\\\\\"te')\n\n\nclass InteractivePrompt(app.controller.Controller):\n \"\"\"Extended commands prompt.\"\"\"\n\n def __init__(self, view):\n app.controller.Controller.__init__(self, view, u\"prompt\")\n\n def setTextBuffer(self, textBuffer):\n app.controller.Controller.setTextBuffer(self, textBuffer)\n self.textBuffer = textBuffer\n self.commands = {\n u'bm': self.bookmarkCommand,\n u'build': self.buildCommand,\n u'cua': self.changeToCuaMode,\n u'emacs': self.changeToEmacsMode,\n u'make': self.makeCommand,\n u'open': self.openCommand,\n #u'split': self.splitCommand, # Experimental wip.\n u'vim': self.changeToVimNormalMode,\n }\n self.filters = {\n u'format': self.formatCommand,\n u'lower': self.lowerSelectedLines,\n u'numEnum': self.assignIndexToSelectedLines,\n u's': self.substituteText,\n u'sort': self.sortSelectedLines,\n u'sub': self.substituteText,\n u'upper': self.upperSelectedLines,\n u'wrap': self.wrapSelectedLines,\n }\n self.subExecute = {\n u'!': self.shellExecute,\n u'|': self.pipeExecute,\n }\n\n def bookmarkCommand(self, cmdLine, view):\n args = kReSplitCmdLine.findall(cmdLine)\n if len(args) > 1 and args[1][0] == u'-':\n if self.view.host.textBuffer.bookmarkRemove():\n return {}, u'Removed bookmark'\n else:\n return {}, u'No bookmarks to remove'\n else:\n self.view.host.textBuffer.bookmarkAdd()\n return {}, u'Added bookmark'\n\n def buildCommand(self, cmdLine, view):\n return {}, u'building things'\n\n def changeToCuaMode(self, cmdLine, view):\n return {}, u'CUA mode'\n\n def changeToEmacsMode(self, cmdLine, view):\n return {}, u'Emacs mode'\n\n def changeToVimNormalMode(self, cmdLine, view):\n return {}, u'Vim normal mode'\n\n def focus(self):\n app.log.info(u'InteractivePrompt.focus')\n self.textBuffer.selectionAll()\n\n def formatCommand(self, cmdLine, lines):\n formatter = {\n #\".js\": app.format_javascript.format\n #\".py\": app.format_python.format\n #\".html\": app.format_html.format,\n }\n\n def noOp(data):\n return data\n\n fileName, ext = os.path.splitext(self.view.host.textBuffer.fullPath)\n app.log.info(fileName, ext)\n data = formatter.get(ext,\n noOp)(self.view.host.textBuffer.parser.data)\n lines = data.split(u\"\\n\")\n\n return lines, u'Changed %d lines' % (len(lines),)\n\n def makeCommand(self, cmdLine, view):\n return {}, u'making stuff'\n\n def openCommand(self, cmdLine, view):\n \"\"\"\n Opens the file under cursor.\n \"\"\"\n args = kReArgChain.findall(cmdLine)\n app.log.info(args)\n if len(args) == 1:\n # If no args are provided, look for a path at the cursor position.\n view.textBuffer.openFileAtCursor()\n return {}, view.textBuffer.message[0]\n # Try the raw path.\n path = args[1]\n if os.access(path, os.R_OK):\n return self.openFile(path, view)\n # Look in the same directory as the current file.\n path = os.path.join(os.path.dirname(view.textBuffer.fullPath), args[1])\n if os.access(path, os.R_OK):\n return self.openFile(path, view)\n return {}, u\"Unable to open \" + args[1]\n\n def openFile(self, path, view):\n textBuffer = view.program.bufferManager.loadTextBuffer(path)\n inputWindow = self.currentInputWindow()\n 
inputWindow.setTextBuffer(textBuffer)\n self.changeTo(inputWindow)\n inputWindow.setMessage('Opened file {}'.format(path))\n\n def splitCommand(self, cmdLine, view):\n view.splitWindow()\n return {}, u'Split window'\n\n def execute(self):\n try:\n cmdLine = self.textBuffer.parser.data\n if not len(cmdLine):\n self.changeToHostWindow()\n return\n tb = self.view.host.textBuffer\n lines = list(tb.getSelectedText())\n if cmdLine[0] in self.subExecute:\n data = self.view.host.textBuffer.parser.data.encode('utf-8')\n output, message = self.subExecute.get(cmdLine[0])(cmdLine[1:],\n data)\n if app.config.strict_debug:\n assert isinstance(output, bytes)\n assert isinstance(message, unicode)\n tb.editPasteLines(tuple(output.decode('utf-8').split(u\"\\n\")))\n tb.setMessage(message)\n else:\n cmd = re.split(u'\\\\W', cmdLine)[0]\n dataFilter = self.filters.get(cmd)\n if dataFilter:\n if not len(lines):\n tb.setMessage(\n u'The %s filter needs a selection.' % (cmd,))\n else:\n lines, message = dataFilter(cmdLine, lines)\n tb.setMessage(message)\n if not len(lines):\n lines.append(u'')\n tb.editPasteLines(tuple(lines))\n else:\n command = self.commands.get(cmd, self.unknownCommand)\n message = command(cmdLine, self.view.host)[1]\n tb.setMessage(message)\n except Exception as e:\n app.log.exception(e)\n tb.setMessage(u'Execution threw an error.')\n self.changeToHostWindow()\n\n def shellExecute(self, commands, cmdInput):\n \"\"\"\n cmdInput is in bytes (not unicode).\n return tuple: output as bytes (not unicode), message as unicode.\n \"\"\"\n if app.config.strict_debug:\n assert isinstance(commands, unicode), type(commands)\n assert isinstance(cmdInput, bytes), type(cmdInput)\n try:\n process = subprocess.Popen(\n commands,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True)\n return process.communicate(cmdInput)[0], u''\n except Exception as e:\n return u'', u'Error running shell command\\n' + e\n\n def pipeExecute(self, commands, cmdInput):\n \"\"\"\n cmdInput is in bytes (not unicode).\n return tuple: output as bytes (not unicode), message as unicode.\n \"\"\"\n if app.config.strict_debug:\n assert isinstance(commands, unicode), type(commands)\n assert isinstance(cmdInput, bytes), type(cmdInput)\n chain = kRePipeChain.findall(commands)\n try:\n process = subprocess.Popen(\n kReArgChain.findall(chain[-1]),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n if len(chain) == 1:\n return process.communicate(cmdInput)[0], u''\n else:\n chain.reverse()\n prior = process\n for i in chain[1:]:\n prior = subprocess.Popen(\n kReArgChain.findall(i),\n stdin=subprocess.PIPE,\n stdout=prior.stdin,\n stderr=subprocess.STDOUT)\n prior.communicate(cmdInput)\n return process.communicate()[0], u''\n except Exception as e:\n app.log.exception(e)\n return b'', u'Error running shell command\\n' + unicode(e)\n\n def info(self):\n app.log.info(u'InteractivePrompt command set')\n\n def lowerSelectedLines(self, cmdLine, lines):\n lines = [line.lower() for line in lines]\n return lines, u'Changed %d lines' % (len(lines),)\n\n def assignIndexToSelectedLines(self, cmdLine, lines):\n output = []\n for i, line in enumerate(lines):\n output.append(u\"%s = %d\" % (line, i))\n return output, u'Changed %d lines' % (len(output),)\n\n def sortSelectedLines(self, cmdLine, lines):\n lines.sort()\n return lines, u'Changed %d lines' % (len(lines),)\n\n def substituteText(self, cmdLine, lines):\n if len(cmdLine) < 2:\n return (lines, u'''tip: %s/foo/bar/ to replace 
'foo' with 'bar'.'''\n % (cmdLine,))\n if not lines:\n return lines, u'No text was selected.'\n sre = re.match('\\w+(\\W)', cmdLine)\n if not sre:\n return (lines, u'''Separator punctuation missing, example:'''\n u''' %s/foo/bar/''' % (cmdLine,))\n separator = sre.groups()[0]\n try:\n _, find, replace, flags = cmdLine.split(separator, 3)\n except ValueError:\n return (lines, u'''Separator punctuation missing, there should be'''\n u''' three '%s'.''' % (separator,))\n data = self.view.host.textBuffer.parser.data\n output = self.view.host.textBuffer.findReplaceText(\n find, replace, flags, data)\n lines = output.split(u\"\\n\")\n return lines, u'Changed %d lines' % (len(lines),)\n\n def upperSelectedLines(self, cmdLine, lines):\n lines = [line.upper() for line in lines]\n return lines, u'Changed %d lines' % (len(lines),)\n\n def unknownCommand(self, cmdLine, view):\n self.view.host.textBuffer.setMessage(u'Unknown command')\n return {}, u'Unknown command %s' % (cmdLine,)\n\n def wrapSelectedLines(self, cmdLine, lines):\n tokens = cmdLine.split()\n app.log.info(\"tokens\", tokens)\n width = 80 if len(tokens) == 1 else int(tokens[1])\n indent = len(lines[0]) - len(lines[0].lstrip())\n width -= indent\n lines = app.curses_util.wrapLines(lines, u\" \" * indent, width)\n return lines, u'Changed %d lines' % (len(lines),)\n","sub_path":"app/interactive_prompt.py","file_name":"interactive_prompt.py","file_ext":"py","file_size_in_byte":13830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
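The prompt's `shellExecute` feeds the current selection to a shell command as bytes and captures its stdout. A minimal standalone sketch of that subprocess pattern, assuming a POSIX `sort` is available on PATH:

```python
import subprocess

def pipe_through(command, data: bytes) -> bytes:
    """Feed `data` to a shell command's stdin and return its stdout (both bytes)."""
    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
    )
    out, _ = proc.communicate(data)
    return out

print(pipe_through("sort", b"banana\napple\ncherry\n").decode())
```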
+{"seq_id":"90869773","text":"import igraph as ig\nimport numpy as np\nimport pandas as pd\nimport time\nimport sys\nfrom multiprocessing import Pool, cpu_count\n\nimport random\nimport math\n\nfrom typing import NewType, Set\nfrom pathlib import Path\n\nfrom tqdm import tqdm\nfrom HighGraphPreprocessing import HighGraphPreprocessing\nfrom NeighborManager import NeighborManager\nfrom LayerManager import LayerManager\nfrom ComponentCollector import ComponentCollector\nfrom ReachabilityEstimator import ReachabilityEstimator\nfrom SizeEstimation import SizeEstimation\nfrom Statistics import Statistics\nfrom GraphUtils import GraphUtils as gu\n#from utils.metrics import *\n\nfrom globals import *\n\nGraph = NewType(\"Graph\", ig.GraphBase)\nNode = NewType(\"Node\", ig.Vertex)\nNodesSet = NewType(\"NodesSet\", Set[ig.Vertex])\n\nclass Sampler:\n def init(self, dataset_name=\"Epinions\", loaded_graph=None):\n dataset = None\n for ds in datasets:\n if dataset_name == ds.title:\n dataset = ds\n break\n if dataset is None:\n raise ValueError(\"No dataset with this name\")\n\n self.title = dataset_name\n self.dataset = dataset\n path, sep, directed = dataset.path, dataset.sep, dataset.directed\n print(\"Reading graph: \", self.title, \"from\", path)\n self.layer_num = 2\n\n if loaded_graph is None:\n g, in_degrees = gu.load_graph(path, sep, directed=directed)\n else:\n g, in_degrees = loaded_graph\n g.to_undirected()\n print(\"Taking giant component...\")\n g = g.components().giant()\n # the actual size will not be used at any point during the algorithm\n # (used only for evaluation).\n self.actual_graph_size = len(g.vs)\n print(\"Number of nodes:\", self.actual_graph_size)\n self.graph = g\n self.initialized = True\n self.total_num_samples = 0\n self.containment_probs = None\n\n def generate_L0(self, L0_method: str = \"greedy\", L0_size: int = -1):\n assert self.initialized\n self.high_subgraph = HighGraphPreprocessing(self.graph)\n self.high_subgraph.set_method(L0_method)\n if L0_size <= 0:\n L0_size = nums_L0[self.title]\n self.high_subgraph.get_high_nodes(L0_size, np.random.choice(self.graph.vs))\n self.L0_generated = True\n self.L0_size = L0_size\n self.L1_size = len(self.high_subgraph.L1_set)\n self.L0_L1_size = self.L0_size + self.L1_size\n # note the the actual L2_size will not be used anywhere in the algorithm (it\n # is only used for evaluation purposes)\n self.actual_L2_size = self.actual_graph_size - self.L0_L1_size\n self.neighbor_manager = NeighborManager(self.high_subgraph)\n print(\"Built L0. 
Total queries:\", self.neighbor_manager.query_counter)\n\n self.layer_manager = LayerManager(self.graph,\n self.high_subgraph,\n self.neighbor_manager)\n self.layers = self.layer_manager.get_layers()\n self.component_collector = ComponentCollector(self.graph,\n self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.layer_num,\n with_in_layer_edges=False)\n self.reachability_estimator = ReachabilityEstimator(self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.component_collector,\n self.layer_num)\n self.size_estimator = SizeEstimation(self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.component_collector,\n self.reachability_estimator)\n self.statistics = Statistics(self.graph,\n self.high_subgraph,\n self.neighbor_manager,\n self.layer_manager,\n self.reachability_estimator,\n self.component_collector)\n\n def L2_reach_step(self, with_updating=True, preprocessing=False):\n update_flag = with_updating and not self.frozen\n next_node_lst = self.component_collector.sample_component_nodes_no_rejection(1, False)\n self.reached_nodes += next_node_lst\n if update_flag:\n self.size_estimator.update_down(next_node_lst)\n node_reachability = self.reachability_estimator.get_reachability(next_node_lst[0],\n layer_num=self.layer_num)\n self.reachabilities.append(node_reachability)\n if update_flag:\n self.reachability_estimator.update_observed_reachabilities([node_reachability])\n node_prob = random.random()\n self.node_probs.append(node_prob)\n self.query_counters.append(self.neighbor_manager.query_counter)\n\n if preprocessing:\n self.is_accepted.append(None)\n else:\n acceptance = self.is_reached_node_accepted(node_prob, node_reachability)\n self.is_accepted.append(acceptance)\n if acceptance:\n return next_node_lst[0]\n else:\n return None\n\n def is_reached_node_accepted(self, node_prob, node_reachability):\n return node_prob < self.baseline_reach / node_reachability\n\n\n def preprocess_L2(self, L1_num_samples: int = 100, L2_num_reaches: int = 10, allowed_error=0.01):\n assert self.L0_generated\n\n self.frozen = False\n\n nodes_from_L1 = random.sample(self.high_subgraph.L1_list, L1_num_samples)\n self.up_nodes = set(nodes_from_L1)\n self.size_estimator.update_up(nodes_from_L1)\n self.initial_query_counter = self.neighbor_manager.query_counter\n\n print(\"Sampled\", L1_num_samples, \"nodes from L1. Total queries:\",\n self.initial_query_counter)\n\n self.reached_nodes = []\n self.node_probs = []\n self.reachabilities = []\n self.query_counters = []\n self.is_accepted = []\n\n for _ in range(L2_num_reaches):\n self.L2_reach_step(preprocessing=True)\n\n self.update_estimates(allowed_error=allowed_error)\n for i in range(len(self.reached_nodes)):\n self.is_accepted[i] = self.is_reached_node_accepted(self.node_probs[i], self.reachabilities[i])\n\n print(\"Sampled\", L2_num_reaches, \"nodes from L2+ without rejection. 
Total queries:\",\n self.neighbor_manager.query_counter)\n\n self.L2_preprocessed = True\n\n def freeze(self):\n self.frozen = True\n self.neighbor_manager.stop_recording()\n\n def unfreeze(self):\n self.frozen= False\n self.neighbor_manager.resume_recording()\n\n\n def update_estimates(self, allowed_error=0.01):\n if not self.frozen:\n self.L2_size_estimation = self.size_estimator.estimate_size(self.L1_size)\n self.graph_size_estimation = self.L2_size_estimation + self.L0_L1_size\n self.estimated_fractions = np.array([self.L0_size, self.L1_size, self.L2_size_estimation],\n dtype=np.float) / self.graph_size_estimation\n self.reach_quantile = allowed_error / self.estimated_fractions[2]\n self.baseline_reach = self.reachability_estimator.estimate_baseline_reachability(self.reach_quantile)\n\n def sample_from_components(self):\n node = None\n while node is None:\n node = self.L2_reach_step()\n return node\n\n def sample_v2(self,\n num_samples: int = 1,\n num_additional_L1_samples: int = 5,\n allowed_error: float = 0.01,\n with_tqdm: bool = False):\n assert self.L2_preprocessed\n\n samples = []\n query_counters = []\n layer_numbers = range(3)\n layer_choices = np.random.choice(layer_numbers, num_samples, p=self.estimated_fractions)\n\n rng = range(num_samples)\n if with_tqdm:\n rng = tqdm(rng)\n for i in rng:\n if layer_choices[i] == 0:\n samples.append(random.choice(self.high_subgraph.L0_list))\n elif layer_choices[i] == 1:\n samples.append(random.choice(self.high_subgraph.L1_list))\n else:\n samples.append(self.sample_from_components())\n query_counters.append(self.neighbor_manager.query_counter)\n\n if not self.frozen:\n new_L1_samples = [self.sample_new_L1_node() for _ in range(num_additional_L1_samples)]\n new_L1_samples = [samp for samp in new_L1_samples if samp is not None]\n if len(new_L1_samples) > 0:\n self.size_estimator.update_up(new_L1_samples)\n\n self.update_estimates(allowed_error=allowed_error)\n\n if num_samples is 1:\n return samples[0], query_counters[0]\n else:\n return samples, query_counters\n\n def sample_new_L1_node(self):\n if len(self.up_nodes) >= self.L1_size:\n return None\n while True:\n node = random.choice(self.high_subgraph.L1_list)\n if node not in self.up_nodes:\n self.up_nodes.add(node)\n return node\n\n\n\n def sample_old(self, num_samples: int,\n allowed_error = 0.01,\n use_standard_mult_factor=False,\n standard_mult_factor = 0.05,\n standard_additive_factor = 100,\n use_extra_mult_factor=False,\n extra_mult_factor: float = 0.5,\n extra_additive_factor = 100,\n with_updating=True,\n with_printing=False):\n\n new_probs = np.array(np.random.random(num_samples), dtype=np.float)\n if self.containment_probs is None:\n self.containment_probs = new_probs\n else:\n self.containment_probs = np.concatenate([self.containment_probs, new_probs])\n\n self.total_num_samples += num_samples\n\n curr_stop = len(self.reached_nodes)\n while True:\n if use_standard_mult_factor:\n next_stop = int(curr_stop + math.ceil(curr_stop * standard_mult_factor))\n else: # use additive factor\n next_stop = curr_stop + standard_additive_factor\n\n curr_L2_size = self.size_estimator.estimate_size(self.L1_size)\n curr_graph_size = curr_L2_size + self.L0_L1_size\n if with_printing:\n print(\"Estimated L2+ size:\", curr_L2_size)\n curr_L2_fraction = float(curr_L2_size) / curr_graph_size\n num_required_L2_samples = int(np.count_nonzero(self.containment_probs < curr_L2_fraction))\n\n reach_quantile = allowed_error / curr_L2_fraction\n baseline_reach = 
self.reachability_estimator.estimate_baseline_reachability(reach_quantile)\n if with_printing:\n print(\"Estimated baseline reachability:\", baseline_reach)\n\n num_actual_L2_samples = np.count_nonzero(np.array(self.node_probs)\n < baseline_reach / np.array(self.reachabilities))\n if num_actual_L2_samples >= num_required_L2_samples:\n if with_printing:\n print(\"Achieved the L2+ initial goal. Now continuing some more steps for stabilization.\")\n break # At this point, we roughly made all required queries from L2+\n\n for counter in range(curr_stop, next_stop):\n self.L2_reach_step(with_updating=with_updating)\n\n curr_stop = next_stop\n\n finished = False\n if with_printing:\n print(\"Stabilization stage...\")\n while not finished:\n if use_extra_mult_factor:\n num_steps = int(extra_mult_factor * curr_stop)\n else:\n num_steps = extra_additive_factor\n for _ in range(num_steps):\n self.L2_reach_step(with_updating=with_updating)\n curr_L2_size = self.size_estimator.estimate_size(self.L1_size)\n curr_graph_size = curr_L2_size + self.L0_L1_size\n curr_L2_fraction = float(curr_L2_size) / curr_graph_size\n num_required_L2_samples = int(np.sum(self.containment_probs < curr_L2_fraction))\n reach_quantile = allowed_error / curr_L2_fraction\n baseline_reach = self.reachability_estimator.estimate_baseline_reachability(reach_quantile)\n L2_possible_samples = np.nonzero(np.array(self.node_probs)\n < baseline_reach / np.array(self.reachabilities))[0]\n if len(L2_possible_samples) >= num_required_L2_samples:\n finished = True\n L2_sample_indices = np.sort(np.random.choice(L2_possible_samples, num_required_L2_samples, replace=False))\n\n if with_printing:\n print(\"Finished!!!\\nEstimated size:\", curr_L2_size)\n print(\"Actual L2+ size:\", sum([len(layer) for layer in self.layers[2:]]))\n print(\"Estimated baseline reachability:\", baseline_reach)\n print(\"Sampled\", len(L2_sample_indices), \"nodes from L2+ out of\", len(self.reached_nodes), \"reaches\")\n print(\"Total number of queries:\", self.neighbor_manager.query_counter)\n print(\"Number of queries per sample:\", self.neighbor_manager.query_counter / self.total_num_samples)\n L2_samples = [self.reached_nodes[int(i)] for i in L2_sample_indices]\n L2_query_counters = [self.query_counters[int(i)] for i in L2_sample_indices]\n\n all_samples = [None for _ in range(self.total_num_samples)]\n all_counters = [self.initial_query_counter for _ in range(self.total_num_samples)]\n L2_locations_in_all_samples = list(np.sort(np.random.choice(range(self.total_num_samples), len(L2_samples), replace=False)))\n for i in range(len(L2_samples)):\n all_samples[L2_locations_in_all_samples[i]] = L2_samples[i]\n all_counters[L2_locations_in_all_samples[i]] = L2_query_counters[i]\n\n # setting up query counters when queries have not been made.\n for i in range(L2_locations_in_all_samples[0]+1, len(all_samples)):\n if all_counters[i] == self.initial_query_counter:\n all_counters[i] = all_counters[i-1]\n\n # deciding for leftover samples whether they were from L0 or L1.\n L0_relative_size = float(self.L0_size) / self.L0_L1_size\n for j in range(self.total_num_samples):\n if all_samples[j] != None:\n pass\n elif random.random() < L0_relative_size:\n all_samples[j] = random.choice(self.high_subgraph.L0_list)\n else:\n all_samples[j] = random.choice(self.high_subgraph.L1_list)\n\n return all_samples, all_counters\n\n\n\n\n\n\n\n# def run_uniformity_experiment(ds_title,\n# L0_size,\n# min_num_samples_L2,\n# max_num_samples_L2,\n# jump_L2,\n# L0_method=\"greedy\",\n# 
L1_num_samples=3000,\n# L2_num_preprocessing_reaches=200):\n# sampler = Sampler()\n# sampler.init(ds_title)\n# sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n# sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n# samples, query_counts = sampler.sample(min_num_samples_L2)\n# frozen_samplers = [copy.deepcopy(sampler)]\n#\n# num_samples_list = len(range(min_num_samples_L2, max_num_samples_L2, jump_L2))\n#\n# for i in range(len(num_samples_list)-1):\n# sampler.sample(num_samples_list[i+1] - num_samples_list[i])\n# frozen_samplers.append(copy.deepcopy(sampler))\n#\n# samples, query_counts = sampler.sample(3000, with_updating=False)\n# return np.histogram(samples)\n\n\n\ndef run_sampling_experiment(dataset,\n L0_size,\n sample_size_range,\n results_fnames,\n L0_method=\"greedy\",\n L1_num_samples=3000,\n L2_num_preprocessing_reaches=500):\n sampler = Sampler()\n sampler.init(dataset.title)\n sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n print(f\"{dataset.title}: {sample_size_range[0]}\")\n samples, query_counters = sampler.sample(sample_size_range[0])\n df = pd.DataFrame(list(zip([sample.index for sample in samples], query_counters)),\n columns=[\"node\", \"query_count\"])\n df.to_json(results_fnames[0])\n for i in range(len(sample_size_range) - 1):\n print(f\"{dataset.title}: {sample_size_range[i+1]}\")\n samples, query_counters = sampler.sample(sample_size_range[i+1] - sample_size_range[i])\n df = pd.DataFrame(list(zip([sample.index for sample in samples], query_counters)),\n columns=[\"node\", \"query_count\"])\n df.to_json(results_fnames[i+1])\n\n\ndef run_sampling_experiment_v2(dataset,\n L0_size,\n sample_size_range,\n fname,\n L0_method=\"greedy\",\n L2_num_preprocessing_reaches=200):\n sampler = Sampler()\n sampler.init(dataset.title)\n vertex_indices_range = (0,len(sampler.graph.vs))\n sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n L1_num_samples = nums_L1_up[dataset.title]\n sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n print(f\"{dataset.title}: {sample_size_range[0]}\")\n samples, query_counters = sampler.sample_v2(sample_size_range[0])\n dataframes = [pd.DataFrame(list(zip([sample.index for sample in samples], query_counters)),\n columns=[\"node\", \"query_count\"])]\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t = time.time()\n for i in range(len(sample_size_range) - 1):\n print(f\"{dataset.title}: {sample_size_range[i+1]}\")\n samples, query_counters = sampler.sample_v2(sample_size_range[i+1] - sample_size_range[i])\n dataframes.append(pd.DataFrame(list(zip([sample.index for sample in samples], query_counters)),\n columns=[\"node\", \"query_count\"]))\n histogram += np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t2 = time.time()\n print(\"time:\", t2-t)\n t = t2\n all_data = pd.concat(dataframes, ignore_index=True)\n print(all_data)\n all_data.to_json(fname)\n return all_data\n\ndef run_sampling_experiment_with_freezing(dataset,\n L0_size,\n sample_size_range,\n num_samples_per_instance,\n fname,\n L0_method=\"greedy\",\n L1_num_samples=5000,\n L2_num_preprocessing_reaches=200,\n additional_L1_samples_per=5):\n sampler = Sampler()\n sampler.init(dataset.title)\n vertex_indices_range = 
(0,len(sampler.graph.vs))\n sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n print(f\"{dataset.title}: {sample_size_range[0]}\")\n for _ in range(sample_size_range[0]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n dataframes = [pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[0] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"])]\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t = time.time()\n for i in range(len(sample_size_range) - 1):\n print(f\"{dataset.title}: {sample_size_range[i+1]}\")\n sampler.unfreeze()\n for _ in range(sample_size_range[i+1] - sample_size_range[i]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n dataframes.append(pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[i+1] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"]))\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t2 = time.time()\n print(\"time:\", t2-t)\n t = t2\n all_data = pd.concat(dataframes, ignore_index=True)\n print(all_data)\n all_data.to_json(fname)\n return all_data\n\ndef run_sampling_experiment_with_freezing_v3(dataset,\n L0_size,\n sample_size_range,\n num_samples_per_instance,\n results_fnames,\n L0_method=\"greedy\",\n L1_num_samples=100,\n L2_num_preprocessing_reaches=10,\n additional_L1_samples_per=5):\n sampler = Sampler()\n sampler.init(dataset.title)\n vertex_indices_range = (0,len(sampler.graph.vs))\n sampler.generate_L0(L0_method=L0_method, L0_size=L0_size)\n sampler.preprocess_L2(L1_num_samples=L1_num_samples, L2_num_reaches=L2_num_preprocessing_reaches)\n print(f\"{dataset.title}: {sample_size_range[0]}\")\n for _ in range(sample_size_range[0]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n df = pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[0] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"])\n df.to_json(results_fnames[0])\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t = time.time()\n for i in range(len(sample_size_range) - 1):\n print(f\"{dataset.title}: 
{sample_size_range[i+1]}\")\n sampler.unfreeze()\n for _ in range(sample_size_range[i + 1] - sample_size_range[i]):\n sampler.sample_v2(num_samples=1, num_additional_L1_samples=additional_L1_samples_per)\n print(\"L2 size estimate:\", sampler.L2_size_estimation)\n print(\"L2 baseline reachability estimate:\", sampler.baseline_reach)\n sampler.freeze()\n samples, query_counts = sampler.sample_v2(num_samples=num_samples_per_instance, with_tqdm=True)\n print(len(sampler.reachability_estimator.observed_reachabilities))\n df = pd.DataFrame(list(zip([sample.index for sample in samples],\n query_counts,\n [sample_size_range[i+1] for _ in samples])),\n columns=[\"node\", \"query_count\", \"pre_freeze\"])\n df.to_json(results_fnames[i+1])\n histogram = np.histogram([samp.index for samp in samples], range=vertex_indices_range)[0]\n print(\"histogram:\", histogram)\n t2 = time.time()\n print(\"time:\", t2-t)\n t = t2\n #all_data = pd.concat(dataframes, ignore_index=True)\n #print(all_data)\n #all_data.to_json(fname)\n #return all_data\n\ndef run_sampling_all_graphs():\n for exp_num in range(10):\n for dataset in datasets[0:2]:\n sample_size_range = range(100, 200001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1]-sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n for dataset in datasets[2:4]:\n sample_size_range = range(100, 50001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1] - sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n for exp_num in range(5):\n for dataset in datasets[4:6]:\n sample_size_range = range(100, 20001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1] - sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n for exp_num in range(5):\n for dataset in datasets[6:8]:\n sample_size_range = range(100, 20001, 100)\n print(\"dataset:\", dataset.title)\n fname = f\"{dataset.title}-sampling-exp{exp_num}-{sample_size_range[0]}-{sample_size_range[-1]}-{sample_size_range[1] - sample_size_range[0]}\"\n results_fname = Path(\"experiment_outs/results/sampling\") / (fname + \".json\")\n L0_size = nums_L0[dataset.title]\n run_sampling_experiment_v2(dataset, L0_size, sample_size_range, results_fname)\n\n\ndef run_sampling_with_freezing_all_graphs(results_base=None, exp_num=0, dataset_idx=0):\n dataset = datasets[dataset_idx]\n sample_size_range = [10, 20, 40, 80, 150, 300, 500, 750, 1000]\n num_samples_per_instance = 100000\n print(\"dataset:\", dataset.title)\n fnames = [f\"{dataset.title}-sampling-freezing-{sample_size_range[i]}-num_samples{num_samples_per_instance}-exp{exp_num}\"\n for i in range(len(sample_size_range))]\n if results_base is None:\n results_base = Path(__file__).resolve().parent.parent/\"experiment_outs\"/\"results\"/\"sampling\"\n 
results_fnames = [results_base / (fname + \".json\") for fname in fnames]\n L0_size = nums_L0[dataset.title]\n L1_num_samples = 100#nums_L1_up[dataset.title]\n L2_num_preprocessing_reaches = 10\n run_sampling_experiment_with_freezing_v3(dataset,\n L0_size,\n sample_size_range,\n num_samples_per_instance,\n results_fnames,\n L1_num_samples=L1_num_samples,\n L2_num_preprocessing_reaches= L2_num_preprocessing_reaches\n )\n\n\n\n\n\nif __name__ == \"__main__\":\n results_base = None#Path(\"/vol/scratch/omrib/sampling\")\n num_exps = 5\n dataset_idx = int(sys.argv[1])\n min_exp = int(sys.argv[2])\n pool = Pool(num_exps)\n pool.starmap(run_sampling_with_freezing_all_graphs,\n ((results_base, exp_num, dataset_idx)\n for exp_num in range(min_exp, min_exp+num_exps)))\n\n #run_sampling_with_freezing_all_graphs(results_base=results_base,\n # exp_num=int(sys.argv[1]),\n # dataset_idx=int(sys.argv[2]))\n #distance_from_uniformity_multi_experiments(title=\"Epinions\", num_exps=3, sample_size_range=range(10, 51, 10), L0_size=1000)","sub_path":"code/Sampler.py","file_name":"Sampler.py","file_ext":"py","file_size_in_byte":29656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
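The sampler's `is_reached_node_accepted` is a rejection step: a node that is reached with probability roughly proportional to its reachability is kept with probability `baseline / reachability`, flattening the bias toward highly reachable nodes. A toy sketch of that rule with made-up numbers, only to show the acceptance rate it produces:

```python
import random

def accept(node_reachability, baseline_reach):
    # keep the reached node with probability baseline / reachability
    return random.random() < baseline_reach / node_reachability

# a node with reachability 2.0 against a baseline of 0.5 should be kept ~25% of the time
hits = sum(accept(2.0, 0.5) for _ in range(100000))
print(hits / 100000)  # ~0.25
```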
+{"seq_id":"415944230","text":"bad = 0\ngood = 0\nmiddle = 0\nerro = 0\n\nwith open(\"xiaohongshu_sentiments.txt\", encoding=\"utf-8\") as f:\n\tlines = f.readlines()\n\nfor i in lines:\n\ti = int(i.strip())\n\tif i == 0:\n\t\tbad += 1\n\telif i == 1:\n\t\tmiddle += 1\n\telif i == 2:\n\t\tgood += 1\n\telif i == -1:\n\t\terro += 1\n\telse:\n\t\tprint(i)\n\t\tprint(\"超出范围\")\n\nsum = good+bad+middle\n\nprint(\"积极态度:\",good,\"\\t\",good/sum)\nprint(\"中间态度:\",middle,\"\\t\",middle/sum)\nprint(\"消极态度\",bad,\"\\t\",bad/sum)\nprint(\"情感分析错误:\",erro)\n","sub_path":"baiduAPI/get_percent.py","file_name":"get_percent.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"40213013","text":"import math\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom statsmodels import api as sm\nfrom utils import stock_pgfunctions as pg\nimport plotly.graph_objects as go\nimport mplfinance as mpf\nfrom cycler import cycler\nimport matplotlib as mpl\n\npd.set_option ('display.max_rows', 1000)\npd.set_option ('display.max_columns',20)\n\ndef query_dt(sql):\n \"\"\"\n 连接数据库,查询指定SQL语句,返回PD的数据集\n :param sql: str\n :return: panda data\n \"\"\"\n conn = pg.connect()\n dt = pd.read_sql(sql, conn)\n return dt\n\n\ndef vwap_pic(symbol, from_t, to_t):\n \"\"\"\n 用plot画VWAP图\n 成交量加权平均价VWAP是将多笔交易的价格按各自的成交量加权而算出的平均价\n \"\"\"\n sql = f\"select * from stock_candles_min where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}'\"\n dt = query_dt(sql)\n dt['avg_p'] = ( dt['h'] + dt['l'] + dt['c'] ) / 3\n dt['v_cum'] = dt['v'].cumsum()\n dt['pv'] = dt['avg_p']*dt['v']\n dt['pv_cum'] = dt['pv'].cumsum()\n dt['VWAP'] = dt['pv_cum']/dt['v_cum']\n print(dt.head())\n #打印出每日闭市价格(绿线)和VMAP价格(橙线)\n plt.figure(figsize=(12, 8))\n plt.plot(dt['dt'], dt['c'], color='green', label=\"Close Price\")\n plt.plot(dt['dt'], dt['VWAP'], color='orange', label=\"VWAP\")\n plt.title('VWAP and close price of dt on 21Jan.2020', fontsize=20)\n plt.xlabel('Time', fontsize=16)\n plt.ylabel('Price', fontsize=16)\n plt.legend()\n plt.show()\n\n\ndef candle_stick_plot1(symbol, from_t, to_t):\n \"\"\"\n 画candle stick plot图,方式一\n 用plotly.graph_objects\n \"\"\"\n sql = f\"select * from stock_candles_min where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}'\"\n dt = query_dt(sql)\n fig = go.Figure(data=[go.Candlestick(x=dt['dt'], open=dt['o'], high=dt['h'], low=dt['l'], close=dt['c'])])\n fig.show()\n\n\ndef candle_stick_plot2(symbol, from_t, to_t):\n \"\"\"\n 画candle stick plot图,方式二\n mplfinance 画\n \"\"\"\n sql = f\"select * from stock_candles_min where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}'\"\n dt = query_dt(sql)\n dt = dt[['o', 'h', 'l', 'c', 'v', 'dt']]\n #坑,必须用固定列名字,不能用其他列名,所以特意转换\n dt.rename(\n columns={\n 'dt': 'Date', 'o': 'Open',\n 'h': 'High', 'l': 'Low',\n 'c': 'Close', 'v': 'Volume'},\n inplace=True)\n dt.index = dt['Date'] #索引必须是标准日期格式\n dt = dt.drop('Date', axis=1) #只需要o,h,l,c,v五个数据列,不删除多余的列也不会报错\n # print('-'*30)\n # print(dt)\n symbol = 'dt'\n # 设置基本参数\n # type:绘制图形的类型,有candle, renko, ohlc, line等\n # 此处选择candle,即K线图\n # mav(moving average):均线类型,此处设置7,30,60日线\n # volume:布尔类型,设置是否显示成交量,默认False\n # title:设置标题\n # y_label:设置纵轴主标题\n # y_label_lower:设置成交量图一栏的标题\n # figratio:设置图形纵横比\n # figscale:设置图形尺寸(数值越大图像质量越高)\n kwargs = dict(\n type='candle',\n mav=(7, 30, 60),\n volume=True,\n title='\\nUS_stock %s candle_line' % (symbol),\n ylabel='OHLC Candles',\n ylabel_lower='Shares\\nTraded Volume',\n figratio=(15, 10),\n figscale=5)\n # 设置marketcolors\n # up:设置K线线柱颜色,up意为收盘价大于等于开盘价\n # down:与up相反,这样设置与国内K线颜色标准相符\n # edge:K线线柱边缘颜色(i代表继承自up和down的颜色),下同。详见官方文档)\n # wick:灯芯(上下影线)颜色\n # volume:成交量直方图的颜色\n # inherit:是否继承,选填\n mc = mpf.make_marketcolors(\n up='green',\n down='red',\n edge='i',\n wick='i',\n volume='in',\n inherit=True)\n # 设置图形风格\n # gridaxis:设置网格线位置\n # gridstyle:设置网格线线型\n # y_on_right:设置y轴位置是否在右\n s = mpf.make_mpf_style(\n gridaxis='both',\n gridstyle='-.',\n y_on_right=False,\n marketcolors=mc)\n # 设置均线颜色,配色表可见下图\n # 建议设置较深的颜色且与红色、绿色形成对比\n # 此处设置七条均线的颜色,也可应用默认设置\n mpl.rcParams['axes.prop_cycle'] = cycler(\n color=['dodgerblue', 'deeppink',\n 'navy', 'teal', 'maroon', 'darkorange',\n 'indigo'])\n # 设置线宽\n 
mpl.rcParams['lines.linewidth'] = .5\n # 图形绘制\n # show_nontrading:是否显示非交易日,默认False\n # savefig:导出图片,填写文件名及后缀\n mpf.plot(dt,\n **kwargs,\n style=s,\n show_nontrading=False,\n savefig='A_stock_%s_candle_min_line' %(symbol) + '.jpg')\n plt.show()\n print(\"A_stock_%s_candle_min_line\" %(symbol) + \".jpg\" +\"蜡烛图像已经画好。\")\n\ndef daily_return(symbol, from_t, to_t):\n \"\"\"\n 计算日收益率daily_return\n 计算公式是 : return = log(today close/previous close)\n :param symbol: str\n :param from_t: str\n :param to_t: str\n :return: picture\n \"\"\"\n sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n dt = query_dt(sql)\n rt=[]\n rt.append(np.nan)\n for j in range(1, len(dt['c'])):\n r = (math.log((dt['c'][j])/(dt['c'][j - 1])))\n rt.append(r)\n dt['return'] = rt\n plt.plot(dt['dt'], dt['return'], '--')\n return plt.show()\n\n\ndef hyp_test_pic1(symbol, from_t, to_t):\n \"\"\"\n 画出正态分布的图。这是方法一。\n 先计算对数收益率,然后画图\n :param symbol: str\n :param from_t: str\n :param to_t: str\n :return: picture\n \"\"\"\n sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n dt = query_dt(sql)\n #计算对数收益率\n a = np.log(dt['c'].pct_change()+1)\n #画出正态分布图\n a.hist(bins=50, figsize=(10, 6))\n return plt.show()\n\ndef hyp_test_pic2(symbol, from_t, to_t):\n \"\"\"\n 画出检验正态分布的图。这是方法二。\n X轴理论分位数,y轴样本分位数.只要不在一条直线上,就表示不符合正态分布\n :param symbol: str\n :param from_t: str\n :param to_t: str\n :return: picture\n \"\"\"\n sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n dt = query_dt(sql)\n #计算对数收益率\n a = np.log(dt['c'].pct_change()+1)\n fix, axes = plt.subplots(1, 1, figsize=(10, 12))\n sm.qqplot(a.dropna(), line='s', ax=axes)\n axes.set_title(\"hypothesis testing\") #用中文做标题会出错\n return plt.show()\n\ndef hyp_test_data(symbol, from_t, to_t):\n \"\"\"\n 计算出检验正态分布的统计数据。这是方法三。\n :param symbol: str\n :param from_t: str\n :param to_t: str\n :return:str\n \"\"\"\n sql = f\"select * from stock_candles_day where symbol='{symbol}' and dt>='{from_t}' and dt<='{to_t}' order by symbol,series\"\n dt = query_dt(sql)\n a = np.log(dt['c'].pct_change()+1)#计算对数收益率\n b = a.drop(0, axis=0)\n u = b.mean() # 计算均值\n std = b.std() # 计算标准差\n \"\"\"\n kstest方法中的参数分别是:待检验的数据,检验方法(这里设置成norm正态分布),均值与标准差\n 返回两个值:statistic → D值,pvalue → P值\n 当p值大于0.05,说明待检验的数据符合为正态分布 \n \"\"\"\n c = b.values.tolist()\n result = stats.kstest(c,'norm', (b, std))\n return result\n","sub_path":"Final_Project/zhangsongbin/assginment2/stock_assginment2_utils.py","file_name":"stock_assginment2_utils.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
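The `vwap_pic` function in this record builds VWAP from the typical price `(h + l + c) / 3` weighted by volume and accumulated over time. The same computation on a tiny made-up OHLCV frame:

```python
import pandas as pd

df = pd.DataFrame({
    "h": [10.5, 10.8, 11.0],
    "l": [10.0, 10.4, 10.6],
    "c": [10.2, 10.7, 10.9],
    "v": [1000, 1500, 800],
})
typical = (df["h"] + df["l"] + df["c"]) / 3          # typical price per bar
df["VWAP"] = (typical * df["v"]).cumsum() / df["v"].cumsum()  # volume-weighted running average
print(df)
```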
+{"seq_id":"284499139","text":"from flask import (\n Blueprint,\n request,\n url_for,\n render_template,\n redirect,\n session,\n)\n\nimport csrf_token\nfrom csrf_token import create_token\nfrom controllers import current_user, login_required\nfrom models.Board import Board\nfrom models.Reply import Reply\nfrom models.Topic import Topic\nfrom utils import log\n\ntopic_bp = Blueprint('topic', __name__, url_prefix='/topic')\n\n\n@topic_bp.route('/detail', methods=['GET'])\ndef detail():\n topic_id = request.args.get('id')\n topic = Topic.find_by(id=topic_id)\n topic.views += 1\n topic.update()\n board = Board.find_by(id=topic.board_id)\n topic_user = topic.user()\n replies = Reply.find_all(topic_id=topic_id)\n\n u = current_user()\n token = create_token()\n if u is not None:\n csrf_token.set_value(token, u.id)\n\n return render_template(\n 'topic/detail.html',\n topic=topic,\n topic_user=topic_user,\n board=board,\n replies=replies,\n token=token,\n )\n\n\n@topic_bp.route('/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n u = current_user()\n if request.method == 'POST':\n form = request.form.to_dict()\n token = form.get('token')\n if csrf_token.get_value(token) == u.id:\n csrf_token.pop_key(token)\n topic = Topic.new(form, user_id=u.id)\n return redirect(url_for('.detail', id=topic.id))\n else:\n boards = Board.all()\n token = create_token()\n csrf_token.set_value(token, u.id)\n return render_template('topic/create.html', boards=boards, token=token)\n\n\n@topic_bp.route('/delete', methods=['GET'])\n@login_required\ndef delete():\n topic_id = request.args.get('id')\n topic = Topic.find_by(id=topic_id)\n token = request.args.get('token')\n # token 是属于 current_user 的\n if csrf_token.get_value(token) == topic.user_id:\n csrf_token.pop_key(token)\n Topic.delete(id=topic_id)\n return redirect(url_for('static.index'))\n else:\n csrf_token.pop_key(token)\n return redirect(url_for('static.error'))\n\n\n@topic_bp.route('/update', methods=['GET', 'POST'])\n@login_required\ndef update():\n u = current_user()\n topic_id = request.args.get('id')\n topic = Topic.find_by(id=topic_id)\n if request.method == 'POST':\n form = request.form.to_dict()\n token = form.get('token')\n if csrf_token.get_value(token) == topic.user_id:\n csrf_token.pop_key(token)\n Topic.update_by_form(topic_id, form)\n return redirect(url_for('.detail', id=topic_id))\n else:\n csrf_token.pop_key(token)\n return redirect(url_for('static.error'))\n else:\n token = create_token()\n csrf_token.set_value(token, u.id)\n topic_board = Board.find_by(id=topic.board_id)\n boards = Board.all()\n return render_template('topic/update.html', topic=topic, topic_board=topic_board, boards=boards, token=token)\n","sub_path":"controllers/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"503169030","text":"\"\"\"\nPix2Surf Single-View Version Evaluation in multi view protocol\n\"\"\"\n\nfrom .modelbase_v2 import ModelBase\nfrom .pix2surf_sv import Network as SV_Net\nfrom core.models.utils import *\n\nimport os\nimport torch\nfrom core.evaluation import eval_warp\n\n\nclass Model(ModelBase):\n def __init__(self, cfg):\n super(Model, self).__init__(cfg)\n self.name = 'pix2surf-sv'\n self.cfg = cfg\n # register key component\n self.network = Network()\n self.optimizer = torch.optim.Adam(params=self.network.parameters(), lr=self.lr,\n betas=(self.cfg.ADAM_BETA1, self.cfg.ADAM_BETA2))\n # initialize models\n self.resume = cfg.RESUME\n if self.resume:\n self.resume_id = cfg.RESUME_EPOCH_ID\n load_path = os.path.join(cfg.ROOT_DIR, 'log', cfg.LOG_DIR, 'model',\n 'epoch_%d' % cfg.RESUME_EPOCH_ID + '.model')\n self.load_model(loadpath=load_path, current_model_state='cpu', strict=False)\n elif cfg.MODEL_INIT_PATH != ['None']:\n self.load_model(loadpath=cfg.MODEL_INIT_PATH, strict=False)\n self.to_gpus()\n # config output meaning\n self.output_info_dict = {\n 'metric': ['batch-loss', 'reg-v-loss', 'reg-x-loss', 'mask-v-loss', 'mask-x-loss',\n 'sp-loss', 'crr-xyz-loss'],\n 'image': ['uni-rgb-v', 'nox-v-gt-uni', 'mask-v'] +\n ['unwrapped-chart', 'unwrapped-chart-uni', 'learned-chart', 'sp-image-uni'],\n 'xls': ['metric-report']\n }\n\n def _preprocess(self, in_batch):\n return load_multiview_batch(in_batch)\n\n def _postprocess(self, batch):\n # compute metric in multi thread\n batch = eval_warp(batch, method_name='pix2surf-sv', nox_gt_key='nox-v-gt', nox_pred_key='sp-image')\n # add crr_loss to xls report\n batch['metric-report']['consistency-error'] = [float(i) for i in\n batch['crr-xyz-loss-xls'].detach().cpu().numpy()]\n return batch\n\n\nclass Network(SV_Net):\n def __init__(self):\n super(Network, self).__init__()\n # visualization resolution\n self.vis_chart_res = 256\n self.vis_chart_container = torch.zeros(1, 3, self.vis_chart_res, self.vis_chart_res)\n # make eval config\n self.eval_image_res = (240, 320)\n self.eval_image_grid = make_grid(self.eval_image_res)\n\n def forward(self, pack, is_train=True):\n batch = dict()\n n_batch = pack['nox-v'][0].shape[0]\n n_view = len(pack['rgb-v'])\n\n # pred list is a list for each view\n pred_list, code_list = list(), list()\n for ii in range(n_view): # do for each view\n pred, fm = self.network_dict['seg-net'](pack['rgb-v'][ii], return_code=True)\n pred_list.append(pred)\n code_list.append(self.network_dict['global-code'](fm).reshape(n_batch, -1, 1).contiguous())\n\n # prepare gather container\n pred_nox_v_list, pred_nox_x_list, pred_mask_v_list, pred_mask_x_list = [], [], [], []\n pred_xyz_list, pred_uv_list = [], []\n learned_chart_list, unwrapped_chart_list = [], []\n reg_v_loss, reg_x_loss, mask_v_loss, mask_x_loss, sp_loss = 0, 0, 0, 0, 0\n eval_rendered_list = []\n\n for ii in range(n_view):\n mask_v = pack['mask-v'][ii]\n mask_x = pack['mask-x'][ii]\n\n # make cnn prediction\n pred = pred_list[ii]\n pred_nox_v = pred[:, :3, :, :]\n pred_nox_x = pred[:, 3:6, :, :]\n pred_score_v = pred[:, 6:8, :, :]\n pred_score_x = pred[:, 8:10, :, :]\n learned_uv = self.sgmd(pred[:, 10:12, :, :])\n\n # make NOCS-regression branch\n mask1c_v = mask_v[:, 0, :, :].unsqueeze(1).detach()\n mask_v_loss = mask_v_loss + self.cls_criterion(pred_score_v, mask1c_v.squeeze(1).long().detach()) / n_view\n pred_mask_v = torch.argmax(pred_score_v, dim=1, keepdim=True).float()\n mask1c_x = mask_x[:, 0, :, :].unsqueeze(1).detach()\n mask_x_loss = 
mask_x_loss + self.cls_criterion(pred_score_x, mask1c_x.squeeze(1).long().detach()) / n_view\n pred_mask_x = torch.argmax(pred_score_x, dim=1, keepdim=True).float()\n reg_v_loss = reg_v_loss + self.ml2_criterion(pred_nox_v, pack['nox-v'][ii], mask1c_v, True) / n_view\n reg_x_loss = reg_x_loss + self.ml2_criterion(pred_nox_x, pack['nox-x'][ii], mask1c_x, True) / n_view\n\n # make mlp prediction\n eachview_z = code_list[ii].squeeze(2)\n queried_uv = query_feature(learned_uv, pack['uv-v'][ii])\n pred_xyz = self.network_dict['mlp'](eachview_z, queried_uv, unique_code=True)\n pred_xyz = self.sgmd(pred_xyz)\n sp_loss = sp_loss + self.ml2_criterion(pred_xyz, pack['uv-xyz-v'][ii], pack['uv-mask-v'][ii]) / n_view\n\n # Do SP evaluation\n _eval_rendered_list = list()\n for bid in range(n_batch):\n # select mask\n _mask = pred_mask_v[bid, ...].reshape(-1) # H*W\n _learned_uv = learned_uv[bid, ...].reshape(1, 2, -1) # 1,2,H*W\n _learned_uv = _learned_uv[:, :, _mask > 0] # 1,2,S\n uv = self.eval_image_grid.cuda().reshape(1, 2, -1)[:, :, _mask > 0].unsqueeze(3) # 1,2,S,1\n # do Surface Parametrization\n eval_xyz_v = self.network_dict['mlp'](eachview_z[bid, ...].unsqueeze(0), _learned_uv.unsqueeze(3),\n unique_code=True)\n eval_xyz_v = self.sgmd(eval_xyz_v) # 1,3,S,1\n uv[:, 0, :, :] = uv[:, 0, :, :] * mask1c_v.shape[2]\n uv[:, 1, :, :] = uv[:, 1, :, :] * mask1c_v.shape[3]\n uv = uv.long()\n idx = uv[:, 0, :, :] * mask1c_v.shape[3] + uv[:, 1, :, :] # B,N,1\n idx = idx.permute(0, 2, 1) # B,1,N\n vis_eval = torch.ones_like(pack['rgb-v'][ii]).float()[bid, ...].unsqueeze(0)\n vis_eval = vis_eval.reshape(1, 3, -1) # B,3,R*R\n vis_eval = vis_eval.scatter(dim=2, index=idx.repeat(1, 3, 1), src=eval_xyz_v.squeeze(3))\n vis_eval = vis_eval.reshape(1, 3, mask1c_v.shape[2], mask1c_v.shape[3])\n _eval_rendered_list.append(vis_eval)\n eval_rendered = torch.cat(_eval_rendered_list, 0)\n eval_rendered_list.append(eval_rendered)\n\n # vis unwrapped chart\n unwrapped_chart = self.vis_chart_container.repeat(n_batch, 1, 1, 1).cuda()\n unwrapped_chart = spread_feature(unwrapped_chart, learned_uv, pack['rgb-v'][ii], pack['mask-v'][ii])\n\n # gather\n pred_nox_v_list.append(pred_nox_v)\n pred_nox_x_list.append(pred_nox_x)\n pred_mask_v_list.append(pred_mask_v)\n pred_mask_x_list.append(pred_mask_x)\n\n pred_xyz_list.append(pred_xyz)\n pred_uv_list.append(queried_uv)\n unwrapped_chart_list.append(unwrapped_chart)\n learned_chart_list.append(learned_uv.repeat(1, 2, 1, 1)[:, :3, :, :] * pred_mask_x + (1.0 - pred_mask_v))\n\n # make naive multi-view constrain:\n _p1_list, _p2_list, _m_list = [], [], []\n _uv1_list, _uv2_list = [], []\n for base_view_id in range(len(pack['crr-idx-mtx'])):\n for query_view_id in range(len(pack['crr-idx-mtx'][base_view_id])):\n base_pc = pred_xyz_list[base_view_id]\n query_pc = pred_xyz_list[base_view_id + query_view_id + 1]\n base_uv = pred_uv_list[base_view_id]\n query_uv = pred_uv_list[base_view_id + query_view_id + 1]\n pair_idx = pack['crr-idx-mtx'][base_view_id][query_view_id].squeeze(3)\n paired_pc_from_base_to_query = torch.gather(base_pc.squeeze(3), dim=2,\n index=pair_idx.repeat(1, 3, 1)).unsqueeze(3)\n paired_uv_from_base_to_query = torch.gather(base_uv.squeeze(3), dim=2,\n index=pair_idx.repeat(1, 2, 1)).unsqueeze(3)\n _p1_list.append(paired_pc_from_base_to_query)\n _p2_list.append(query_pc)\n _uv1_list.append(paired_uv_from_base_to_query)\n _uv2_list.append(query_uv)\n _m_list.append(pack['crr-mask-mtx'][base_view_id][query_view_id])\n\n crr_xyz_loss_each = 
self.ml2_criterion(torch.cat(_p1_list, dim=2).contiguous(),\n torch.cat(_p2_list, dim=2).contiguous(),\n torch.cat(_m_list, dim=2).contiguous(),\n detach=False, reduce_batch=False)\n crr_xyz_loss = crr_xyz_loss_each.mean()\n\n crr_uv_loss_each = self.ml2_criterion(torch.cat(_uv1_list, dim=2).contiguous(),\n torch.cat(_uv2_list, dim=2).contiguous(),\n torch.cat(_m_list, dim=2).contiguous(),\n detach=False, reduce_batch=False)\n crr_uv_loss = crr_uv_loss_each.mean()\n\n # summary\n batch['batch-loss'] = (((reg_v_loss + reg_x_loss) * 0.3 + (mask_v_loss + mask_x_loss) * 0.7) * 0.1 + \\\n sp_loss * 0.9).unsqueeze(0)\n\n batch['reg-v-loss'] = reg_v_loss.detach().unsqueeze(0)\n batch['reg-x-loss'] = reg_x_loss.detach().unsqueeze(0)\n batch['mask-v-loss'] = mask_v_loss.detach().unsqueeze(0)\n batch['mask-x-loss'] = mask_x_loss.detach().unsqueeze(0)\n batch['sp-loss'] = sp_loss.detach().unsqueeze(0)\n batch['crr-xyz-loss'] = crr_xyz_loss.detach().unsqueeze(0)\n batch['crr-xyz-loss-xls'] = crr_xyz_loss_each.detach()\n\n batch['mask-v'] = torch.cat(pred_mask_v_list, 3)\n batch['mask-x'] = torch.cat(pred_mask_x_list, 3)\n batch['rgb-v'] = pack['rgb-v']\n batch['uni-rgb-v'] = torch.cat(pack['rgb-v'], 3)\n\n batch['nox-v-gt'] = [p * m + (1.0 - m) for p, m in zip(pack['nox-v'], pack['mask-v'])]\n batch['nox-x-gt'] = [p * m + (1.0 - m) for p, m in zip(pack['nox-x'], pack['mask-x'])]\n batch['nox-v-gt-uni'] = torch.cat([p * m + (1.0 - m) for p, m in zip(pack['nox-v'], pack['mask-v'])], 3)\n batch['nox-x-gt-uni'] = torch.cat([p * m + (1.0 - m) for p, m in zip(pack['nox-x'], pack['mask-x'])], 3)\n\n batch['sp-image'] = eval_rendered_list\n batch['sp-image-uni'] = torch.cat(eval_rendered_list, 3)\n\n batch['learned-chart'] = torch.cat(learned_chart_list, 3)\n batch['unwrapped-chart'] = torch.cat(unwrapped_chart_list, 3)\n vis_nsc_uni = unwrapped_chart_list[0]\n for new_scatter in unwrapped_chart_list:\n vis_nsc_uni = torch.max(new_scatter, vis_nsc_uni)\n batch['unwrapped-chart-uni'] = vis_nsc_uni\n\n return batch\n","sub_path":"core/models/pix2surf_sv_mveval.py","file_name":"pix2surf_sv_mveval.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
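The network above repeatedly applies a masked L2 criterion (`ml2_criterion`) so that only pixels inside the object mask contribute to the NOCS regression loss. A sketch of such a criterion under that reading; the project's real reduction and detach behaviour may differ:

```python
import torch

def masked_l2(pred, target, mask, eps=1e-8):
    # Only positions where mask > 0 contribute; normalise by the mask sum so the
    # loss does not shrink just because the object covers few pixels.
    diff = (pred - target) ** 2 * mask
    return diff.sum() / (mask.sum() + eps)

pred = torch.rand(2, 3, 4, 4)
target = torch.rand(2, 3, 4, 4)
mask = (torch.rand(2, 1, 4, 4) > 0.5).float()  # single-channel mask, broadcasts over channels
print(masked_l2(pred, target, mask))
```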
+{"seq_id":"495007642","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef fetch_company_type(company_name, company_code):\n\troot_url = 'https://www.bseindia.com/SiteCache/1D/CompanyHeader.aspx?Type=EQ&text=' + company_code\n\tr = requests.get(root_url)\n\tsoup = BeautifulSoup(r.content, 'html.parser')\n\ttd_tags = soup.find_all('td')\n\t#hardcoded the below line. Can't find anything better right now.\n\tindustry_type = td_tags[-1]\n\tcompany_name = company_name.replace('+' , ' ')\n\twrite_string = company_name + ',' + company_code + ',' + industry_type.string + '\\n'\n\twith open('data/company_type.csv', 'a') as file:\n\t\tfile.write(write_string)\n\n\n\t","sub_path":"additional_attributes.py","file_name":"additional_attributes.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"527508100","text":"import numpy as np\nfrom optparse import OptionParser\nimport scipy.linalg as la\nimport scipy.stats as stats\nimport scipy.linalg.blas as blas\nimport pandas as pd\nimport csv\nimport time\nimport fastlmm.util.VertexCut as vc\nfrom pysnptools.snpreader.bed import Bed\nimport pysnptools.util as pstutil\nimport pysnptools.util.pheno as phenoUtils\nnp.set_printoptions(precision=3, linewidth=200)\n\n\n\ndef loadData(bfile, extractSim, phenoFile, missingPhenotype='-9', loadSNPs=False, standardize=True):\n\tbed = Bed(bfile, count_A1=True)\n\t\n\tif (extractSim is not None):\n\t\tf = open(extractSim)\n\t\tcsvReader = csv.reader(f)\n\t\textractSnpsSet = set([])\n\t\tfor l in csvReader: extractSnpsSet.add(l[0])\t\t\t\n\t\tf.close()\t\t\n\t\tkeepSnpsInds = [i for i in range(bed.sid.shape[0]) if bed.sid[i] in extractSnpsSet]\t\t\n\t\tbed = bed[:, keepSnpsInds]\n\t\t\n\tphe = None\n\tif (phenoFile is not None):\tbed, phe = loadPheno(bed, phenoFile, missingPhenotype)\n\t\n\tif (loadSNPs):\n\t\tbed = bed.read()\n\t\tif (standardize): bed = bed.standardize()\t\n\t\n\treturn bed, phe\n\t\n\t\ndef loadPheno(bed, phenoFile, missingPhenotype='-9', keepDict=False):\n\tpheno = phenoUtils.loadOnePhen(phenoFile, missing=missingPhenotype, vectorize=True)\n\tcheckIntersection(bed, pheno, 'phenotypes')\n\tbed, pheno = pstutil.intersect_apply([bed, pheno])\n\tif (not keepDict): pheno = pheno['vals']\n\treturn bed, pheno\n\t\n\t\ndef checkIntersection(bed, fileDict, fileStr, checkSuperSet=False):\n\tbedSet = set((b[0], b[1]) for b in bed.iid)\n\tfileSet = set((b[0], b[1]) for b in fileDict['iid'])\n\t\n\tif checkSuperSet:\n\t\tif (not fileSet.issuperset(bedSet)): raise Exception(fileStr + \" file does not include all individuals in the bfile\")\n\t\n\tintersectSet = bedSet.intersection(fileSet)\n\tif (len(intersectSet) != len (bedSet)):\n\t\tprint(len(intersectSet), 'individuals appear in both the plink file and the', fileStr, 'file')\n\n\t\ndef symmetrize(a):\n return a + a.T - np.diag(a.diagonal())\n\t\n\t\n\ndef loadRelatedFile(bed, relFile):\n\trelatedDict = phenoUtils.loadOnePhen(relFile, vectorize=True)\n\tcheckIntersection(bed, relatedDict, 'relatedness', checkSuperSet=True)\n\t_, relatedDict = pstutil.intersect_apply([bed, relatedDict])\n\trelated = relatedDict['vals']\n\tkeepArr = (related < 0.5)\n\tprint(np.sum(~keepArr), 'individuals will be removed due to high relatedness')\n\treturn keepArr\n\t\n\t\ndef findRelated(bed, cutoff, kinshipFile=None):\n\n\tif (kinshipFile is None):\n\t\tprint('Computing kinship matrix...')\n\t\tt0 = time.time()\t\n\t\tXXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1) / bed.val.shape[1])\n\t\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\n\telse:\n\t\tXXT = np.loadtxt(kinshipFile)\n\n\t#Find related individuals\n\tremoveSet = set(np.sort(vc.VertexCut().work(XXT, cutoff))) #These are the indexes of the IIDs to remove\t\t\n\tprint('Marking', len(removeSet), 'individuals to be removed due to high relatedness')\n\t\n\t#keepArr = np.array([(1 if iid in keepSet else 0) for iid in bed.iid], dtype=bool)\t\n\tkeepArr = np.ones(bed.iid.shape[0], dtype=bool)\n\tfor i in removeSet: keepArr[i] = False\t\n\treturn keepArr\n\t\n\t\n\t\ndef eigenDecompose(XXT, ignore_neig=False):\n\tt0 = time.time()\n\tprint('Computing eigendecomposition...')\n\ts,U = la.eigh(XXT)\n\tif (not ignore_neig and (np.min(s) < -1e-4)): raise Exception('Negative eigenvalues found')\n\ts[s<0]=0\t\n\tind = np.argsort(s)\n\tind = ind[s>1e-12]\n\tU = U[:, ind]\n\ts = 
s[ind]\n\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\n\treturn s,U\n\t\n\t\n\ndef loadCovars(bed, covarFile):\n\tcovarsDict = phenoUtils.loadPhen(covarFile)\n\tcheckIntersection(bed, covarsDict, 'covariates', checkSuperSet=True)\n\t_, covarsDict = pstutil.intersect_apply([bed, covarsDict])\n\tcovar = covarsDict['vals']\n\treturn covar\t\n\t\ndef getSNPCovarsMatrix(bed, resfile, pthresh, mindist):\n\tsnpNameToNumDict = dict([])\n\tfor i,s in enumerate(bed.sid): snpNameToNumDict[s] = i\t\n\n\tf = open(resfile)\n\tcsvReader = csv.reader(f, delimiter=\"\\t\")\n\tnext(csvReader)\t\n\tsignificantSNPs = []\n\tsignificantSNPNames = []\n\tlastPval = 0\n\tfeaturesPosList = []\n\tfor l in csvReader:\n\t\tsnpName, pVal = l[0], float(l[4])\n\t\tif (pVal < lastPval): raise Exception('P-values are not sorted in descending order: ' + str(pVal) + \">\" + str(lastPval))\n\t\tlastPval = pVal\n\t\tif (pVal > pthresh): break\t\t\n\t\tif (snpName not in snpNameToNumDict): continue\t\t\t\t\t\t\t\n\t\tsignificantSNPNames.append(snpName)\n\t\tif (mindist == 0):\n\t\t\tsignificantSNPs.append(snpNameToNumDict[snpName])\n\t\t\tprint('Using SNP', snpName, 'with p<%0.2e'%pVal, 'as a fixed effect')\n\t\telse:\n\t\t\tposArr = bed.pos[snpNameToNumDict[snpName]]\n\t\t\tchrom, pos = posArr[0], int(posArr[2])\t\t\t\t\n\t\t\taddSNP = True\n\t\t\tfor (c,p) in featuresPosList:\n\t\t\t\tif (chrom == c and abs(pos-p) < mindist):\n\t\t\t\t\taddSNP = False\n\t\t\t\t\tbreak\n\t\t\tif addSNP:\n\t\t\t\tsignificantSNPs.append(snpNameToNumDict[snpName])\n\t\t\t\tfeaturesPosList.append((chrom, pos))\n\t\t\t\tprint('Using SNP', snpName, '('+str(int(chrom))+':'+str(pos)+') with p<%0.2e'%pVal, 'as a fixed effect')\n\tf.close()\n\n\tsnpCovarsMat = bed.val[:, significantSNPs]\n\treturn snpCovarsMat\n\t\n\t\n\t\ndef getExcludedChromosome(bfile, chrom):\n\tbed = Bed(bfile, count_A1=True)\t\n\tindsToKeep = (bed.pos[:,0] != chrom)\n\tbed = bed[:, indsToKeep]\t\n\treturn bed.read().standardize()\n\t\ndef getChromosome(bfile, chrom):\n\tbed = Bed(bfile, count_A1=True)\n\tindsToKeep = (bed.pos[:,0] == chrom)\n\tbed = bed[:, indsToKeep]\t\n\treturn bed.read().standardize()\n\t\n\ndef _fixupBedAndPheno(bed, pheno, missingPhenotype='-9'):\n\tbed = _fixupBed(bed)\n\tbed, pheno = _fixup_pheno(pheno, bed, missingPhenotype)\n\treturn bed, pheno\n\t\ndef _fixupBed(bed):\n\tif isinstance(bed, str):\n\t\treturn Bed(bed, count_A1=True).read().standardize()\n\telse: return bed\n\ndef _fixup_pheno(pheno, bed=None, missingPhenotype='-9'):\n\tif (isinstance(pheno, str)):\n\t\tif (bed is not None):\n\t\t\tbed, pheno = loadPheno(bed, pheno, missingPhenotype, keepDict=True)\n\t\t\treturn bed, pheno\n\t\telse:\n\t\t\tphenoDict = phenoUtils.loadOnePhen(pheno, missing=missingPhenotype, vectorize=True)\n\t\t\treturn phenoDict\n\telse:\n\t\tif (bed is not None): return bed, pheno\t\t\t\n\t\telse: return pheno\n\ndef linreg(bed, pheno):\n\n\t#Extract snps and phenotype\n\tbed, pheno = _fixupBedAndPheno(bed, pheno)\t\n\tif isinstance(pheno, dict):\tphe = pheno['vals']\t\n\telse: phe = pheno\t\t\n\tif (len(phe.shape)==2):\n\t\tif (phe.shape[1]==1): phe=phe[:,0]\n\t\telse: raise Exception('More than one phenotype found')\t\n\n\t#Normalize y. 
We assume X is already normalized.\n\ty = phe - phe.mean(); y /= y.std()\n\n\t#Compute p-values\n\tXy = bed.val.T.dot(y) / y.shape[0]\n\tXy[Xy>1.0] = 1.0\n\tXy[Xy<-1.0] = -1.0\n\tdf = y.shape[0]-2\n\tTINY = 1.0e-20\n\tt = Xy * np.sqrt(df / ((1.0-Xy+TINY) * (1.0+Xy+TINY)))\n\tpValT = stats.t.sf(np.abs(t), df)*2\t\n\t\n\t#Create pandas data frame\n\titems = [\n\t\t('SNP', bed.sid),\n\t\t('Chr', bed.pos[:,0]), \n\t\t('GenDist', bed.pos[:,1]),\n\t\t('ChrPos', bed.pos[:,2]), \n\t\t('PValue', pValT), \n\t]\n\tframe = pd.DataFrame.from_items(items)\t\n\tframe.sort(\"PValue\", inplace=True)\n\tframe.index = np.arange(len(frame))\t\n\treturn frame\n\t\ndef powerPlot(df, causalSNPs, title=''):\n\timport pylab\n\tcausalSNPs = set(causalSNPs)\n\tcsnpPvals = df[df['SNP'].isin(causalSNPs)][\"PValue\"]\t\n\tpvalPoints = np.logspace(-6, -2, num=1000)\n\tpower = [np.mean(csnpPvals < p ) for p in list(pvalPoints)]\n\tpylab.plot(-np.log10(pvalPoints), power)\n\tpylab.xlabel(\"-log10(Significance Threshold)\")\n\tpylab.ylabel(\"Power\")\n\tpylab.title(title)\n\t\n\t\ndef computeCovar(bed, shrinkMethod, fitIndividuals):\n\teigen = dict([])\n\n\tif (shrinkMethod in ['lw', 'oas', 'l1', 'cv']):\n\t\timport sklearn.covariance as cov\n\t\tt0 = time.time()\n\t\tprint('Estimating shrunk covariance using', shrinkMethod, 'estimator...')\n\t\t\t\t\n\t\tif (shrinkMethod == 'lw'): covEstimator = cov.LedoitWolf(assume_centered=True, block_size = 5*bed.val.shape[0])\n\t\telif (shrinkMethod == 'oas'): covEstimator = cov.OAS(assume_centered=True)\n\t\telif (shrinkMethod == 'l1'): covEstimator = cov.GraphLassoCV(assume_centered=True, verbose=True)\n\t\telif (shrinkMethod == 'cv'):\n\t\t\tshrunkEstimator = cov.ShrunkCovariance(assume_centered=True)\n\t\t\tparam_grid = {'shrinkage': [0.01, 0.1, 0.3, 0.5, 0.7, 0.9, 0.99]}\t\t\t\n\t\t\tcovEstimator = sklearn.grid_search.GridSearchCV(shrunkEstimator, param_grid)\t\t\n\t\telse: raise Exception('unknown covariance regularizer')\n\t\t\n\t\tcovEstimator.fit(bed.val[fitIndividuals, :].T)\n\t\tif (shrinkMethod == 'l1'):\n\t\t\talpha = covEstimator.alpha_\n\t\t\tprint('l1 alpha chosen:', alpha)\n\t\t\tcovEstimator2 = cov.GraphLasso(alpha=alpha, assume_centered=True, verbose=True)\n\t\telse:\n\t\t\tif (shrinkMethod == 'cv'): shrinkEstimator = clf.best_params_['shrinkage']\n\t\t\telse: shrinkEstimator = covEstimator.shrinkage_\n\t\t\tprint('shrinkage estimator:', shrinkEstimator)\n\t\t\tcovEstimator2 = cov.ShrunkCovariance(shrinkage=shrinkEstimator, assume_centered=True)\n\t\tcovEstimator2.fit(bed.val.T)\n\t\tXXT = covEstimator2.covariance_ * bed.val.shape[1]\n\t\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\n\t\t\t\n\telse:\n\t\tprint('Computing kinship matrix...')\t\n\t\tt0 = time.time()\n\t\tXXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1))\n\t\tprint('Done in %0.2f'%(time.time()-t0), 'seconds')\t\t\n\t\ttry: shrinkParam = float(shrinkMethod)\n\t\texcept: shrinkParam = -1\n\t\tif (shrinkMethod == 'mylw'):\n\t\t\tXXT_fit = XXT[np.ix_(fitIndividuals, fitIndividuals)]\n\t\t\tsE2R = (np.sum(XXT_fit**2) - np.sum(np.diag(XXT_fit)**2)) / (bed.val.shape[1]**2)\n\t\t\t#temp = (bed.val**2).dot((bed.val.T)**2)\n\t\t\ttemp = symmetrize(blas.dsyrk(1.0, bed.val[fitIndividuals, :]**2, lower=1))\n\t\t\tsER2 = (temp.sum() - np.diag(temp).sum()) / bed.val.shape[1]\n\t\t\tshrinkParam = (sER2 - sE2R) / (sE2R * (bed.val.shape[1]-1))\t\t\n\t\tif (shrinkParam > 0):\n\t\t\tprint('shrinkage estimator:', 1-shrinkParam)\n\t\t\tXXT = (1-shrinkParam)*XXT + 
bed.val.shape[1]*shrinkParam*np.eye(XXT.shape[0])\n\t\n\treturn XXT\n\n\n\t\n\t\ndef standardize(X, method, optionsDict):\n\tfitIndividuals = np.ones(X.shape[0], dtype=np.bool)\n\tif (method == 'frq'):\n\t\tempMean = X.mean(axis=0) / 2.0\n\t\tX[:, empMean>0.5] = 2 - X[:, empMean>0.5]\t\n\t\tprint('regularizng SNPs according to frq file...')\n\t\tfrqFile = (optionsDict['bfilesim']+'.frq' if (optionsDict['frq'] is None) else optionsDict['frq'])\n\t\tmafs = np.loadtxt(frqFile, usecols=[1,2]).mean(axis=1)\n\t\tsnpsMean = 2*mafs\n\t\tsnpsStd = np.sqrt(2*mafs*(1-mafs))\t\n\telif (method == 'related'):\n\t\tif (optionsDict['related'] is None): raise Exception('related file not supplied')\n\t\tprint('regularizng SNPs according to non-related individuals...')\n\t\trelLines = np.loadtxt(optionsDict['related'], usecols=[2])\t\n\t\tkeepArr = (relLines != 1)\n\t\tprint('Excluding', np.sum(~keepArr), 'from the covariance matrix standardization')\n\t\tsnpsMean = X[keepArr, :].mean(axis=0)\n\t\tsnpsStd = X[keepArr, :].std(axis=0)\n\t\tfitIndividuals = keepArr\n\telif (method == 'controls'):\n\t\tphe = optionsDict['pheno']\n\t\tpheThreshold = phe.mean()\n\t\tcontrols = (phe int:\n import collections\n if not tasks: return 0\n dic = collections.Counter(tasks)\n max_task = sorted(dic.items(), reverse=True, key = lambda x:x[1])[0][0]\n res = (dic[max_task]-1) * (n+1) + 1\n for k, v in dic.items():\n if v == dic[max_task] and k != max_task:\n res += 1\n return len(tasks) if res < len(tasks) else res","sub_path":"Week_04/[621]任务调度器.py","file_name":"[621]任务调度器.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"157793042","text":"import numpy as np\n\ndef prim(G):\n mst = np.zeros(shape=G.adj.shape)\n T = np.array([0])\n W = np.array([i for i in range(1, len(G.adj))])\n sum_mst = 0\n\n while len(T) != len(G.adj):\n min = None\n for t in T:\n for w in W:\n if G.adj[t][w] != 0:\n if min == None or G.adj[t][w] < G.adj[min[0]][min[1]]:\n min = [t, w]\n \n mst[min[0]][min[1]] = mst[min[1]][min[0]] = G.adj[min[0]][min[1]]\n T = np.append(T, min[1])\n W = W[W != min[1]]\n sum_mst += G.adj[min[0]][min[1]]\n \n return [mst, sum_mst]","sub_path":"03_project/mst.py","file_name":"mst.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"637475622","text":"\nimport json\nimport requests\nfrom urllib.request import urljoin\nfrom collections import OrderedDict\n\nfrom btcmarkets.auth import build_headers\n\n\nclass BTCMarkets:\n\n base_url = 'https://api.btcmarkets.net'\n\n def __init__(self):\n self.session = requests.Session()\n\n def get_accounts(self):\n return self.request('GET', end_point='/account/balance')\n\n def get_order_book(self, instrument, currency):\n return self.request('GET', end_point='/market/%s/%s/orderbook' % (instrument, currency))\n\n def get_trades(self, instrument, currency, since=0):\n return self.request('GET', end_point='/market/%s/%s/trades?since=%s' % (instrument, currency, since))\n\n def get_open_orders(self, instrument, currency, limit=100, since=0):\n data = OrderedDict([\n ('currency', currency), ('instrument', instrument), ('limit', limit), ('since', since),\n ])\n return self.request('POST', '/order/open', data=data)\n\n def get_order_history(self, instrument, currency, limit=100, since=0):\n data = OrderedDict([\n ('currency', currency), ('instrument', instrument), ('limit', limit), ('since', since)\n ])\n return self.request('POST', '/order/history', data=data)\n\n def get_trade_history(self, instrument, currency, limit=100, since=0):\n data = OrderedDict([\n ('currency', currency), ('instrument', instrument), ('limit', limit), ('since', since)\n ])\n return self.request('POST', '/order/trade/history', data=data)\n\n def get_order_detail(self, order_ids):\n data = OrderedDict([('orderIds', order_ids)])\n return self.request('POST', end_point='/order/detail', data=data)\n\n def insert_order(self, instrument, currency, order_side, price, volume, order_type):\n \"\"\"\n :param instrument: {'BTC', 'ETH', 'LTC'}\n :param currency: {'BTC', 'AUD'}\n :param order_side: ('Bid', 'Ask')\n :param price: price for order. Must be * 100,000,000 as per https://github.com/BTCMarkets/API/wiki/Trading-API\n :param volume: volume for order. Must be * 100,000,000 as per https://github.com/BTCMarkets/API/wiki/Trading-API\n :param order_type: {'Limit', 'Market')\n :return:\n \"\"\"\n assert len(str(int(price))) > 5 and len(str(int(volume))) > 5\n data = OrderedDict([\n ('currency', currency),\n ('instrument', instrument),\n ('price', price),\n ('volume', volume),\n ('orderSide', order_side),\n ('ordertype', order_type),\n ('clientRequestId', '1'),\n ])\n return self.request('POST', end_point='/order/create', data=data)\n\n def delete_order(self, order_ids: list):\n data = OrderedDict([('orderIds', order_ids)])\n return self.request('POST', end_point='/order/cancel', data=data)\n\n def request(self, method, end_point, data=None):\n url = urljoin(self.base_url, end_point)\n if data is not None:\n data = json.dumps(data, separators=(',', ':'))\n headers = build_headers(end_point, data)\n resp = self.session.request(method, url=url, headers=headers, data=data)\n resp_json = resp.json()\n if 'success' in resp_json and not resp_json['success']:\n raise Exception('ErrorCode: %s Message: %s' % (resp_json['errorCode'], resp_json['errorMessage']))\n return resp_json\n","sub_path":"btcmarkets/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"208327613","text":"# Copyright (C) 2010-2011 Richard Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom CIM16.Element import Element\n\nclass CurveData(Element):\n \"\"\"Multi-purpose data points for defining a curve.Multi-purpose data points for defining a curve.\n \"\"\"\n\n def __init__(self, y3value=0.0, xvalue=0.0, y2value=0.0, y1value=0.0, Curve=None, *args, **kw_args):\n \"\"\"Initialises a new 'CurveData' instance.\n\n @param y3value: The data value of the third Y-axis variable (if present), depending on the Y-axis units \n @param xvalue: The data value of the X-axis variable, depending on the X-axis units \n @param y2value: The data value of the second Y-axis variable (if present), depending on the Y-axis units \n @param y1value: The data value of the first Y-axis variable, depending on the Y-axis units \n @param Curve: The Curve defined by this CurveData.\n \"\"\"\n #: The data value of the third Y-axis variable (if present), depending on the Y-axis units\n self.y3value = y3value\n\n #: The data value of the X-axis variable, depending on the X-axis units\n self.xvalue = xvalue\n\n #: The data value of the second Y-axis variable (if present), depending on the Y-axis units\n self.y2value = y2value\n\n #: The data value of the first Y-axis variable, depending on the Y-axis units\n self.y1value = y1value\n\n self._Curve = None\n self.Curve = Curve\n\n super(CurveData, self).__init__(*args, **kw_args)\n\n _attrs = [\"y3value\", \"xvalue\", \"y2value\", \"y1value\"]\n _attr_types = {\"y3value\": float, \"xvalue\": float, \"y2value\": float, \"y1value\": float}\n _defaults = {\"y3value\": 0.0, \"xvalue\": 0.0, \"y2value\": 0.0, \"y1value\": 0.0}\n _enums = {}\n _refs = [\"Curve\"]\n _many_refs = []\n\n def getCurve(self):\n \"\"\"The Curve defined by this CurveData.\n \"\"\"\n return self._Curve\n\n def setCurve(self, value):\n if self._Curve is not None:\n filtered = [x for x in self.Curve.CurveDatas if x != self]\n self._Curve._CurveDatas = filtered\n\n self._Curve = value\n if self._Curve is not None:\n if self not in self._Curve._CurveDatas:\n self._Curve._CurveDatas.append(self)\n\n Curve = property(getCurve, setCurve)\n\n","sub_path":"CIM16/IEC61970/Core/CurveData.py","file_name":"CurveData.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"89104676","text":"import json\n\n\nclass Generic(object):\n def __init__(self,object):\n self.__dict__.update(object)\n self.__original__=object\n\n def __repr__(self):\n rep='<'+self.__class__.__name__+' {'\n first=True\n for attrib in dir(self):\n if not attrib.startswith('__'):\n if first:\n first=False\n else:\n rep=rep+','\n rep=rep+attrib\n return rep+'}>'\n\n def pretty(self):\n return json.dumps(self.__original__,indent=3)\n\n def get(self,key):\n return self.__dict__.get(key)\n","sub_path":"dhis/types/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"135637227","text":"import pytest\n\nfrom vkbottle import ManySessionManager\nfrom vkbottle.tools.test_utils import MockedClient\n\n\n@pytest.mark.asyncio\nasync def test_client():\n client = MockedClient(\"some text\")\n text = await client.request_text(\"GET\", \"https://example.com\")\n await client.close()\n assert text == \"some text\"\n\n\n@pytest.mark.asyncio\nasync def test_session_manager():\n session_manager = ManySessionManager(lambda: MockedClient(\"some text\"))\n async with session_manager as session:\n assert await session.request_text(\"GET\", \"https://example.com\") == \"some text\"\n","sub_path":"tests/http_test.py","file_name":"http_test.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"329776988","text":"import requests\nimport json\nimport logging\nfrom datetime import datetime\nfrom urllib import parse\nfrom sys import exit\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\nf = open(\"token.txt\", \"r\")\nTOKEN = f.readline()\nif not TOKEN:\n logging.error(\"Error occurred, have you filled the token.txt file with your bot token?\")\n exit()\n\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\nf = open(\"master.txt\", \"r\")\nmaster = int(f.readline())\nif not master:\n logging.error(\"Error occurred, have you filled the master.txt file with your master id?\")\n exit()\n\n\nclass MessageHandler:\n\n def __init__(self):\n self.master = master\n self.allowed = [self.master]\n\n #\n def get_url(self, url):\n try:\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n except requests.exceptions.ConnectionError:\n logging.info(\"Max retries exceed\")\n content = \"\"\n return content\n\n #\n def get_json_from_url(self, url):\n try:\n content = self.get_url(url)\n js = json.loads(content)\n except AttributeError:\n event_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logging.error(\"\\nFailed to load json content at {}, content was {}\\n\".format(event_time, self.get_url(url)))\n js = []\n return js\n\n #\n def get_updates(self, offset=None):\n url = URL + \"getUpdates?timeout=1\"\n if offset:\n url += \"&offset={}\".format(offset)\n js = self.get_json_from_url(url)\n return js\n\n #\n def get_last_update_id(self, updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n #\n def send_message(self, text, chat_id, reply_markup=None):\n text = parse.quote_plus(text)\n url = URL + \"SendMessage?text={}&chat_id={}&parse_mode=Markdown\".format(text, chat_id)\n if reply_markup:\n url += \"&reply_markup={}\".format(reply_markup)\n self.get_url(url)\n\n #\n def get_text_and_chat(self, updates):\n len_updates = len(updates[\"result\"])\n last_update = len_updates - 1\n try:\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n except:\n text = \"no valid text\"\n logging.error(\"no valid text\")\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return text, chat_id\n\n #\n def get_name(self, updates):\n for update in updates[\"result\"]:\n chat = update[\"message\"][\"chat\"][\"id\"]\n try:\n name = update[\"message\"][\"chat\"][\"first_name\"]\n except:\n # write_log2(\"no_name\", time)\n name = \"n/a\"\n try:\n surname = update[\"message\"][\"chat\"][\"last_name\"]\n except:\n # write_log2(\"no_surname\", time)\n surname = \"n/a\"\n return name\n\n #\n def id_check(self, updates):\n for update in updates[\"result\"]:\n chat = update[\"message\"][\"chat\"][\"id\"]\n logging.info(\"chat: {}, allowed: {}\".format(chat, self.allowed))\n date = update[\"message\"][\"date\"]\n time = datetime.fromtimestamp(date)\n time = time.strftime('%Y-%m-%d at %H:%M:%S')\n try:\n name = update[\"message\"][\"chat\"][\"first_name\"]\n except:\n name = \"n/a\"\n try:\n surname = update[\"message\"][\"chat\"][\"last_name\"]\n except:\n surname = \"n/a\"\n try:\n username = update[\"message\"][\"chat\"][\"username\"]\n except:\n username = \"n/a\"\n\n if chat in self.allowed:\n #logging.info(\"\\nconnection from: {} ... \\nconnection successful\".format(chat))\n return 1\n else:\n self.send_message(\"Unknown user, access denied. 
Contact system admin\", chat)\n message = [name, \" \", surname, \"\\nUsername: \", username, \"\\nID: \", chat, \"\\nAt: \", str(time),\n \"Concedere i privilegi all'utente?\"]\n message = ''.join(map(str, message))\n keyboard = [[chat], [\"Home\"]]\n self.send_message(message, self.master, keyboard)\n return 0","sub_path":"messageHandler.py","file_name":"messageHandler.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"218396426","text":"from sentiment_classifier import SentimentClassifier\r\n\r\nclf = SentimentClassifier()\r\n\r\nprediction = clf.get_prediction_message('Ужасно слабый аккумулятор, это основной минус этого аппарата, разряжается '\r\n 'буквально за пару часов при включенном wifi и на макс подсветке, '\r\n 'например если играть или смотреть видео, следовательно использовать можно '\r\n 'только если есть постоянная возможность подзарядиться. Качества звука через '\r\n 'динамик далеко не на высоте.Наблюдаются незначительные тормоза в некоторых '\r\n 'приложениях и вообще в меню. Очень мало встроенной памяти, а приложения '\r\n 'устанавливаются именно туда, с этим связанны неудобства - нужно постоянно '\r\n 'переносить их на карту памяти.\\ Несколько неудобно что нету отдельной кнопки '\r\n 'для фото. Подумываю купить батарею большей емкость мб что нибудь измениться.')\r\n\r\nprint(prediction[0])\r\n","sub_path":"classifier_test.py","file_name":"classifier_test.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"52373397","text":"from typing import List\nfrom collections import deque\n\n\nclass Solution:\n def swimInWater(self, grid: List[List[int]]) -> int:\n n = len(grid)\n\n def bfs(t):\n queue = deque([(0, 0)])\n visited_node = set((0, 0))\n while queue:\n queue_length = len(queue)\n for _ in range(queue_length):\n i, j = queue.pop()\n if (i, j) == (n - 1, n - 1):\n return True\n for delta_i, delta_j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n new_i, new_j = i + delta_i, j + delta_j\n if (\n 0 <= new_i < n\n and 0 <= new_j < n\n and (new_i, new_j) not in visited_node\n and grid[new_i][new_j] <= t\n ):\n queue.appendleft((new_i, new_j))\n visited_node.add((new_i, new_j))\n return False\n\n start, end = grid[0][0], max((max(grid[i]) for i in range(n)))\n while start <= end:\n mid = (start + end) // 2\n if bfs(mid):\n end = mid - 1\n else:\n start = mid + 1\n\n return start\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n print(solution.swimInWater(grid=[[0]]))\n print(solution.swimInWater(grid=[[0, 2], [1, 3]]))\n print(\n solution.swimInWater(\n grid=[\n [0, 1, 2, 3, 4],\n [24, 23, 22, 21, 5],\n [12, 13, 14, 15, 16],\n [11, 17, 18, 19, 20],\n [10, 9, 8, 7, 6],\n ]\n )\n )\n","sub_path":"binary_search/778SwiminRisingWater.py","file_name":"778SwiminRisingWater.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"173477954","text":"# Napisać program, który zapyta użytkownika o kod waluty\n# a następnie pobierze z NBP kurs z ostatnich kilkunastu dni\n# i wygeneruje wykres w formacie SVG.\n\nimport json\nimport requests\n\nURL = \"http://api.nbp.pl/api/exchangerates/rates/A/%s/last/%d/?format=JSON\"\n\ndef rysuj_wykres(dane, nazwapliku):\n SZER = 800\n WYS = 600\n MARGIN = 20\n ODSTEP = 10\n f = open(nazwapliku, \"w\")\n f.write('')\n f.write(f'')\n f.close()\n\n#waluta = input(\"Podaj kod waluty: \")\nwaluta = \"EUR\"\nile = 12\n\nurl = URL % ( waluta, ile )\n\nkursy = [ (k['effectiveDate'], k['mid']) for k in json.loads(requests.get(url).text)['rates'] ]\n\nrysuj_wykres( kursy, \"zadanie04.svg\")","sub_path":"pliki/zadanie04.py","file_name":"zadanie04.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"375056437","text":"# -*-coding:utf-8 -*\nimport pygame, math, narro.directions, narro.tmxreader, numpy, os, sys\nfrom pygame.locals import *\nfrom narro import *\nimport pygame.surfarray as surfarray\nfrom collections import OrderedDict\nfrom .tile import *\nfrom .constantes import *\nfrom .observateur import *\nfrom .zonePensee import *\n\nif SESSION_DEBUG:\n import pdb\n\n\nclass Carte(Observateur):\n \"\"\"Classe représentant une carte au niveau des données\"\"\"\n def __init__(self, nomCarte, jeu): \n \"\"\"Initialise la carte à exécuter à partir des données issues de son fichier SQMAP.\n Cette méthode se charge surtout du transfert du format de carte .narromap à celui du Narro Engine (purement mémoriel).\"\"\"\n Observateur.__init__(self)\n self._carteTiled = tmxreader.TileMapParser().parse_decode(os.path.join(DOSSIER_RESSOURCES, nomCarte + \".tmx\"))\n self._nom, self._description = self._carteTiled.properties.get(\"nom\", nomCarte), self._carteTiled.properties.get(\"description\", \"\")\n self._musique = self._carteTiled.properties.get(\"musique\", \"\")\n self._longueur, self._largeur = self._carteTiled.width, self._carteTiled.height\n self._nombreCouches, self._hauteurTile = len(self._carteTiled.layers), self._carteTiled.tilewidth\n self._scrollingX, self._scrollingY = 0,0\n self._jeu, self._toutAChanger = jeu, True\n self._dicoSurfaces, self._tiles, self._blocsRef, self._pnj, i = dict(), list(), dict(), dict(), 0\n self._ecran = Rect(0, 0, self._longueur*32, self._largeur*32)\n self._scrollingPossible, self._etapeScrolling = False, 0\n self._surfaceZonePensee, self._positionZonePensee, self._besoinAffichageZonePensee = None, None, False\n self._emplacementScrollingX, self._emplacementScrollingY = int(int(FENETRE[\"longueurFenetre\"]/2) / 32)*32, int(int(FENETRE[\"largeurFenetre\"]/2)/32)*32\n self._ecranVisible = Rect(0, 0, FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetre\"])\n self._positionsDepart = dict()\n self._fenetre, self._blitFrame = self._jeu.fenetre, False\n self._transformationsGlobales, self._transformationsParties, self._parametresTransformations = list(), list(), dict()\n\n self._dicoGid = dict()\n for tileset in self._carteTiled.tile_sets:\n for image in tileset.images:\n self._ajouterSurface(False, image.source, False, tileset=tileset, mobile=False)\n \n self._tilesLayers = []\n i, x, y = 0, 0, 0\n while i < self._nombreCouches:\n x = 0\n self._tilesLayers.append(pygame.Surface((self._longueur * 32, self._largeur * 32), flags=SRCALPHA))\n self._pnj[i] = dict()\n while x < self._longueur:\n y = 0\n self._tiles.append(list())\n while y < self._largeur:\n self._tiles[x].append(Tile(self._nombreCouches))\n gid = self._carteTiled.layers[i].content2D[x][y]\n if gid != 0: #Bloc plein\n self._tiles[x][y].bloc.append(Bloc(infos=self._dicoGid[gid]))\n surfaceTileset, positionSource = self._dicoSurfaces[self._dicoGid[gid][0]][\"Source\"], self._dicoGid[gid][2]\n self._tilesLayers[i].blit(surfaceTileset, (x * self._hauteurTile, y * self._hauteurTile), area=positionSource) \n else: #Bloc vide\n self._tiles[x][y].bloc.append(Bloc(vide=True))\n if i == 0: #Sur la couche 0, il faut mettre du noir pour les blocs vides\n self._tilesLayers[i].fill((0,0,0), (x * self._hauteurTile, y * self._hauteurTile, self._hauteurTile, self._hauteurTile))\n if i == self._nombreCouches - 1:\n self._tiles[x][y].recalculerPraticabilites()\n y += 1\n x += 1\n i += 1\n del self._dicoGid\n\n def _completerDicoGids(self, nomTileset, tileset, longueur, largeur):\n \"\"\"Lors du 
chargement d'un tileset dans _ajouterSurface (quand une carte est créée), cette fonction se charge de faire correspondre à chaque tile du tileset les infos\n qui lui correspondent. 1 Tile dans le tileset = 1 GID = 1 position source, 1 praticabilité, 1 nom de tileset\"\"\"\n gid, idTileset, x, y, tileWidth, tileHeight = int(tileset.firstgid), 0, 0, 0, int(tileset.tilewidth), int(tileset.tileheight)\n while y < largeur:\n x = 0\n while x < longueur:\n if len(tileset.tiles) > 0:\n praticabilite = tileset.tiles[idTileset].properties.get(\"Praticabilite\", False) == \"True\"\n else: #Tileset importé d'ailleurs, les praticabilités n'ont pas été indiquées\n praticabilite = True\n self._dicoGid[gid] = nomTileset, praticabilite, (x, y, tileWidth, tileHeight)\n gid, idTileset, x = gid + 1, idTileset + 1, x + tileWidth #increments\n y += tileHeight\n\n def _ajouterSurface(self, positionSource, cheminVersTileset,couleurTransparente, tileset=False, mobile=True):\n \"\"\"Ajoute la surface correspondant à un bloc dans le dico de surfaces, si elle n'y est pas déjà. \n Pour les tilesets, on ajoute la surface entière seulement. Pour les mobiles, on enregistre aussi la partie du tileset qui nous intéresse.\n Pour les tilesets, on complète le dico de GIDs (lors de la création de la carte).\"\"\"\n nomTileset = os.path.basename(cheminVersTileset)\n if nomTileset not in self._dicoSurfaces:\n self._dicoSurfaces[nomTileset] = dict()\n try:\n self._dicoSurfaces[nomTileset][\"Source\"] = pygame.image.load(os.path.join(DOSSIER_RESSOURCES,cheminVersTileset))\n if tileset is not False:\n self._completerDicoGids(nomTileset, tileset, self._dicoSurfaces[nomTileset][\"Source\"].get_width(), self._dicoSurfaces[nomTileset][\"Source\"].get_height())\n except pygame.error as erreur:\n print( MESSAGE_ERREUR_CHARGEMENT_TILESET.format(nomTileset), str(erreur) )\n if mobile is True and positionSource not in self._dicoSurfaces[nomTileset].keys(): #On ne conserve les sous-surfaces que des mobiles\n self._dicoSurfaces[nomTileset][positionSource] = pygame.Surface((positionSource[2],positionSource[3]), flags=SRCALPHA).convert_alpha()\n self._dicoSurfaces[nomTileset][positionSource].blit(self._dicoSurfaces[nomTileset][\"Source\"], (0,0), area=positionSource)\n elif mobile is False and positionSource is not False: #pour changerBloc : on retourne la sous-surface pour la blitter sur les tiles layers\n return self._dicoSurfaces[nomTileset][\"Source\"].subsurface(positionSource)\n\n def changerBloc(self, x, y, c, nomTileset, positionSource, couleurTransparente, praticabilite, vide=False):\n if self.tileExistant(x,y) is True and c < self.nombreCouches:\n bloc, jeu = self._tiles[x][y].bloc[c], self._jeu\n if vide is False:\n bloc = Bloc(nomTileset=nomTileset, positionSource=positionSource, couleurTransparente=couleurTransparente, praticabilite=praticabilite)\n self._tiles[x][y].bloc[c] = bloc\n surfaceBloc = self._ajouterSurface(positionSource, nomTileset, couleurTransparente, tileset=False, mobile=False)\n self._tilesLayers[c].blit(surfaceBloc, (x*self._hauteurTile, y*self._hauteurTile) )\n else:\n bloc, praticabilite = Bloc(jeu, vide=True), True\n self._tiles[x][y].bloc[c] = bloc\n absi, ordo, i, a = x*self._hauteurTile, y*self._hauteurTile, 0, 0\n couleurEntierementTransparente = Color(0,0,0,0)\n while i < self._hauteurTile: #On rend transparent les pixels du tile désormais vide\n a = 0\n while a < self._hauteurTile:\n self._tilesLayers[c].set_at((absi+i, ordo+a), couleurEntierementTransparente)\n a += 1\n i += 1\n 
self._tiles[x][y].modifierPraticabilite(c, praticabilite)\n self.mettreToutAChanger()\n\n\n def tileExistant(self,x,y):\n \"\"\"Retourne True si le tile de coordonnées , existe\"\"\"\n return x >= 0 and x < self._longueur and y >= 0 and y < self._largeur\n\n def tilePraticable(self, x, y, c):\n if x < len(self._tiles):\n if y < len(self._tiles[x]):\n if c < len(self._tiles[x][y].praticabilite):\n return self._tiles[x][y].praticabilite[c]\n else:\n return False\n else:\n return False\n else:\n return False\n\n def _determinerPresenceSurTiles(self, x, y, longueur, largeur):\n abscisses, ordonnees, x, y, longueur, largeur, i = [], [], (x / 32), (y/32), int(longueur/32), int(largeur/32), 0\n abscisses = list(range(math.floor(x), math.ceil(x) + longueur))\n ordonnees = list(range(math.floor(y), math.ceil(y) + largeur))\n listeTilesPresence = [(absa, ordo) for absa in abscisses for ordo in ordonnees]\n return listeTilesPresence\n\n def coordonneesAuTileSuivant(self, direction, x, y):\n \"\"\"Retourne les deux coordonnées au tile suivant en fonction de la direction.\"\"\"\n xReponse, yReponse = int(x/32), int(y/32)\n if direction is \"Gauche\" or direction is \"Droite\":\n xReponse = int( directions.ajusterCoordonneesLorsDeplacement(x, direction) / 32)\n elif direction is \"Haut\" or direction is \"Bas\":\n yReponse = int( directions.ajusterCoordonneesLorsDeplacement(y, direction) / 32)\n return (xReponse, yReponse)\n\n def deplacementPossible(self, positionCarte, c, nomPNJ):\n \"\"\"Indique si un déplacement en est possible. Retourne un 2-tuple avec :\n * si un PNJ peut être positionné en , sinon . Si , sont fournis, ne prend pas en compte le PNJ à cette position pour les collisions.\n * Le tile qui vient d'être quitté.\"\"\"\n deplacementPossible = True\n if self._ecran.contains(positionCarte) == 0: #Si la position d'arrivée existe dans la carte\n deplacementPossible = False\n pnjsEnCollision = [pnj for pnj in self._pnj[c].values() if pnj.nomPNJ != nomPNJ and (pnj.positionCarte.colliderect(positionCarte) == 1 and (pnj.positionCarteSuivante == positionCarte or pnj.positionCarteSuivante == False))]\n if len(pnjsEnCollision) > 0:\n deplacementPossible = False\n for (x,y) in self._determinerPresenceSurTiles(positionCarte.left, positionCarte.top, positionCarte.width, positionCarte.height):\n if self.tilePraticable(x, y, c) is False: #Si le tile est impraticable\n deplacementPossible = False\n return deplacementPossible\n\n def supprimerPNJ(self, nomPNJ, couche):\n \"\"\"Supprime un PNJ à l'écran.\"\"\"\n if nomPNJ in self._pnj[couche].keys():\n del self._pnj[couche][nomPNJ]\n self._toutAChanger = True\n\n def poserPNJ(self, positionCarte, c, positionSource, nomTileset, couleurTransparente, nomPNJ, positionCarteSuivante=False):\n \"\"\"Ordonne l'affichage à l'écran d'un PNJ à une nouvelle position et l'effacement du PNJ à sa position précedente\"\"\"\n hauteurTile = self._hauteurTile\n x,y = float(positionCarte.left), float(positionCarte.top)\n if nomPNJ not in self._pnj[c].keys():\n self._pnj[c][nomPNJ] = Bloc(self._jeu, pnj=True, nomPNJ=nomPNJ, nomTileset=nomTileset, positionCarte=positionCarte, positionCarteSuivante=positionCarteSuivante, positionSource=positionSource)\n pnj = self._pnj[c][nomPNJ]\n if pnj.positionSource != positionSource:\n pnj.positionSource = positionSource\n if pnj.nomTileset != nomTileset:\n pnj.nomTileset = nomTileset\n if pnj.couleurTransparente != couleurTransparente:\n pnj.couleurTransparente = couleurTransparente\n if pnj.positionCarte != positionCarte:\n 
pnj.positionCarte = positionCarte\n if pnj.positionCarteSuivante != positionCarteSuivante:\n pnj.positionCarteSuivante = positionCarteSuivante\n self._toutAChanger = True\n self._ajouterSurface( (positionSource.left, positionSource.top, positionSource.width, positionSource.height), nomTileset, couleurTransparente)\n \n def mettreToutAChanger(self):\n self._toutAChanger = True\n\n def _coordonneeScrollingPossible(self, coor, abs=False):\n \"\"\"Retourne si est dans un emplacement où le scrolling est possible. \n Paramètre : quand vaut , il s'agit non pas d'une ordonnée, mais d'une abscisse.\"\"\"\n if abs is False: #Ordonnée\n return coor == self._emplacementScrollingY\n else: #Abscisse\n return coor == self._emplacementScrollingX\n\n def verifierScrollingPossible(self, x, y, direction):\n \"\"\"Vérifie si le scrolling est possible pour faciliter le traitement dans gererScrolling\"\"\"\n self._scrollingPossible, scrollingDirection = False, True\n if direction == \"Bas\" and int(self._scrollingY / 32) + int(FENETRE[\"largeurFenetre\"]/32) >= self._largeur:\n scrollingDirection = False\n if direction == \"Haut\" and self._scrollingY == 0:\n scrollingDirection = False\n if direction == \"Droite\" and int(self._scrollingX / 32) + int(FENETRE[\"longueurFenetre\"]/32) >= self._longueur:\n scrollingDirection = False\n if direction == \"Gauche\" and self._scrollingX == 0:\n scrollingDirection = False\n if scrollingDirection is True:\n x, y = x - self._scrollingX, y - self._scrollingY\n scrollingPossibleX = self._coordonneeScrollingPossible(x, abs=True)\n scrollingPossibleY = self._coordonneeScrollingPossible(y, abs=False)\n if (direction == \"Haut\" or direction == \"Bas\") and scrollingPossibleY is True:\n self._scrollingPossible, self._directionScrolling = True, direction\n elif (direction == \"Gauche\" or direction == \"Droite\") and scrollingPossibleX is True:\n self._scrollingPossible, self._directionScrolling = True, direction\n \n def gererScrolling(self, changement, direction):\n \"\"\"Gère le scrolling\"\"\"\n if (direction == \"Droite\" or direction == \"Gauche\") and self._scrollingPossible is True:\n self._scrollingX += changement\n self.mettreToutAChanger()\n self._ecranVisible.move_ip(changement, 0)\n return True\n elif (direction == \"Bas\" or direction == \"Haut\") and self._scrollingPossible is True:\n self._scrollingY += changement\n self.mettreToutAChanger()\n self._ecranVisible.move_ip(0, changement)\n return True\n else:\n return False\n\n def initialiserScrolling(self, x, y):\n \"\"\"Après la création de la carte, initialise le scrolling à la position du joueur si nécessaire.\n est l'abscisse du joueur, son ordonnée. 
Ces coordonnées sont données en pixels.\"\"\"\n self._ecranVisible.top, self._ecranVisible.left = 0, 0\n scrollingAInitialiserX, scrollingAInitialiserY, x, y = True, True, x, y\n if FENETRE[\"largeurFenetre\"] >= self._largeur * 32: #Carte petite\n scrollingAInitialiserY = False\n if FENETRE[\"longueurFenetre\"] >= self._longueur * 32:\n scrollingAInitialiserX = False\n if x < self._emplacementScrollingX: #On est dans une partie de la carte où le scrolling est inutile\n scrollingAInitialiserX = False\n if y < self._emplacementScrollingY:\n scrollingAInitialiserY = False\n if scrollingAInitialiserX is True:\n self._scrollingX = x - self._emplacementScrollingX #A chaque instant, on a x - scrollingX = emplacementScrollingX, d'où cette relation\n if int(FENETRE[\"longueurFenetre\"]/32) + int(self._scrollingX/32) >= self._longueur: #Quand on est aux bords de la carte\n self._scrollingX = (self._longueur*32) - FENETRE[\"longueurFenetre\"]\n if scrollingAInitialiserY is True:\n self._scrollingY = y - self._emplacementScrollingY\n if int(FENETRE[\"largeurFenetre\"]/32) + int(self._scrollingY/32) >= self._largeur:\n self._scrollingY = (self._largeur*32) - FENETRE[\"largeurFenetre\"]\n self._ecranVisible = Rect(0, 0, FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetre\"])\n self._ecranVisible.move_ip(self._scrollingX, self._scrollingY)\n\n def obsOnNouvelleObservation(self, instance, nomAttribut, info):\n if isinstance(instance, ZonePensee) is True and nomAttribut == \"_surface\":\n self._surfaceZonePensee, self._besoinAffichageZonePensee = info.copy(), True\n elif isinstance(instance, ZonePensee) is True and nomAttribut == \"_positionSurface\":\n self._positionZonePensee = list(info)\n\n def _transformerPartie(self, surface, nomPnj, positionCarte, **p):\n \"\"\"Applique une transformation individuellement à chaque (mobile) lors de sa pose.\"\"\"\n for nomTransformation in self._transformationsParties:\n p = self._parametresTransformations[nomTransformation]\n if nomTransformation == \"AlphaFixe\":\n pixels = surfarray.pixels_alpha(surface)\n positionsNulles = numpy.where(pixels == 0)\n pixels[:,:] = p[\"alpha\"]\n pixels[positionsNulles] = 0\n elif nomTransformation == \"Action Joueur\" and nomPnj == \"Joueur\":\n centre = positionCarte.move(-self._scrollingX, -self._scrollingY).center\n pygame.draw.circle(self._fenetre, (255,255,255), centre, p[\"rayon\"], 1)\n\n def _appliquerTransformationGlobale(self, nomTransformation, **p):\n \"\"\"Applique la transformation globale avec le dico de paramètres
.\"\"\"\n if nomTransformation == \"Rouge\":\n pixels = surfarray.pixels3d(self._fenetre)[:FENETRE[\"longueurFenetre\"], :FENETRE[\"largeurFenetre\"]] #On exclut la zone de pensée\n pixels[:,:,1:] = 0\n elif nomTransformation == \"Noir\":\n pixels = surfarray.pixels3d(self._fenetre)[:FENETRE[\"longueurFenetre\"],:FENETRE[\"largeurFenetre\"]]\n pixels /= p[\"coef\"]\n if p[\"coef\"] >= 12:\n pixels[:] = (0,0,0)\n elif nomTransformation == \"NoirTotal\":\n pixels = surfarray.pixels3d(self._fenetre)[:FENETRE[\"longueurFenetre\"],:FENETRE[\"largeurFenetre\"]]\n pixels[:] = (0,0,0)\n elif nomTransformation == \"RemplirNoir\":\n self._fenetre.fill((0,0,0), rect=(0,0,FENETRE[\"longueurFenetre\"],FENETRE[\"largeurFenetre\"]))\n elif \"SplashText\" in nomTransformation:\n if \"couleurFond\" in p.keys():\n couleurFond=p[\"couleurFond\"]\n else:\n couleurFond=None\n surfaceTexte = self._jeu.zonePensee.polices[\"splashText\"].render(p[\"texte\"], p[\"antialias\"], p[\"couleurTexte\"], couleurFond)\n self._fenetre.blit(surfaceTexte, p[\"position\"])\n elif nomTransformation == \"Nuit\":\n self._fenetre.fill((0,0,0), rect=(0,0,FENETRE[\"longueurFenetre\"],FENETRE[\"largeurFenetre\"]))\n c = 0\n while c < self._nombreCouches: \n for nomPnj in self._pnj[c]: \n self._afficherBlocPnj(c, nomPnj)\n c += 1\n\n\n def _transformerSurfaceGlobalement(self, affichageComplet=False):\n \"\"\"A chaque frame, regarde s'il y a des transformations globales à appliquer, et les exécute lorsque c'est le cas.\n doit valoir si la fonction doit mettre à jour l'écran entier elle-même (car personne ne le fait après).\n Retourne quand la fonction s'est occupée de la mise à jour de l'écran (car on le lui a demandé ET qu'il y avait des transfos à traiter).\"\"\"\n if len(self._transformationsGlobales) > 0: #S'il y a des transformations à opérer\n longueurFenetre, largeurFenetre = FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetre\"]\n for nomTransformation in self._transformationsGlobales:\n self._appliquerTransformationGlobale(nomTransformation, **self._parametresTransformations[nomTransformation]) #On applique la transfo\n\n def _afficherZonePensee(self, affichageComplet=False):\n \"\"\"S'il y a quelque chose à afficher, réaffiche la zone de pensée. 
\n est un booléen qui vaut lorsque pygame.display.flip est appelée à la suite de l'appel de la fonction.\"\"\"\n positionZoneEntiere = (0, FENETRE[\"largeurFenetre\"], FENETRE[\"longueurFenetre\"], FENETRE[\"largeurFenetreReelle\"] - FENETRE[\"largeurFenetre\"])\n self._fenetre.fill(COULEUR_FOND_ZONE_PENSEE,rect=positionZoneEntiere)\n if self._surfaceZonePensee is not None:\n self._fenetre.blit(self._surfaceZonePensee, self._positionZonePensee)\n if affichageComplet is False:\n pygame.display.update(positionZoneEntiere)\n self._besoinAffichageZonePensee = False\n\n def _afficherBlocPnj(self, c, nomPnj):\n \"\"\"Affiche un PNJ sur un bloc\"\"\"\n pnj = self._pnj[c][nomPnj]\n if self._ecranVisible.contains(pnj.positionCarte) or self._ecranVisible.colliderect(pnj.positionCarte):\n positionCollage = pnj.positionCarte.move(-self._scrollingX, -self._scrollingY)\n if len(self._transformationsParties) > 0:\n surfaceCollage = self._dicoSurfaces[pnj.nomTileset][(pnj.positionSource.left, pnj.positionSource.top, pnj.positionSource.width, pnj.positionSource.height)].copy()\n self._transformerPartie(surfaceCollage, nomPnj, pnj.positionCarte)\n else:\n surfaceCollage = self._dicoSurfaces[pnj.nomTileset][(pnj.positionSource.left, pnj.positionSource.top, pnj.positionSource.width, pnj.positionSource.height)]\n self._fenetre.blit(surfaceCollage, positionCollage)\n \n def afficher(self):\n \"\"\"Cette méthode gère l'affichage de la carte\"\"\"\n self._blitFrame = False\n if self._toutAChanger is True:\n coucheActuelle = 0\n self._fenetre.fill((0,0,0))\n while coucheActuelle < self._nombreCouches: \n self._fenetre.blit(self._tilesLayers[coucheActuelle], (0,0), area=self._ecranVisible)\n nomsPnjs = sorted(self._pnj[coucheActuelle], key=lambda nomPNJ: self._pnj[coucheActuelle][nomPNJ].positionCarte.top)\n #Tri des PNJs selon leur ordonnée (de manière croissante) : on affiche ceux en haut de l'écran avant ceux en bas, pour avoir une superposition\n for nomPnj in nomsPnjs: \n self._afficherBlocPnj(coucheActuelle, nomPnj)\n coucheActuelle += 1\n self._afficherZonePensee(affichageComplet=True)\n self._transformerSurfaceGlobalement()\n self._blitFrame = True\n \n if self._blitFrame is True:\n if LIMITER_FPS:\n self._jeu.horlogeFps.tick(NOMBRE_MAX_DE_FPS)\n else:\n self._jeu.horlogeFps.tick()\n pygame.display.flip()\n\n def _getNombreCouches(self):\n \"\"\"Retourne le nombre de couches défini sur la carte\"\"\"\n return self._nombreCouches\n\n def _getHauteurTile(self):\n \"\"\"Retourne la hauteur d'un tile sur la carte\"\"\"\n return self._hauteurTile\n\n def _getNom(self):\n return self._nom\n\n def _getLongueur(self):\n return self._longueur\n\n def _getLargeur(self):\n return self._largeur\n\n def _getTransformationsGlobales(self):\n return self._transformationsGlobales\n\n def _setTransformationsGlobales(self, val):\n self._transformationsGlobales = val\n\n def _getTransformationsParties(self):\n return self._transformationsParties\n\n def _setTransformationsParties(self, val):\n self._transformationsParties = val\n\n def _getParametresTransformations(self):\n return self._parametresTransformations\n\n def _getTiles(self):\n return self._tiles\n\n nombreCouches = property(_getNombreCouches)\n hauteurTile = property(_getHauteurTile)\n nom = property(_getNom)\n longueur = property(_getLongueur)\n largeur = property(_getLargeur)\n tiles = property(_getTiles)\n transformationsGlobales = property(_getTransformationsGlobales, _setTransformationsParties)\n transformationsParties = property(_getTransformationsParties, 
_setTransformationsParties)\n parametresTransformations = property(_getParametresTransformations)\n","sub_path":"Releases/0.2/narro/carte.py","file_name":"carte.py","file_ext":"py","file_size_in_byte":24827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"44342577","text":"def calculate_weight(x_data, y_data):\n\tn = x_data.__len__()\n\tm = (\n\t\t\t((n * sum_all(x_data, y_data, 0, n - 1, lambda x, y: x * y)) - (\n\t\t\t\tsum_all(x_data, y_data, 0, n - 1, lambda x, y: x)) * (\n\t\t\t\t sum_all(x_data, y_data, 0, n - 1, lambda x, y: y))) /\n\t\t\t((n * sum_all(x_data, y_data, 0, n - 1, lambda x, y: x ** 2)) - (\n\t\t\t\tsum_all(x_data, y_data, 0, n - 1, lambda x, y: x)) ** 2)\n\t)\n\treturn m\n\n\ndef sum_all(x_data, y_data, a, b, func):\n\ti = a\n\tsum_res = 0\n\twhile i < b:\n\t\tsum_res += func(x_data[i], y_data[i])\n\t\ti += 1\n\treturn sum_res\n\n\ndef convert_to_float(s):\n\ttry:\n\t\treturn float(s)\n\texcept ValueError:\n\t\tassert False, \"\\\"\" + s + \"\\\" can't be converted\"\n\n\ndef read_file(file_name):\n\tls = []\n\twith open(file_name, \"r\") as f:\n\t\twhile True:\n\t\t\ts = f.readline()\n\t\t\tif s == '':\n\t\t\t\tbreak\n\t\t\tls.append(convert_to_float(s.replace('\\n', '')))\n\treturn ls\n\n\nif __name__ == '__main__':\n\tfile_prefix = input(\"File prefix : \")\n\tx_file_name = file_prefix + \"_x\"\n\ty_file_name = file_prefix + \"_y\"\n\tx_raw_data = read_file(x_file_name)\n\ty_raw_data = read_file(y_file_name)\n\tassert x_raw_data.__len__() == y_raw_data.__len__(), \"Data does not has a same length\"\n\tassert x_raw_data.__len__() is not 0, \"No data acquired\"\n\tprint(\"Appropriate description=\" + str(calculate_weight(x_raw_data, y_raw_data)))\n","sub_path":"Assignment 3/findLineOfTheBestFit.py","file_name":"findLineOfTheBestFit.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"571628772","text":"\"\"\"\nFunctional test\n\nDeletion Abuse Epic\n\nStoryboard is defined within the comments of the program itself\n\"\"\"\n\nimport unittest\nfrom flask import url_for\nfrom biblib.tests.stubdata.stub_data import UserShop, LibraryShop\nfrom biblib.tests.base import TestCaseDatabase, MockEmailService\nfrom biblib.views.http_errors import NO_PERMISSION_ERROR\n\nclass TestDeletionAbuseEpic(TestCaseDatabase):\n \"\"\"\n Base class used to test the Deletion Abuse Epic\n \"\"\"\n\n def test_deletion_abuse_epic(self):\n \"\"\"\n Carries out the epic 'Deletion Abuse', where each type of permission\n for a library: None, Read, Write, Admin, try to delete a library and\n get permission denied. The owner then deletes the library, and it is\n successful.\n\n :return: no return\n \"\"\"\n\n # Load stub data\n stub_owner = UserShop(name='owner')\n stub_none = UserShop(name='none')\n stub_reader = UserShop(name='reader')\n stub_editor = UserShop(name='editor')\n stub_admin = UserShop(name='admin')\n stub_library = LibraryShop(public=False)\n\n # Makes the library\n url = url_for('userview')\n response = self.client.post(\n url,\n data=stub_library.user_view_post_data_json,\n headers=stub_owner.headers\n )\n library_id = response.json['id']\n self.assertEqual(response.status_code, 200, response)\n self.assertTrue('name' in response.json)\n self.assertTrue(response.json['name'] == stub_library.name)\n\n # Give the correct permissions to each user\n url = url_for('permissionview', library=library_id)\n for stub_user, permission in [[stub_reader, 'read'],\n [stub_editor, 'write'],\n [stub_admin, 'admin']]:\n with MockEmailService(stub_user):\n response = self.client.post(\n url,\n data=stub_user.permission_view_post_data_json(\n {permission: True}\n ),\n headers=stub_owner.headers\n )\n self.assertEqual(response.status_code, 200)\n\n # The following users try to the delete the library, and fail:\n # reader, editor, admin\n url = url_for('documentview', library=library_id)\n for stub_user in [stub_none, stub_reader, stub_editor, stub_admin]:\n response = self.client.delete(\n url,\n headers=stub_user.headers\n )\n self.assertEqual(response.status_code,\n NO_PERMISSION_ERROR['number'],\n 'User: {0}'.format(stub_user.name))\n self.assertEqual(response.json['error'],\n NO_PERMISSION_ERROR['body'])\n\n # Owner deletes the library, success\n url = url_for('documentview', library=library_id)\n response = self.client.delete(\n url,\n headers=stub_owner.headers\n )\n self.assertEqual(response.status_code, 200)\n\n # Checks that it is deleted\n url = url_for('userview')\n response = self.client.get(\n url,\n headers=stub_owner.headers\n )\n self.assertTrue(len(response.json['libraries']) == 0)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)","sub_path":"biblib/tests/functional_tests/test_deletion_abuse_epic.py","file_name":"test_deletion_abuse_epic.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"455833172","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn import preprocessing\r\nfrom sklearn.cluster import estimate_bandwidth, MeanShift\r\nimport matplotlib.pyplot as plt\r\n\r\ndef load_csv():\r\n '''\r\n 1. load 'iris_data.csv into a dataframe'\r\n '''\r\n df = pd.read_csv('resources/iris_data.csv')\r\n return df\r\n\r\ndef unique_labels():\r\n '''\r\n 2. get unique labels(Species column)\r\n\r\n ????\r\n '''\r\n df = load_csv()\r\n arr = np.unique(df['Species'].values)\r\n return df, arr\r\n\r\ndef encode():\r\n df,_ = unique_labels()\r\n label_enc = preprocessing.LabelEncoder()\r\n df['Species'] = label_enc.fit_transform(df['Species'].astype(str))\r\n return df\r\n\r\ndef scatter01():\r\n \"\"\"\r\n 3. plot with a scatter plot each iris flower sample colored by label(3 different colors)\r\n \"\"\" \r\n df = encode()\r\n fig, axes = plt.subplots(nrows=2)\r\n\r\n df.plot.scatter(ax=axes[0], x='Petal length', y='Petal width', c='Species', colormap='viridis')\r\n df.plot.scatter(ax=axes[1],x='Sepal length', y='Sepal width', c='Species', colormap='viridis')\r\n\r\n plt.show()\r\n\r\ndef cluster():\r\n \"\"\"\r\n 4. use: MeanShift and estimate_bandwidth from sklearn.cluster to first estimate bandwidth and then get the clusters \r\n (HINT: estimate_bandwidth() takes an argument: quantile set it to 0.2 for best result\r\n \"\"\" \r\n df = encode().replace(',','.',regex=True)\r\n bandwidth = estimate_bandwidth(df,quantile=0.2)\r\n analyzer = MeanShift(bandwidth=bandwidth)\r\n analyzer.fit(df)\r\n\r\n labels = analyzer.labels_\r\n centers = analyzer.cluster_centers_\r\n\r\n return bandwidth, labels, centers, df\r\n\r\ndef cluster_print():\r\n \"\"\"\r\n 5. print labels, cluster centers and number of clusters (as returned from the MeanShift function)\r\n \"\"\" \r\n\r\n bandwidth, labels, centers, _ = cluster()\r\n unique = np.unique(labels)\r\n\r\n print('\\n\\n#########\\n')\r\n print(f\"Bandwidth: {bandwidth}\\n\")\r\n print(f'Labels:\\n {labels}\\n')\r\n print(f'Unique labels: {unique}\\n')\r\n print(f'Centers: {centers}\\n')\r\n print(f'Cluster count: {len(centers)}\\n')\r\n print('########\\n')\r\n\r\ndef scatter02():\r\n \"\"\"\r\n 6. Create a new scatter plot where each flower is colored according to cluster label\r\n \"\"\" \r\n bandwidth, labels, centers, df = cluster()\r\n unique = np.unique(labels)\r\n\r\n fig, axes = plt.subplots(nrows=2)\r\n\r\n df.plot.scatter(ax=axes[0], x='Petal length', y='Petal width', c=labels, colormap='viridis')\r\n plt.scatter(centers[:,2], centers[:,3], marker='.')\r\n\r\n plt.show()\r\n\r\n","sub_path":"week10/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"55187802","text":"\"\"\"\r\nThis module illustrates how to compute Precision at k and Recall at k metrics.\r\n\"\"\"\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\nfrom collections import defaultdict\r\nimport time\r\nimport datetime\r\nimport random\r\n\r\nimport numpy as np\r\nimport six\r\nfrom tabulate import tabulate\r\n\r\nfrom surprise import Dataset\r\nfrom surprise.model_selection import cross_validate\r\nfrom surprise.model_selection import KFold\r\nfrom surprise import NormalPredictor\r\nfrom surprise import BaselineOnly\r\nfrom surprise import KNNBasic\r\nfrom surprise import KNNWithMeans\r\nfrom surprise import KNNBaseline\r\nfrom surprise import SVD\r\nfrom surprise import SVDpp\r\nfrom surprise import NMF\r\nfrom surprise import SlopeOne\r\nfrom surprise import CoClustering\r\nfrom surprise.model_selection import train_test_split\r\n\r\nclasses = (SVD, SVDpp, NMF, SlopeOne, KNNBasic, KNNWithMeans, KNNBaseline,\r\n CoClustering, BaselineOnly, NormalPredictor)\r\n\r\n# ugly dict to map algo names and datasets to their markdown links in the table\r\nstable = 'http://surprise.readthedocs.io/en/stable/'\r\nLINK = {'SVD': '[{}]({})'.format('SVD',\r\n stable +\r\n 'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVD'),\r\n 'SVDpp': '[{}]({})'.format('SVD++',\r\n stable +\r\n 'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVDpp'),\r\n 'NMF': '[{}]({})'.format('NMF',\r\n stable +\r\n 'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.NMF'),\r\n 'SlopeOne': '[{}]({})'.format('Slope One',\r\n stable +\r\n 'slope_one.html#surprise.prediction_algorithms.slope_one.SlopeOne'),\r\n 'KNNBasic': '[{}]({})'.format('k-NN',\r\n stable +\r\n 'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBasic'),\r\n 'KNNWithMeans': '[{}]({})'.format('Centered k-NN',\r\n stable +\r\n 'knn_inspired.html#surprise.prediction_algorithms.knns.KNNWithMeans'),\r\n 'KNNBaseline': '[{}]({})'.format('k-NN Baseline',\r\n stable +\r\n 'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBaseline'),\r\n 'CoClustering': '[{}]({})'.format('Co-Clustering',\r\n stable +\r\n 'co_clustering.html#surprise.prediction_algorithms.co_clustering.CoClustering'),\r\n 'BaselineOnly': '[{}]({})'.format('Baseline',\r\n stable +\r\n 'basic_algorithms.html#surprise.prediction_algorithms.baseline_only.BaselineOnly'),\r\n 'NormalPredictor': '[{}]({})'.format('Random',\r\n stable +\r\n 'basic_algorithms.html#surprise.prediction_algorithms.random_pred.NormalPredictor'),\r\n 'ml-100k': '[{}]({})'.format('Movielens 100k',\r\n 'http://grouplens.org/datasets/movielens/100k'),\r\n 'ml-1m': '[{}]({})'.format('Movielens 1M',\r\n 'http://grouplens.org/datasets/movielens/1m'),\r\n }\r\n\r\n\r\ndef precision_recall_at_k(predictions, k=10, threshold=3.5):\r\n '''Return precision and recall at k metrics for each user.'''\r\n\r\n # First map the predictions to each user.\r\n user_est_true = defaultdict(list)\r\n for uid, _, true_r, est, _ in predictions:\r\n user_est_true[uid].append((est, true_r))\r\n\r\n precisions = dict()\r\n recalls = dict()\r\n for uid, user_ratings in user_est_true.items():\r\n\r\n # Sort user ratings by estimated value\r\n user_ratings.sort(key=lambda x: x[0], reverse=True)\r\n\r\n # Number of relevant items\r\n n_rel = sum((true_r >= threshold) for (_, true_r) in user_ratings)\r\n\r\n # Number of recommended items in top k\r\n n_rec_k = sum((est >= threshold) for (est, 
_) in user_ratings[:k])\r\n\r\n # Number of relevant and recommended items in top k\r\n n_rel_and_rec_k = sum(((true_r >= threshold) and (est >= threshold))\r\n for (est, true_r) in user_ratings[:k])\r\n\r\n # Precision@K: Proportion of recommended items that are relevant\r\n precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 1\r\n\r\n # Recall@K: Proportion of relevant items that are recommended\r\n recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 1\r\n\r\n return precisions, recalls\r\n\r\ndataset = 'ml-100k'\r\ndata = Dataset.load_builtin('ml-100k')\r\nkf = KFold(n_splits=5)\r\ntrainset,testset = train_test_split(data,test_size=.75)\r\n'''\r\nfor trainset, testset in kf.split(data):\r\n algo.fit(trainset)\r\n predictions = algo.test(testset)\r\n precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)\r\n\r\n # Precision and recall can then be averaged over all users\r\n prec = sum(p for p in precisions.values()) / len(precisions)\r\n recall = sum(rec for rec in recalls.values()) / len(recalls)\r\n f1 = 2 * prec * recall / (prec + recall)\r\n print(prec)\r\n print(recall)\r\n print(f1)\r\n'''\r\ntable = []\r\nfor klass in classes:\r\n start = time.time()\r\n if klass == 'SVD':\r\n algo = SVD()\r\n elif klass == 'SVDpp':\r\n algo = SVDpp()\r\n elif klass == 'NMF':\r\n algo = NMF()\r\n elif klass == 'SlopeOne':\r\n algo = SlopeOne()\r\n elif klass == 'KNNBasic':\r\n algo = KNNBasic()\r\n elif klass == 'KNNWithMeans':\r\n algo = KNNWithMeans()\r\n elif klass == 'KNNBaseline':\r\n algo = KNNBaseline()\r\n elif klass == 'CoClustering':\r\n algo = CoClustering()\r\n elif klass == 'BaselineOnly':\r\n algo = BaselineOnly()\r\n else :\r\n algo = NormalPredictor()\r\n #cv_time = str(datetime.timedelta(seconds=int(time.time() - start)))\r\n algo.fit(trainset)\r\n predictions = algo.test(testset)\r\n precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)\r\n\r\n # Precision and recall can then be averaged over all users\r\n prec = sum(p for p in precisions.values()) / len(precisions)\r\n recall = sum(rec for rec in recalls.values()) / len(recalls)\r\n f1 = 2 * prec * recall / (prec + recall)\r\n link = LINK[klass.__name__]\r\n\r\n new_line = [link, prec, recall, f1]\r\n print(tabulate([new_line], tablefmt=\"pipe\")) # print current algo perf\r\n table.append(new_line)\r\n\r\nheader = [LINK[dataset],\r\n 'Precision',\r\n 'Recall',\r\n 'F1',\r\n 'Time'\r\n ]\r\nprint(tabulate(table, header, tablefmt=\"pipe\"))","sub_path":"examples/precision_recall_at_k1.py","file_name":"precision_recall_at_k1.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
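The core of `precision_recall_at_k` in the record above does not depend on Surprise at all: sort one user's (estimate, truth) pairs, count relevant and recommended items in the top k, and divide. A small self-contained check of that arithmetic, using made-up ratings and the same `k`/`threshold` conventions:

```python
def precision_recall_at_k_single(user_ratings, k=10, threshold=3.5):
    # user_ratings: list of (estimated_rating, true_rating) pairs for one user.
    user_ratings = sorted(user_ratings, key=lambda x: x[0], reverse=True)
    n_rel = sum(true_r >= threshold for _, true_r in user_ratings)
    n_rec_k = sum(est >= threshold for est, _ in user_ratings[:k])
    n_rel_and_rec_k = sum(est >= threshold and true_r >= threshold
                          for est, true_r in user_ratings[:k])
    precision = n_rel_and_rec_k / n_rec_k if n_rec_k else 1
    recall = n_rel_and_rec_k / n_rel if n_rel else 1
    return precision, recall

# Toy data: (estimate, true rating) for a single user.
ratings = [(4.5, 5.0), (4.0, 2.0), (3.9, 4.0), (2.0, 4.5), (1.0, 1.0)]
print(precision_recall_at_k_single(ratings, k=3, threshold=3.5))  # (0.666..., 0.666...)
```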
+{"seq_id":"338605637","text":"import json\nimport pytest\nimport os\nimport itertools\nimport requests\nfrom pprint import pprint\nfrom deepdiff import DeepDiff\nfrom tranql.main import TranQL\nfrom tranql.main import TranQLParser, set_verbose\nfrom tranql.tranql_ast import SetStatement, SelectStatement\nfrom tranql.tests.util import assert_lists_equal, set_mock, ordered\nfrom tranql.tests.mocks import MockHelper\nfrom tranql.tests.mocks import MockMap\n#set_verbose ()\n\ndef assert_parse_tree (code, expected):\n \"\"\" Parse a block of code into a parse tree. Then assert the equality\n of that parse tree to a list of expected tokens. \"\"\"\n tranql = TranQL ()\n tranql.resolve_names = False\n actual = tranql.parser.parse (code).parse_tree\n #print (f\"{actual}\")\n assert_lists_equal (\n actual,\n expected)\n\n#####################################################\n#\n# Parser tests. Verify we produce the AST for the\n# expected grammar correctly.\n#\n#####################################################\n\ndef test_parse_predicate (requests_mock):\n set_mock(requests_mock, \"predicates\")\n\n \"\"\" Test parsing a predicate. \"\"\"\n print (f\"test_parse_predicate()\")\n assert_parse_tree (\n code = \"\"\"\n SELECT chemical_substance-[treats]->disease\n FROM \"/graph/gamma/quick\"\n WHERE chemical_substance='PUBCHEM:2083'\n SET \"$.knowledge_graph.nodes.[*].id as indications\n \"\"\",\n expected = [\n [ [ \"select\",\n \"chemical_substance\",\n [ \"-[\",\n \"treats\",\n \"]->\"\n ], \"disease\", \"\\n\"\n ],\n \" \",\n [ \"from\", [ \"/graph/gamma/quick\"] ],\n [\"where\",\n [\n \"chemical_substance\",\n \"=\",\n \"PUBCHEM:2083\"\n ]\n ], [ \"\" ]\n ]])\n\ndef test_parse_set (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n\n \"\"\" Test parsing set statements. \"\"\"\n print (f\"test_parse_set()\")\n assert_parse_tree (\n code = \"\"\"\n SET disease = 'asthma'\n SET max_p_value = '0.5'\n SET cohort = 'COHORT:22'\n SET population_density = 2\n SET icees.population_density_cluster = 'http://localhost/ICEESQuery'\n SET gamma.quick = 'http://robokop.renci.org:80/api/simple/quick/' \"\"\",\n expected = [\n [\"set\", \"disease\", \"=\", \"asthma\"],\n [\"set\", \"max_p_value\", \"=\", \"0.5\"],\n [\"set\", \"cohort\", \"=\", \"COHORT:22\"],\n [\"set\", \"population_density\", \"=\", 2],\n [\"set\", \"icees.population_density_cluster\", \"=\", \"http://localhost/ICEESQuery\"],\n [\"set\", \"gamma.quick\", \"=\", \"http://robokop.renci.org:80/api/simple/quick/\"]\n ])\n\ndef test_parse_set_with_comment (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Test parsing set statements with comments. \"\"\"\n print (f\"test_parse_set_with_comment()\")\n assert_parse_tree (\n code = \"\"\"\n -- This is a comment\n SET disease = 'asthma' \"\"\",\n expected = [\n [\"set\", \"disease\", \"=\", \"asthma\"]\n ])\n\ndef test_parse_select_simple (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Verify the token stream of a simple select statement. 
\"\"\"\n print (f\"test_parse_select_simple()\")\n assert_parse_tree (\n code = \"\"\"\n SELECT chemical_substance->gene->biological_process->phenotypic_feature\n FROM \"/graph/gamma/quick\"\n WHERE chemical_substance = $chemical_exposures\n SET knowledge_graph \"\"\",\n expected = [\n [[\"select\", \"chemical_substance\", \"->\", \"gene\", \"->\", \"biological_process\", \"->\", \"phenotypic_feature\", \"\\n\"],\n \" \",\n [\"from\", [\"/graph/gamma/quick\"]],\n [\"where\", [\"chemical_substance\", \"=\", \"$chemical_exposures\"]],\n [\"set\", [\"knowledge_graph\"]]]\n ])\n\ndef test_parse_select_complex (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Verify the token stream of a more complex select statement. \"\"\"\n print (f\"test_parse_select_complex()\")\n assert_parse_tree (\n code = \"\"\"\n SELECT disease->chemical_substance\n FROM \"/flow/5/mod_1_4/icees/by_residential_density\"\n WHERE disease = \"asthma\"\n AND EstResidentialDensity < \"2\"\n AND cohort = \"COHORT:22\"\n AND max_p_value = \"0.5\"\n SET '$.nodes.[*].id' AS chemical_exposures \"\"\",\n expected = [\n [[\"select\", \"disease\", \"->\", \"chemical_substance\", \"\\n\"],\n \" \",\n [\"from\", [\"/flow/5/mod_1_4/icees/by_residential_density\"]],\n [\"where\",\n [\"disease\", \"=\", \"asthma\"], \"and\",\n [\"EstResidentialDensity\", \"<\", \"2\"], \"and\",\n [\"cohort\", \"=\", \"COHORT:22\"], \"and\",\n [\"max_p_value\", \"=\", \"0.5\"]\n ],\n [\"set\", [\"$.nodes.[*].id\", \"as\", \"chemical_exposures\"]]]\n ])\n\ndef test_parse_query_with_repeated_concept (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Verify the parser accepts a grammar allowing concept names to be prefixed by a name\n and a colon. \"\"\"\n print (f\"test_parse_query_with_repeated_concept\")\n assert_parse_tree (\n code=\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/clinical/cohort/disease_to_chemical_exposure'\n WHERE cohort_diagnosis = 'asthma'\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\",\n expected = [\n [[\"select\", \"cohort_diagnosis:disease\",\"->\",\"diagnoses:disease\",\"\\n\"],\n \" \",\n [\"from\",\n [\"/clinical/cohort/disease_to_chemical_exposure\"]\n ],\n [\"where\",\n [\"cohort_diagnosis\",\"=\",\"asthma\"],\n \"and\",\n [\"Sex\",\"=\",\"0\"],\n \"and\",\n [\"cohort\",\"=\",\"all_patients\"],\n \"and\",\n [\"max_p_value\",\"=\",\"0.5\"]\n ],\n [\"set\",\n [\"$.knowledge_graph.nodes.[*].id\",\"as\",\"diagnoses\"]\n ]\n ]])\n\n#####################################################\n#\n# AST tests. Test abstract syntax tree components.\n#\n#####################################################\ndef test_ast_set_variable (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Test setting a varaible to an explicit value. \"\"\"\n print (\"test_ast_set_variable ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n statement = SetStatement (variable=\"variable\", value=\"x\")\n statement.execute (tranql)\n assert tranql.context.resolve_arg (\"$variable\") == 'x'\ndef test_ast_set_graph (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Set a variable to a graph passed as a result. 
\"\"\"\n print (\"test_ast_set_graph ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n statement = SetStatement (variable=\"variable\", value=None, jsonpath_query=None)\n statement.execute (tranql, context={ 'result' : { \"a\" : 1 } })\n assert tranql.context.resolve_arg (\"$variable\")['a'] == 1\ndef test_ast_set_graph (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Set a variable to the value returned by executing a JSONPath query. \"\"\"\n print (\"test_ast_set_graph ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n statement = SetStatement (variable=\"variable\", value=None, jsonpath_query=\"$.nodes.[*]\")\n statement.execute (tranql, context={\n 'result' : {\n \"nodes\" : [ {\n \"id\" : \"x:y\"\n } ]\n }\n })\n assert tranql.context.resolve_arg (\"$variable\")[0]['id'] == \"x:y\"\ndef test_ast_generate_questions (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- named query concepts work.\n -- the question graph is build incorporating where clause constraints.\n \"\"\"\n print (\"test_ast_set_generate_questions ()\")\n app = TranQL ()\n app.resolve_names = False\n ast = app.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/clinical/cohort/disease_to_chemical_exposure'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n questions = ast.statements[0].generate_questions (app)\n assert questions[0]['question_graph']['nodes'][0]['curie'] == 'MONDO:0004979'\n assert questions[0]['question_graph']['nodes'][0]['type'] == 'disease'\ndef test_ast_format_constraints (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- The syntax to pass values to reasoners in the where clause (e.g. 
\"icees.foo = bar\") functions properly\n \"\"\"\n print(\"test_ast_format_constraints ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT population_of_individual_organisms->chemical_substance\n FROM \"/clinical/cohort/disease_to_chemical_exposure\"\n WHERE icees.should_format = 1\n AND robokop.should_not_format = 0\n \"\"\")\n select = ast.statements[0]\n select.format_constraints(tranql)\n print(select.where)\n assert_lists_equal(select.where, [\n ['should_format', '=', 1],\n ['should_format', '=', 1],\n ['robokop.should_not_format', '=', 0],\n ['robokop.should_not_format', '=', 0]\n ])\ndef test_ast_backwards_arrow (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n print(\"test_ast_backwards_arrow ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT gene->biological_process<-microRNA\n FROM \"/schema\"\n \"\"\")\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n backwards_questions = statements[1].generate_questions(tranql)\n\n assert len(backwards_questions) == 1\n assert len(backwards_questions[0][\"question_graph\"][\"edges\"]) == 1\n assert backwards_questions[0][\"question_graph\"][\"edges\"][0][\"source_id\"] == \"microRNA\"\n assert backwards_questions[0][\"question_graph\"][\"edges\"][0][\"target_id\"] == \"biological_process\"\ndef test_ast_decorate_element (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- The SelectStatement::decorate method properly decorates both nodes and edges\n \"\"\"\n print(\"test_ast_decorate_element ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT chemical_substance->disease\n FROM \"/graph/gamma/quick\"\n \"\"\")\n select = ast.statements[0]\n node = {\n \"id\": \"CHEBI:36314\",\n \"name\": \"glycerophosphoethanolamine\",\n \"omnicorp_article_count\": 288,\n \"type\": \"chemical_substance\"\n }\n edge = {\n \"ctime\": [\n 1544077522.7678425\n ],\n \"edge_source\": [\n \"chembio.graph_pubchem_to_ncbigene\"\n ],\n \"id\": \"df662e2842d44fa2c0b5d945044317e3\",\n \"predicate_id\": \"SIO:000203\",\n \"publications\": [\n \"PMID:16217747\"\n ],\n \"relation\": [\n \"CTD:interacts_with\"\n ],\n \"relation_label\": [\n \"interacts\"\n ],\n \"source_id\": \"CHEBI:36314\",\n \"target_id\": \"HGNC:8971\",\n \"type\": \"directly_interacts_with\",\n \"weight\": 0.4071474314830641\n }\n select.decorate(node,True,{\n \"schema\" : select.get_schema_name(tranql)\n })\n select.decorate(edge,False,{\n \"schema\" : select.get_schema_name(tranql)\n })\n\n assert_lists_equal(node[\"reasoner\"],[\"robokop\"])\n\n assert_lists_equal(edge[\"reasoner\"],[\"robokop\"])\n assert_lists_equal(edge[\"source_database\"],[\"unknown\"])\ndef test_ast_resolve_name (requests_mock):\n set_mock(requests_mock, \"resolve_name\")\n \"\"\" Validate that\n -- The SelectStatement::resolve_name method will correctly retrieve equivalent identifiers from a given name\n \"\"\"\n print(\"test_ast_resolve_name ()\")\n assert_lists_equal(SelectStatement.resolve_name(\"ibuprofen\",\"chemical_substance\"),[\n 'CHEBI:132922',\n 'CHEBI:5855',\n 'CHEBI:43415',\n 'PUBCHEM:3672',\n 'MESH:D007052',\n 'CHEBI:5855',\n 'CHEMBL:CHEMBL521']\n )\ndef test_ast_predicate_question (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- A query with a predicate will be properly formatted into a question graph\n \"\"\"\n print(\"test_ast_predicates ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT chemical_substance-[treats]->disease\n FROM 
\"/graph/gamma/quick\"\n WHERE chemical_substance='CHEMBL:CHEMBL521'\n \"\"\")\n select = ast.statements[0]\n question = select.generate_questions(tranql)[0][\"question_graph\"]\n\n assert len(question[\"edges\"]) == 1\n\n assert \"type\" in question[\"edges\"][0]\n assert question[\"edges\"][0][\"type\"] == \"treats\"\ndef test_ast_multiple_reasoners (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- A query spanning multiple reasoners will query multiple reasoners.\n -- A transitions that multiple reasoners support will query each reasoner that supports it.\n \"\"\"\n print(\"test_ast_multiple_reasoners ()\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT chemical_substance->disease->gene\n FROM \"/schema\"\n \"\"\")\n # RTX and Robokop both support transitions between chemical_substance->disease and only Robokop supports transitions between disease->gene\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n assert_lists_equal(statements[0].query.order,['chemical_substance','disease'])\n assert statements[0].get_schema_name(tranql) == \"robokop\"\n\n assert_lists_equal(statements[1].query.order,['chemical_substance','disease'])\n assert statements[1].get_schema_name(tranql) == \"rtx\"\n\n assert_lists_equal(statements[2].query.order,['disease','gene'])\n assert statements[2].get_schema_name(tranql) == \"robokop\"\ndef test_ast_merge_knowledge_maps (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n tranql = TranQL ()\n tranql.asynchronous = False\n tranql.resolve_names = False\n ast = tranql.parse (\"\"\"\n select chemical_substance->disease->gene\n from \"/schema\"\n where chemical_substance=\"CHEMBL:CHEMBL3\"\n \"\"\")\n\n # select = ast.statements[0]\n # statements = select.plan (select.planner.plan (select.query))\n # print(statements[0].query.order)\n\n # (select.execute_plan(tranql))\n\n responses = [\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'chemical_substance' : 'CHEBI:100',\n 'disease' : 'MONDO:50'\n },\n 'edge_bindings' : {\n 'e0' : 'ROOT_EDGE'\n }\n }\n ],\n 'question_order' : ['chemical_substance','disease']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'disease' : 'MONDO:50',\n 'gene' : 'HGNC:1',\n 'metabolite' : 'KEGG:C00017'\n },\n 'edge_bindings' : {\n 'e1' : 'TEST_EDGE'\n }\n }\n ],\n 'question_order' : ['disease','gene','metabolite']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'disease' : 'MONDO:50',\n 'gene' : 'HGNC:1',\n 'metabolite' : 'KEGG:FOOBAR'\n },\n 'edge_bindings' : {\n\n }\n }\n ],\n 'question_order' : ['disease','gene','metabolite']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'metabolite' : 'KEGG:FOOBAR',\n 'protein' : 'UniProtKB:TESTING'\n },\n 'edge_bindings' : {\n\n }\n }\n ],\n 'question_order' : ['metabolite','protein']\n },\n {\n 'knowledge_map' : [\n {\n 'node_bindings' : {\n 'metabolite' : 'KEGG:C00017',\n 'protein' : 'UniProtKB:Q9NZJ5'\n },\n 'edge_bindings' : {\n\n }\n }\n ],\n 'question_order' : ['metabolite','protein']\n }\n ]\n\n merged = SelectStatement.connect_knowledge_maps(responses,[\n 'chemical_substance',\n 'disease',\n 'gene',\n 'metabolite',\n 'protein'\n ])\n\n assert_lists_equal(ordered(merged), ordered([\n {\n \"node_bindings\" : {\n \"chemical_substance\" : \"CHEBI:100\",\n \"disease\" : \"MONDO:50\",\n \"gene\" : \"HGNC:1\",\n \"metabolite\" : \"KEGG:FOOBAR\",\n \"protein\" : \"UniProtKB:TESTING\"\n },\n \"edge_bindings\" : {\n \"e0\" : \"ROOT_EDGE\"\n }\n },\n {\n 
\"node_bindings\" : {\n \"chemical_substance\" : \"CHEBI:100\",\n \"disease\" : \"MONDO:50\",\n \"gene\" : \"HGNC:1\",\n \"metabolite\" : \"KEGG:C00017\",\n \"protein\" : \"UniProtKB:Q9NZJ5\"\n },\n \"edge_bindings\" : {\n \"e0\" : \"ROOT_EDGE\",\n \"e1\" : \"TEST_EDGE\",\n }\n }\n ]))\n\n # print(json.dumps(merged,indent=2))\n\ndef test_ast_merge_results (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that\n -- Results from the query plan are being merged together correctly\n \"\"\"\n print(\"test_ast_merge_answers ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n ast = tranql.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/clinical/cohort/disease_to_chemical_exposure'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n\n select = ast.statements[0]\n\n # What is the proper format for the name of a mock file? This should be made into one\n mock_responses = [\n {\n 'knowledge_graph': {\n 'nodes': [\n {'id': 'CHEBI:28177', 'type': 'chemical_substance'},\n {'id': 'HGNC:2597', 'type': 'gene'},\n {\n 'id': 'egg',\n 'name':'test_name_merge',\n 'type': 'foo_type',\n 'test_attr': ['a','b']\n },\n {\n 'id': 'equivalent_identifier_merge',\n 'equivalent_identifiers': ['TEST:00000'],\n 'merged_property': [\n 'a',\n 'b'\n ]\n }\n ],\n 'edges': [\n {'id': 'e0', 'source_id': 'CHEBI:28177', 'target_id': 'HGNC:2597'},\n {\n # Test if edges that are connected to merged nodes will be successfully merged with other duplicate edges\n 'source_id' : 'CHEBI:28177',\n 'target_id' : 'egg',\n 'type': ['merge_this'],\n 'merge_this_list' : ['edge_1'],\n 'unique_attr_e_1' : 'e_1',\n 'id' : 'winning_edge_id'\n },\n ]\n },\n 'knowledge_map': [\n {\n 'node_bindings': {\n 'chemical_substance': 'CHEBI:28177',\n 'gene': 'HGNC:2597'\n },\n 'edge_bindings': {}\n }\n ]\n },\n {\n 'knowledge_graph': {\n 'nodes': [\n {'id': 'CHEBI:28177', 'type': 'chemical_substance'},\n {\n 'id': 'also_test_array_type_and_string_type_merge',\n 'name':'test_name_merge',\n 'type': ['foo_type','bar_type'],\n 'test_attr': ['a','c']\n },\n {'id': 'TEST:00000', 'type': 'test', 'merged_property': ['a','c']},\n ],\n 'edges': [\n {'id': 'e0', 'source_id': 'CHEBI:28177', 'target_id': 'TEST:00000'},\n {\n 'source_id' : 'CHEBI:28177',\n 'target_id' : 'also_test_array_type_and_string_type_merge',\n 'type': ['merge_this'],\n 'merge_this_list' : ['edge_2'],\n 'unique_attr_e_2' : 'e_2'\n }\n ]\n },\n 'knowledge_map': [\n {\n 'node_bindings': {\n 'chemical_substance': 'CHEBI:28177',\n 'test': 'TEST:00000'\n },\n 'edge_bindings': {}\n }\n ]\n }\n ]\n\n expected_result = {\n \"knowledge_graph\": {\n \"edges\": [\n {\n \"id\": \"e0\",\n \"source_id\": \"CHEBI:28177\",\n \"target_id\": \"HGNC:2597\",\n \"type\": []\n },\n {\n \"id\": \"e0\",\n \"source_id\": \"CHEBI:28177\",\n \"target_id\": \"equivalent_identifier_merge\",\n \"type\": []\n },\n {\n \"id\" : \"winning_edge_id\",\n \"source_id\" : \"CHEBI:28177\",\n \"target_id\" : \"egg\",\n \"type\" : [\"merge_this\"],\n \"merge_this_list\" : [\"edge_1\", \"edge_2\"],\n \"unique_attr_e_1\" : \"e_1\",\n \"unique_attr_e_2\" : \"e_2\"\n }\n ],\n \"nodes\": [\n {\n \"equivalent_identifiers\": [\n \"CHEBI:28177\"\n ],\n \"id\": \"CHEBI:28177\",\n \"type\": [\"chemical_substance\"]\n },\n {\n \"equivalent_identifiers\": [\n \"HGNC:2597\"\n ],\n \"id\": \"HGNC:2597\",\n \"type\": [\"gene\"]\n },\n {\n 
\"equivalent_identifiers\": [\n \"also_test_array_type_and_string_type_merge\",\n \"egg\"\n ],\n \"type\": [\n \"foo_type\",\n \"bar_type\"\n ],\n \"id\": \"egg\",\n \"name\": \"test_name_merge\",\n \"test_attr\": [\n \"a\",\n \"b\",\n \"c\"\n ]\n },\n {\n \"equivalent_identifiers\": [\n \"TEST:00000\",\n \"equivalent_identifier_merge\"\n ],\n \"merged_property\": [\"a\", \"b\", \"c\"],\n \"id\": \"equivalent_identifier_merge\",\n \"type\": [\"test\"]\n }\n ]\n },\n \"knowledge_map\": [\n {\n \"edge_bindings\": {},\n \"node_bindings\": {\n \"chemical_substance\": \"CHEBI:28177\",\n \"gene\": \"HGNC:2597\"\n }\n },\n {\n \"edge_bindings\": {},\n \"node_bindings\": {\n \"chemical_substance\": \"CHEBI:28177\",\n \"test\": \"equivalent_identifier_merge\"\n }\n }\n ],\n 'question_graph': {\n 'edges': [\n {\n 'id': 'foo',\n 'type': 'test'\n }\n ],\n 'nodes': [\n {\n 'id': 'bar',\n 'type': 'bartest'\n }\n ]\n }\n }\n merged_results = select.merge_results (\n mock_responses,\n tranql,\n {\n 'edges': [\n {\n 'id': 'foo',\n 'type': 'test'\n }\n ],\n 'nodes': [\n {\n 'id': 'bar',\n 'type': 'bartest'\n }\n ]\n },\n root_order=None\n )\n assert ordered(merged_results) == ordered(expected_result)\ndef test_ast_plan_strategy (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n print (\"test_ast_plan_strategy ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n # QueryPlanStrategy always uses /schema regardless of the `FROM` clause.\n ast = tranql.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/schema'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n\n select = ast.statements[0]\n plan = select.planner.plan (select.query)\n\n # Assert that it has planned to query both gamma and rtx\n assert (\n (plan[0][1] == \"/graph/gamma/quick\" and plan[1][1] == \"/graph/rtx\") or\n (plan[1][1] == \"/graph/rtx\" and plan[1][1] == \"/graph/gamma/quick\")\n )\n # Both should be querying the same thing (disease->diseasee), differing only in the sub_schema that they are querying\n for sub_schema_plan in plan:\n assert sub_schema_plan[2][0][0].type_name == \"disease\"\n assert sub_schema_plan[2][0][0].name == \"cohort_diagnosis\"\n assert sub_schema_plan[2][0][0].nodes == [\"MONDO:0004979\"]\n\n assert sub_schema_plan[2][0][1].direction == \"->\"\n assert sub_schema_plan[2][0][1].predicate == None\n\n assert sub_schema_plan[2][0][2].type_name == \"disease\"\n assert sub_schema_plan[2][0][2].name == \"diagnoses\"\n assert sub_schema_plan[2][0][2].nodes == []\ndef test_ast_implicit_conversion (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n tranql = TranQL ()\n ast = tranql.parse (\"\"\"\n SELECT drug_exposure->chemical_substance\n FROM '/schema'\n \"\"\")\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n\n assert_lists_equal(statements[0].query.order,[\"drug_exposure\",\"chemical_substance\"])\n assert statements[0].get_schema_name(tranql) == \"implicit_conversion\"\n\ndef test_ast_plan_statements (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n print(\"test_ast_plan_statements ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n # QueryPlanStrategy always uses /schema regardless of the `FROM` clause.\n ast = tranql.parse (\"\"\"\n SELECT cohort_diagnosis:disease->diagnoses:disease\n FROM '/schema'\n WHERE cohort_diagnosis = 'MONDO:0004979' --asthma\n AND Sex = '0'\n AND 
cohort = 'all_patients'\n AND max_p_value = '0.5'\n SET '$.knowledge_graph.nodes.[*].id' AS diagnoses\n \"\"\")\n\n\n select = ast.statements[0]\n statements = select.plan (select.planner.plan (select.query))\n\n assert len(statements) == 2\n\n for statement in statements:\n assert_lists_equal(\n list(statement.query.concepts.keys()),\n [\n \"cohort_diagnosis\",\n \"diagnoses\"\n ]\n )\n\n assert statement.query.concepts['cohort_diagnosis'].nodes == [\"MONDO:0004979\"]\n assert statement.query.concepts['diagnoses'].nodes == []\n # TODO: figure out why there are duplicates generated??\n assert_lists_equal(statement.where, [\n ['cohort_diagnosis', '=', 'MONDO:0004979'],\n ['Sex', '=', '0'], ['Sex', '=', '0'],\n ['cohort', '=', 'all_patients'],\n ['cohort', '=', 'all_patients'],\n ['max_p_value', '=', '0.5'],\n ['max_p_value', '=', '0.5']\n ])\n assert statement.set_statements == []\n\n assert (\n (statements[0].service == \"/graph/gamma/quick\" and statements[1].service == \"/graph/rtx\") or\n (statements[0].service == \"/graph/rtx\" and statements[1].service == \"/graph/gamma/quick\")\n )\n\ndef test_ast_bidirectional_query (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Validate that we parse and generate queries correctly for bidirectional queries. \"\"\"\n print (\"test_ast_bidirectional_query ()\")\n app = TranQL ()\n app.resolve_names = False\n disease_id = \"MONDO:0004979\"\n chemical = \"PUBCHEM:2083\"\n app.context.set (\"drug\", chemical)\n app.context.set (\"disease\", disease_id)\n mocker = MockHelper ()\n expectations = {\n \"cop.tranql\" : mocker.get_obj (\"bidirectional_question.json\")\n }\n queries = { os.path.join (os.path.dirname (__file__), \"..\", \"queries\", k) : v\n for k, v in expectations.items () }\n for program, expected_output in queries.items ():\n ast = app.parse_file (program)\n statement = ast.statements\n \"\"\" This uses an unfortunate degree of knowledge about the implementation,\n both of the AST, and of theq query. Consider alternatives. \"\"\"\n questions = ast.statements[2].generate_questions (app)\n nodes = questions[0]['question_graph']['nodes']\n edges = questions[0]['question_graph']['edges']\n node_index = { n['id'] : i for i, n in enumerate (nodes) }\n assert nodes[-1]['curie'] == disease_id\n assert nodes[0]['curie'] == chemical\n assert node_index[edges[-1]['target_id']] == node_index[edges[-1]['source_id']] - 1\n\n#####################################################\n#\n# Interpreter tests. Test the interpreter interface.\n#\n#####################################################\ndef test_interpreter_set (requests_mock):\n set_mock(requests_mock, \"workflow-5\")\n \"\"\" Test set statements by executing a few and checking values after. 
\"\"\"\n print (\"test_interpreter_set ()\")\n tranql = TranQL ()\n tranql.resolve_names = False\n tranql.execute (\"\"\"\n -- Test set statements.\n SET disease = 'asthma'\n SET max_p_value = '0.5'\n SET cohort = 'COHORT:22'\n SET population_density = 2\n SET icees.population_density_cluster = 'http://localhost/ICEESQuery'\n SET gamma.quick = 'http://robokop.renci.org:80/api/simple/quick/' \"\"\")\n\n variables = [ \"disease\", \"max_p_value\", \"cohort\", \"icees.population_density_cluster\", \"gamma.quick\" ]\n output = { k : tranql.context.resolve_arg (f\"${k}\") for k in variables }\n #print (f\"resolved variables --> {json.dumps(output, indent=2)}\")\n assert output['disease'] == \"asthma\"\n assert output['cohort'] == \"COHORT:22\"\n\ndef test_program (requests_mock):\n print (\"test_program ()\")\n mock_map = MockMap (requests_mock, \"workflow-5\")\n tranql = TranQL (options = {\n \"asynchronous\" : False,\n \"resolve_names\" : False\n })\n ast = tranql.execute (\"\"\"\n --\n -- Workflow 5\n --\n -- Modules 1-4: Chemical Exposures by Clinical Clusters\n -- For sub-clusters within the overall ICEES asthma cohort defined by\n -- differential population density, which chemicals are related to these\n -- clusters with a p_value less than some threshold?\n --\n -- Modules 5-*: Knowledge Graph Phenotypic Associations\n -- For chemicals produced by the first steps, what phenotypes are\n -- associated with exposure to these chemicals?\n --\n SET id_filters = \"SCTID,rxcui,CAS,SMILES,umlscui\"\n\n SELECT population_of_individual_organisms->drug_exposure\n FROM \"/clinical/cohort/disease_to_chemical_exposure\"\n WHERE EstResidentialDensity < '2'\n AND population_of_individual_organizms = 'x'\n AND cohort = 'all_patients'\n AND max_p_value = '0.1'\n SET '$.knowledge_graph.nodes.[*].id' AS chemical_exposures\n\n SELECT chemical_substance->gene->biological_process->phenotypic_feature\n FROM \"/graph/gamma/quick\"\n WHERE chemical_substance = $chemical_exposures\n SET knowledge_graph\n \"\"\")\n\n #print (f\"{ast}\")\n expos = tranql.context.resolve_arg(\"$chemical_exposures\")\n #print (f\" expos =======> {json.dumps(expos)}\")\n\n kg = tranql.context.resolve_arg(\"$knowledge_graph\")\n assert kg['knowledge_graph']['nodes'][0]['id'] == \"CHEBI:28177\"\n assert kg['knowledge_map'][0]['node_bindings']['chemical_substance'] == \"CHEBI:28177\"\n","sub_path":"tranql/tests/test_tranql.py","file_name":"test_tranql.py","file_ext":"py","file_size_in_byte":33425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"613254284","text":"import logging\nfrom random import choice\n\nfrom classes.fighter import Fighter\nfrom common import global_vars as gv\nfrom common.helpers import tile_blocked_by\nfrom data.actor_data.barks_data import barks_data\nfrom gui.messages import Message, MessageType, LogLevel\n\n\nclass Monster(Fighter):\n \"\"\" base-class for all hostile mobs \"\"\"\n\n def __init__(self, x, y, name, char, color, descr, hp, nat_armor, vision, endurance, agility, unarmed_dmg, bark_types=None, loadouts=None, faction=None, ai_comp=None):\n super().__init__(x, y, name, char, color, descr, hp, nat_armor, vision, endurance, agility, unarmed_dmg, loadouts=loadouts, faction=faction, ai_comp=ai_comp)\n\n # create the bark.dictionary for the specific monster\n self.barks = None\n if bark_types is not None:\n\n barks = {}\n\n # merge the dictionaries of all types into a single dictionary\n for type in bark_types:\n d = barks_data[type]\n for key, value in d.items():\n for i in value:\n try:\n barks[key].append(i)\n except:\n barks[key] = [i]\n\n self.barks = barks\n\n def attack(self, target):\n \"\"\" basic attack function for monsters \"\"\"\n\n weapon = self.get_weapon()\n\n # if no weapon exists, use unarmed damage\n dmg_done = weapon.hit(target) if weapon else self.hit(target)\n\n if dmg_done > 0 and weapon:\n # make the target take some damage\n Message('The {0} hits you with a {1}'.format(self.name, weapon.name),\n msg_type=MessageType.INFO_BAD, log_level=LogLevel.COMBAT)\n target.take_damage(dmg_done)\n elif dmg_done > 0 and not weapon:\n Message('The {0} rends you with his claws.'.format(self.name),\n msg_type=MessageType.INFO_BAD, log_level=LogLevel.COMBAT)\n target.take_damage(dmg_done)\n elif dmg_done <= 0 and weapon:\n Message(self.name.capitalize() + ' attacks you but it has no effect!',\n msg_type=MessageType.INFO_GOOD, log_level=LogLevel.COMBAT)\n else:\n Message('The {0} pummels you with their fist without effect'.format(self.name),\n msg_type=MessageType.INFO_GOOD, log_level=LogLevel.COMBAT)\n\n # engage the target lock if possible\n if target == gv.player and gv.player.opponent is None and self.hp > 0:\n gv.player.opponent = self\n\n def move(self, dx, dy):\n \"\"\" Basic move function for monsters \"\"\"\n\n to_x, to_y = self.x + dx, self.y + dy\n\n if gv.game_map.walkable[to_x][to_y]:\n\n # check if a blocking object is in the target tile\n target = tile_blocked_by(to_x, to_y)\n\n if target is None:\n self.x += dx\n self.y += dy\n\n # if blocking object is an enemy target\n elif target is gv.player:\n self.attack(target)\n\n def bark(self,type):\n \"\"\" make some sounds \"\"\"\n if self.barks is not None:\n\n try:\n bark = choice(self.barks[type])\n except:\n logging.error('Could not find bark-type {0} in {1}.'.format(type, self.barks))\n else:\n Message('The {0} {1}'.format(self.name, bark), msg_type=MessageType.FLUFF, log_level=LogLevel.GAMEPLAY)\n #Message(random.choice(self.owner.barks), msg_type=MessageType.FLUFF, log_level=LogLevel.GAMEPLAY)","sub_path":"classes/monsters/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"396150683","text":"import pgzrun\n\nWIDTH = 300\nHEIGHT = 300\n\na = Actor('alien', pos=(100, 100))\n# a.angle = 90\n# a.pos = (120, 200)\n# a.x = 30\n# a.y = 30\n\ndef draw():\n screen.clear()\n #screen.fill((255, 255, 255))\n a.draw()\n\npgzrun.go()\n","sub_path":"common_py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"336621499","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.tests.helper import pytest\nfrom .. import (calculate_total_error, subtract_background,\n interpolate_masked_data)\n\nSHAPE = (5, 5)\nDATAVAL = 2.\nDATA = np.ones(SHAPE) * DATAVAL\nMASK = np.zeros_like(DATA, dtype=bool)\nMASK[2, 2] = True\nERROR = np.ones(SHAPE)\nEFFGAIN = np.ones(SHAPE) * DATAVAL\nBACKGROUND = np.ones(SHAPE)\nWRONG_SHAPE = np.ones((2, 2))\n\n\nclass TestCalculateTotalError(object):\n def test_error_shape(self):\n with pytest.raises(ValueError):\n calculate_total_error(DATA, error=WRONG_SHAPE,\n effective_gain=EFFGAIN)\n\n def test_gain_shape(self):\n with pytest.raises(ValueError):\n calculate_total_error(DATA, error=ERROR,\n effective_gain=WRONG_SHAPE)\n\n @pytest.mark.parametrize('effective_gain', (0, -1))\n def test_gain_le_zero(self, effective_gain):\n with pytest.raises(ValueError):\n calculate_total_error(DATA, error=ERROR,\n effective_gain=effective_gain)\n\n def test_gain_scalar(self):\n error_tot = calculate_total_error(DATA, error=ERROR,\n effective_gain=2.)\n assert_allclose(error_tot, np.sqrt(2.) * ERROR)\n\n def test_gain_array(self):\n error_tot = calculate_total_error(DATA, error=ERROR,\n effective_gain=EFFGAIN)\n assert_allclose(error_tot, np.sqrt(2.) * ERROR)\n\n\nclass TestSubtractBackground(object):\n def test_background_shape(self):\n with pytest.raises(ValueError):\n subtract_background(DATA, WRONG_SHAPE)\n\n def test_background_scalar(self):\n data, background = subtract_background(DATA, 1.)\n assert_allclose(data, DATA - 1.)\n assert_allclose(background, BACKGROUND)\n\n def test_background_array(self):\n data, background = subtract_background(DATA, BACKGROUND)\n assert_allclose(data, DATA - 1.)\n assert_allclose(background, BACKGROUND)\n\n\nclass TestInterpolateMaskedData(object):\n def test_mask_shape(self):\n with pytest.raises(ValueError):\n interpolate_masked_data(DATA, WRONG_SHAPE)\n\n def test_error_shape(self):\n with pytest.raises(ValueError):\n interpolate_masked_data(DATA, MASK, error=WRONG_SHAPE)\n\n def test_background_shape(self):\n with pytest.raises(ValueError):\n interpolate_masked_data(DATA, MASK, background=WRONG_SHAPE)\n\n def test_interpolation(self):\n data2 = DATA.copy()\n data2[2, 2] = 100.\n error2 = ERROR.copy()\n error2[2, 2] = 100.\n background2 = BACKGROUND.copy()\n background2[2, 2] = 100.\n data, error, background = interpolate_masked_data(\n data2, MASK, error=error2, background=background2)\n assert_allclose(data, DATA)\n assert_allclose(error, ERROR)\n assert_allclose(background, BACKGROUND)\n\n def test_interpolation_larger_mask(self):\n data2 = DATA.copy()\n data2[2, 2] = 100.\n error2 = ERROR.copy()\n error2[2, 2] = 100.\n background2 = BACKGROUND.copy()\n background2[2, 2] = 100.\n mask2 = MASK.copy()\n mask2[1:4, 1:4] = True\n data, error, background = interpolate_masked_data(\n data2, MASK, error=error2, background=background2)\n assert_allclose(data, DATA)\n assert_allclose(error, ERROR)\n assert_allclose(background, BACKGROUND)\n","sub_path":"photutils/utils/tests/test_prepare_data.py","file_name":"test_prepare_data.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"252324986","text":"import minqlx\n\nimport requests\nimport threading\n\n\"\"\"\nPlugin that restricts playing on the server to certain QLStats.net privacy settings.\n\nUses:\n- qlx_qlstatsPrivacyKick (default: 0), set to 1 to kick any clients with unallowed privacy settings upon connect.\n- qlx_qlstatsPrivacyWhitelist (default: \"public, anoynmous, private, untracked\")\n List of allowed privacy settings on this server. Take out any value from the default expansive list.\n- qlx_qlstatsPrivacyJoinAttempts (default: 5), amount of join attempts before the player gets kicked,\n if privacyKick is disabled. Set to -1 to disable kicking of players for their join attempts.\n\"\"\"\n\nCOLORED_QLSTATS_INSTRUCTIONS = \"Error: Open qlstats.net, click Login/Sign-up, set privacy settings to ^6{}^7, \" \\\n \"click save and reconnect!\"\n\n\nclass qlstats_privacy_policy(minqlx.Plugin):\n\n def __init__(self):\n super().__init__()\n self.set_cvar_once(\"qlx_qlstatsPrivacyBlock\", \"1\")\n self.set_cvar_once(\"qlx_qlstatsPrivacyKick\", \"0\")\n self.set_cvar_once(\"qlx_qlstatsPrivacyWhitelist\", \"public, anonymous, private, untracked\")\n self.set_cvar_once(\"qlx_qlstatsPrivacyJoinAttempts\", \"5\")\n\n self.plugin_enabled = True\n self.kick_players = self.get_cvar(\"qlx_qlstatsPrivacyKick\", bool)\n self.allowed_privacy = self.get_cvar(\"qlx_qlstatsPrivacyWhitelist\", list)\n self.max_num_join_attempts = self.get_cvar(\"qlx_qlstatsPrivacyJoinAttempts\", int)\n\n self.exceptions = set()\n self.join_attempts = dict()\n\n # Collection of threads looking up elo of players {steam_id: thread }\n self.connectthreads = {}\n\n self.add_hook(\"player_connect\", self.handle_player_connect, priority=minqlx.PRI_HIGHEST)\n self.add_hook(\"player_disconnect\", self.handle_player_disconnect)\n self.add_hook(\"team_switch_attempt\", self.handle_team_switch_attempt)\n\n self.add_command((\"except\", \"e\"), self.cmd_policy_exception, permission=5, usage=\"\")\n self.add_command(\"privacy\", self.cmd_switch_plugin, permission=1, usage=\"[status]\")\n\n def check_balance_plugin_loaded(self):\n return 'balance' in self.plugins\n\n def check_for_right_version_of_balance_plugin(self):\n return hasattr(self.plugins[\"balance\"], \"player_info\")\n\n def check_for_correct_balance_plugin(self):\n if not self.check_balance_plugin_loaded():\n self.logger.info(\"Balance plugin not loaded. \"\n \"This plugin just works with the balance plugin in place.\")\n return False\n\n if not self.check_for_right_version_of_balance_plugin():\n self.logger.info(\"Wrong version of the ^6balance^7 plugin loaded. 
Make sure to load \"\n \"https://github.com/MinoMino/minqlx-plugins/blob/master/balance.py.\")\n return False\n\n return True\n\n def handle_player_connect(self, player):\n if not self.plugin_enabled:\n return\n\n if not self.game:\n return\n\n if not self.check_for_correct_balance_plugin():\n self.disable_policy_check(minqlx.CHAT_CHANNEL)\n return\n\n b = minqlx.Plugin._loaded_plugins['balance']\n b.add_request({player.steam_id: self.game.type_short}, self.callback_connect, minqlx.CHAT_CHANNEL)\n\n if not self.get_cvar(\"qlx_qlstatsPrivacyBlock\", bool):\n return\n\n if player.steam_id not in self.connectthreads:\n ct = ConnectThread(player.steam_id, self.get_cvar(\"qlx_balanceApi\"))\n self.connectthreads[player.steam_id] = ct\n ct.start()\n self.remove_thread(player.steam_id) # remove it after a while\n\n # Check if thread is ready or not\n ct = self.connectthreads[player.steam_id]\n if ct.isAlive():\n return \"Fetching your qlstats settings...\"\n\n # Check if thread is ready or not\n try:\n res = ct._result\n if not res:\n return \"Fetching your qlstats settings...\"\n\n if res.status_code != requests.codes.ok:\n raise IOError(\"Invalid response code from qlstats.net.\")\n self.logger.debug(res.text)\n js = res.json()\n\n if \"playerinfo\" not in js:\n raise IOError(\"Invalid response content from qlstats.net.\")\n\n if str(player.steam_id) not in js[\"playerinfo\"]:\n raise IOError(\"Response from qlstats.net did not include data for the requested player.\")\n\n if \"privacy\" not in js[\"playerinfo\"][str(player.steam_id)]:\n raise IOError(\"Response from qlstats.net did not include privacy information.\")\n\n if js[\"playerinfo\"][str(player.steam_id)][\"privacy\"] not in self.allowed_privacy:\n return minqlx.Plugin.clean_text(self.colored_qlstats_instructions())\n\n except Exception as e:\n minqlx.console_command(\"echo QLStatsPrivacyError: {}\".format(e))\n\n def callback_connect(self, players, channel):\n if not self.plugin_enabled:\n return\n\n if not self.kick_players:\n return\n\n player_info = self.plugins[\"balance\"].player_info\n\n for sid in players:\n if sid in self.exceptions:\n continue\n\n if sid not in player_info:\n continue\n\n if player_info[sid][\"privacy\"] not in self.allowed_privacy:\n self.delayed_kick(sid, minqlx.Plugin.clean_text(self.colored_qlstats_instructions()))\n\n def colored_qlstats_instructions(self):\n return COLORED_QLSTATS_INSTRUCTIONS.format(\"^7, ^6\".join(self.allowed_privacy))\n\n @minqlx.delay(5)\n def delayed_kick(self, sid, reason):\n self.kick(sid, reason)\n\n def handle_player_disconnect(self, player, reason):\n if player.steam_id in self.exceptions:\n self.exceptions.remove(player.steam_id)\n\n if player.steam_id in self.join_attempts:\n del self.join_attempts[player.steam_id]\n\n def handle_team_switch_attempt(self, player, old, new):\n if not self.plugin_enabled:\n return\n\n if not self.game:\n return\n\n if player.steam_id in self.exceptions:\n return\n\n if not self.check_for_correct_balance_plugin():\n self.disable_policy_check(minqlx.CHAT_CHANNEL)\n return\n\n if new in [\"red\", \"blue\", \"any\"]:\n player_info = self.plugins[\"balance\"].player_info\n if player.steam_id not in player_info:\n player.tell(\"We couldn't fetch your ratings, yet. 
You will not be able to join, until we did.\")\n return minqlx.RET_STOP_ALL\n if player_info[player.steam_id][\"privacy\"] not in self.allowed_privacy:\n if self.max_num_join_attempts > 0:\n if player.steam_id not in self.join_attempts:\n self.join_attempts[player.steam_id] = self.max_num_join_attempts\n\n self.join_attempts[player.steam_id] -= 1\n\n if self.join_attempts[player.steam_id] < 0:\n player.kick(minqlx.Plugin.clean_text(self.colored_qlstats_instructions()))\n return minqlx.RET_STOP_ALL\n self.msg(\"{}^7 not allowed to join due to {} QLStats.net privacy settings. \"\n \"{} join attempts before automatically kicking you.\"\n .format(player.name, player_info[player.steam_id][\"privacy\"].lower(),\n self.join_attempts[player.steam_id]))\n player.tell(\"Not allowed to join due to ^6{}1^7 QLStats.net data. \"\n \"{} join attempts before automatically kicking you.\"\n .format(player_info[player.steam_id][\"privacy\"].lower(),\n self.join_attempts[player.steam_id]))\n else:\n self.msg(\"{}^7 not allowed to join due to {} QLStats.net privacy settings. \"\n .format(player.name, player_info[player.steam_id][\"privacy\"].lower()))\n player.tell(\"Not allowed to join due to ^6{}1^7 QLStats.net data. \"\n .format(player_info[player.steam_id][\"privacy\"].lower()))\n\n player.center_print(\"^3Join not allowed. See instructions in console!\")\n player.tell(self.colored_qlstats_instructions())\n\n if old in [\"spectator\", \"free\"]:\n return minqlx.RET_STOP_ALL\n\n player.put(\"spectator\")\n\n def cmd_policy_exception(self, player, msg, channel):\n if len(msg) != 2:\n return minqlx.RET_USAGE\n\n teams = self.teams()\n speccing_players = teams[\"spectator\"] + teams[\"free\"]\n except_player = self.find_player(msg[1], speccing_players)\n\n if except_player is None or len(except_player) == 0:\n player.tell(\"^7Could not find player identified by ^1{}^7.\".format(msg[1]))\n return\n\n if len(except_player) > 1:\n player.tell(\"^7More than one matching spectator found: {}\"\n .format(\"^7, \".join([player.name for player in except_player])))\n player.tell(\"^7Please be more specific which one to put on the exception list!\")\n return\n\n channel.reply(\"^7An admin has allowed ^2{}^7 to temporarily join despite missing or \"\n \"inadequate qlstats privacy information.\"\n .format(except_player[0].clean_name))\n self.exceptions.add(except_player[0].steam_id)\n\n def cmd_switch_plugin(self, player, msg, channel):\n if len(msg) > 2:\n return minqlx.RET_USAGE\n\n if len(msg) == 2:\n if msg[1] != \"status\":\n return minqlx.RET_USAGE\n\n channel.reply(\"^7QLStats policy check is {}\".format(\"enabled\" if self.plugin_enabled else \"disabled\"))\n return\n\n if not self.plugin_enabled:\n self.enable_policy_check(channel)\n return\n\n self.disable_policy_check(channel)\n\n def disable_policy_check(self, channel):\n self.plugin_enabled = False\n channel.reply(\"^7QLStats policy check disabled. Everyone will be able to join.\")\n\n def enable_policy_check(self, channel):\n if not self.check_for_correct_balance_plugin():\n return\n\n self.plugin_enabled = True\n channel.reply(\"^7QLStats policy check enabled.\")\n\n if self.kick_players:\n self.callback_connect(\n {player.steam_id: self.game.type_short for player in self.players()}, channel)\n return\n\n teams = self.teams()\n player_info = self.plugins[\"balance\"].player_info\n\n for player in teams[\"red\"] + teams[\"blue\"]:\n if player.steam_id not in player_info:\n player.tell(\"We couldn't fetch your ratings, yet. 
You will not be able to play, until we did.\")\n player.put(\"spectator\")\n continue\n\n if player_info[player.steam_id][\"privacy\"] not in self.allowed_privacy:\n self.msg(\"{}^7 not allowed to join due to {} QLStats.net privacy settings.\"\n .format(player.name, player_info[player.steam_id][\"privacy\"].lower()))\n player.center_print(\"^3Join not allowed. See instructions in console!\")\n player.tell(\"Not allowed to join due to ^6{}1 7 QLStats.net data.\"\n .format(player_info[player.steam_id][\"privacy\"].lower()))\n player.tell(self.colored_qlstats_instructions())\n player.put(\"spectator\")\n\n @minqlx.delay(30) # 30 seconds\n def remove_thread(self, sid):\n if sid in self.connectthreads:\n del self.connectthreads[sid]\n\n\nclass ConnectThread(threading.Thread):\n\n def __init__(self, steam_id, balance_api):\n super(ConnectThread, self).__init__()\n self._balance_api = balance_api\n self._steam_id = steam_id\n self._result = None\n\n def run(self):\n url = \"http://qlstats.net/{elo}/{}\".format(self._steam_id, elo=self._balance_api)\n self._result = requests.get(url)\n","sub_path":"src/main/python/qlstats_privacy_policy.py","file_name":"qlstats_privacy_policy.py","file_ext":"py","file_size_in_byte":12291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
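Stripped of the minqlx and HTTP plumbing, the policy decision in the plugin above reduces to: look up the player's qlstats privacy value and compare it against the whitelist, treating players whose ratings have not arrived yet as a separate case. A pure-function sketch of that check, with invented player data rather than the plugin's real structures:

```python
def privacy_allowed(steam_id, player_info, allowed_privacy):
    info = player_info.get(steam_id)
    if info is None:
        return None  # ratings not fetched yet; caller should hold the player back
    return info.get("privacy") in allowed_privacy

allowed = ["public", "anonymous", "private", "untracked"]
players = {111: {"privacy": "public"}, 222: {"privacy": "what others see"}}

print(privacy_allowed(111, players, allowed))  # True
print(privacy_allowed(222, players, allowed))  # False
print(privacy_allowed(333, players, allowed))  # None (still fetching)
```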
+{"seq_id":"595870762","text":"import logging\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import connection\n\ntry:\n from django.apps import apps\n\n get_model = apps.get_model\nexcept ImportError:\n from django.db.models.loading import get_model\n\nfrom django.http import HttpResponse\n\ntry:\n model = get_model(settings.HEALTH_MODEL)\nexcept Exception as e:\n raise ImproperlyConfigured(\n f\"settings.HEALTH_MODEL doesn't resolve to a useable model {str(e)}\"\n )\n\n\nlog = logging.getLogger(__name__)\n\n\ndef health(request):\n # check debug\n if settings.DEBUG:\n log.exception(\"Debug mode not allowed in production\")\n return HttpResponse(\n \"Debug mode not allowed in production\",\n content_type=\"text/plain\",\n status=500,\n )\n\n # check database\n try:\n with connection.cursor() as cursor:\n cursor.execute(\"select 1\")\n assert cursor.fetchone()\n except Exception as e:\n log.exception(f\"Database connectivity failed: {str(e)}\")\n return HttpResponse(\n \"Database connectivity failed\", content_type=\"text/plain\", status=500\n )\n\n return HttpResponse(\"Connectivity OK\", content_type=\"text/plain\", status=200)\n\n\ndef check_data(request):\n if model.objects.all().count() < 30000:\n return HttpResponse(\n \"Too few tellus data in the database\", content_type=\"text/plain\", status=500\n )\n return HttpResponse(\"Database data OK\", content_type=\"text/plain\", status=200)\n","sub_path":"src/health/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"469869297","text":"# server_skel.py\r\nimport socket\r\nimport time\r\nimport threading\r\nimport random\r\n\r\nserver_ip = \"127.0.0.1\"\r\nserver_port = 5005\r\n\r\n# class for thread\r\nclass serverThread (threading.Thread):\r\n\tAVERAGE_DELAY = 100 # ms\r\n\r\n\t# constructor\r\n\tdef __init__(self, data, addr):\r\n\t\tthreading.Thread.__init__(self)\r\n\t\tself.data = data\r\n\t\tself.addr = addr\r\n\r\n\t# method for handling incoming packets\r\n\tdef run(self):\r\n\t\tprint (\"Server: recv \\\"\" + data.decode('utf-8') + \"\\\"\") # receive message\r\n\t\tdropRate = random.uniform(0, 0.4) # unifrom distribution with an average of 0.2\r\n\t\tif (random.random() < dropRate): # drop packet with probability dropRate\r\n\t\t\tprint (\"Server: drop \\\"\" + data.decode('utf-8') + \"\\\"\") # drop message\r\n\t\telse:\r\n\t\t\tdelay = random.expovariate(1/self.AVERAGE_DELAY) # exponential distribution with an average of AVERAGE_DELAY\r\n\t\t\ttime.sleep(delay*0.001) # sleep for delay ms\r\n\t\t\tsock.sendto(self.data, self.addr) # send datagram to client\r\n\t\t\tprint (\"Server: reply \\\"\" + self.data.decode('utf-8') + \"\\\"\") # reply message\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # open UDP socket\r\nsock.bind((server_ip, server_port)) # bind socket\r\n\r\nwhile True:\r\n\tdata, addr = sock.recvfrom(1024) # receive incoming packets\r\n\tserverThread(data, addr).start() # create and start new thread\r\n","sub_path":"server_skel_optional.py","file_name":"server_skel_optional.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"20850401","text":"import os.path\nimport sqlite3\nimport unittest\n\nDATABASE = os.path.join(os.path.dirname(__file__), 'data/test.db')\n\n\nclass TablesMatchTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.conn = sqlite3.connect(DATABASE)\n\n @classmethod\n def tearDownClass(cls):\n cls.conn.close()\n\n def setUp(self):\n self.cursor = self.conn.cursor()\n self.addCleanup(self.cursor.close)\n\n def test_data_match(self):\n \"\"\"Test that same data is stored for same ids in both tables.\"\"\"\n query = \"\"\"\n SELECT COUNT(*) FROM (\n SELECT * FROM (\n SELECT * FROM consultants_backend\n UNION\n SELECT * FROM consultants_frontend)\n GROUP BY id HAVING COUNT(*) > 1);\n \"\"\"\n self.cursor.execute(query)\n rows = self.cursor.fetchone()\n non_matching = rows[0]\n self.assertEqual(\n non_matching, 0,\n \"The number of entries with different data - {}\".format(non_matching))\n\n def test_count_match(self):\n \"\"\"Test that the number of records is the same.\"\"\"\n query = \"\"\"\n SELECT COUNT(*) FROM consultants_backend\n UNION ALL\n SELECT COUNT(*) FROM consultants_frontend;\"\"\"\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n backend_count, frontend_count = [row[0] for row in rows]\n self.assertEqual(\n backend_count, frontend_count,\n 'The number of entries in consultants_backend (count {}) and consultants_frontend '\n '(count {}) tables do not match'.format(backend_count, frontend_count))\n\n def test_id_match(self):\n \"\"\"Test that ids are the same in both tables.\"\"\"\n query = \"\"\"\n SELECT COUNT(*) FROM (\n SELECT * FROM (\n SELECT id FROM consultants_backend\n UNION ALL\n SELECT id FROM consultants_frontend )\n GROUP BY id HAVING COUNT(*) = 1);\n \"\"\"\n self.cursor.execute(query)\n rows = self.cursor.fetchone()\n id_diff_num = rows[0]\n self.assertEqual(\n id_diff_num, 0,\n \"The number of different ids - {}\".format(id_diff_num))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tables_test.py","file_name":"tables_test.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"485092469","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nfrom sys import stdout\nfrom twisted.internet import interfaces, reactor, task, defer, protocol\nfrom twisted.internet.protocol import Factory, Protocol\nfrom twisted.internet.endpoints import TCP4ServerEndpoint\nimport construct as c2\nimport simple_message as sm\nfrom twisted.protocols.basic import LineReceiver\n\n\n\nclass feedbackPublisher(Protocol):\n\n def __init__(self):\n self.lc = task.LoopingCall(self.FeedbackMessage) #add both feedback and status as loops to a connection\n self.lc1 = task.LoopingCall(self.StatusMessage)\n self.data = {}\n self.drives_powered = 0\n self.e_stopped = 0\n self.error_code = 0\n self.in_error = 0\n self.in_motion = 0\n self.mode = 0\n self.motion_possible = 0\n\n def connectionMade(self):\n print('Connection made from {}'.format(self.transport.getPeer()))\n self.lc.start(0.5) #Start loops on intervals\n self.lc1.start(3.0)\n print(\"starting feedback\")\n\n def connectionLost(self, reason):\n print('Connection lost from {}'.format(self.transport.getPeer()))\n self.lc.stop() #Stop loops on disconnect\n self.lc1.stop()\n print(\"Stopping feedback\")\n\n def dataReceived(self, data):\n print(\"connect\")\n print(data)\n\n def FeedbackMessage(self):\n #create a feedback message we populate and send\n joint_1 = 0.0\n joint_2 = 0.0\n joint_3 = 0.0\n joint_4 = 0.0\n joint_5 = 0.0\n joint_6 = 0.0\n\n SimpleMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'seq_nr' / c2.Int32sl ,\n 'joint_data'/ c2.Float32b[10]\n ),\n c2.Terminated\n ) #packa a message.\n msg = dict(\n Header=dict(msg_type=10, comm_type=1, reply_type=0),\n body=dict(seq_nr=0,joint_data=[joint_1, joint_2, joint_3, joint_4, joint_5, joint_6,0.0,0.0,0.0,0.0]\n ))\n feedback_data = SimpleMessage.build(msg)\n #print(feedback_data)\n data_len = c2.Int32sl.build(len(feedback_data))\n #print('sending feedback')\n self.transport.write(data_len + feedback_data)\n\n def StatusMessage(self):\n #Create a statusmessage that we populate and send\n StatusMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'drives_powered' / c2.Int32sl,\n 'e_stopped' / c2.Int32sl,\n 'error_code' / c2.Int32sl,\n 'in_error' / c2.Int32sl,\n 'in_motion' / c2.Int32sl,\n 'mode' / c2.Int32sl,\n 'motion_possible' / c2.Int32sl\n ),\n c2.Terminated\n )\n\n self.mode = 2\n self.drives_powered = 1\n self.motion_possible = 1\n\n msg = dict(\n Header=dict(msg_type=13, comm_type=1, reply_type=0),\n body=dict(drives_powered= self.drives_powered,\n e_stopped=self.e_stopped,\n error_code= self.error_code,\n in_error=self.in_error,\n in_motion=self.in_motion,\n mode=self.mode,\n motion_possible=self.motion_possible\n ))\n status_data = StatusMessage.build(msg)\n #print(status_data)\n data_len = c2.Int32sl.build(len(status_data))\n #print('sending status')\n self.transport.write(data_len + status_data)\n\n def jointMessage(self):\n\n\n JointMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'SequenceNumber' / c2.Int32sl,\n 'Joint_data'/c2.Float32b[10],\n 'velocity' / c2.Int32sl,\n 'duration' / c2.Int32sl,\n ),\n c2.Terminated\n )\n\nclass feedbackfactory(Factory):\n protocol = 
feedbackPublisher\n","sub_path":"simple_message/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"224000425","text":"# Import\nimport numpy as np\nimport colorsys\nfrom PIL import Image, ImageDraw\nimport pandas as pd\nfrom sklearn.cluster import MeanShift\n\n\nclass FindAPool:\n\n def __init__(self):\n pass\n\n @staticmethod\n def load_image(image):\n return Image.open(\"{}\".format(image))\n\n @staticmethod\n def image_rgb(image):\n ''' Get the RGB of each pixel of the image'''\n r = [i / 255.0 for i in image.getdata(0)]\n g = [i / 255.0 for i in image.getdata(1)]\n b = [i / 255.0 for i in image.getdata(2)]\n return pd.DataFrame({'r': r, 'g': g, 'b': b})\n\n @staticmethod\n def rgb_2_hsv(dataframe):\n ''' Convert the RGB to HSV '''\n hsv = np.array([colorsys.rgb_to_hsv(x, y, z) for x, y, z\n in zip(dataframe.r, dataframe.g, dataframe.b)])\n df = pd.DataFrame(data=hsv, columns=['h', 's', 'v'])\n return df\n\n @staticmethod\n def find_pix(image, dataframe, h1, h2, s, v):\n ''' Select the pixels who fit the criteria'''\n mask = (dataframe['v'] > v / 100.0) & (dataframe['s'] > s / 100.0) & \\\n (dataframe['h'] > h2 / 360.0) & (dataframe['h'] < h1 / 360.0)\n xs, ys = image.size\n maskbool = mask.values.reshape(ys, xs)\n return maskbool, np.argwhere(maskbool)\n\n @staticmethod\n def colour_pix(image, maskbool):\n ''' Colour the good pixels in red'''\n new_image = image.copy()\n mask = Image.fromarray(np.uint8(255 * maskbool))\n new_image.paste('red', mask=mask)\n\n return new_image\n\n @staticmethod\n def cluster_pixels(piscine_locs,\n clust_algo='meanshift', quantile=0.003):\n ''' Function to group the pixels into swimming pools'''\n\n if clust_algo == 'meanshift':\n # bandwidth = estimate_bandwidth(piscine_locs, quantile=quantile)\n # if bandwidth == 0:\n # while bandwidth ==0:\n # quantile = quantile + 0.001\n # bandwidth = estimate_bandwidth(piscine_locs,\n # quantile=quantile)\n bandwidth = 6\n\n ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n ms.fit(piscine_locs)\n labels = ms.labels_\n cluster_centers = ms.cluster_centers_\n\n labels_unique = np.unique(labels)\n n_clusters_ = len(labels_unique)\n\n return n_clusters_, cluster_centers\n\n @staticmethod\n def draw_pisc(image, clusters):\n ''' Draw circles on'''\n imc = image.copy()\n draw = ImageDraw.Draw(imc)\n r = 2\n for t in clusters:\n draw.ellipse((int(t[1]) - r, int(t[0]) - r, int(t[1]) + r,\n int(t[0]) + r), fill=(255, 255, 255, 255))\n\n return imc\n\n def run(self, image, h1=205, h2=140, s=20, v=65,\n clust_algo='meanshift', quantile=0.003):\n\n im = self.load_image(image)\n print(im.size)\n rgb_df = self.image_rgb(im)\n hsv_df = self.rgb_2_hsv(rgb_df)\n maskbool, piscine_locs = self.find_pix(im, hsv_df, h1, h2, s, v)\n coloredim = self.colour_pix(im, maskbool)\n\n nclusts, ccents = self.cluster_pixels(piscine_locs, clust_algo,\n quantile)\n piscim = self.draw_pisc(coloredim, ccents)\n\n return coloredim, piscim, len(piscine_locs), nclusts\n","sub_path":"src/FindAPool/findapool.py","file_name":"findapool.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"347232391","text":"\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise\r\nfrom tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D\r\nfrom tensorflow.keras.layers import Concatenate\r\nfrom tensorflow.keras.layers import LeakyReLU\r\nfrom tensorflow.keras.layers import UpSampling2D, Conv2D, Conv2DTranspose\r\nfrom tensorflow.keras.models import Sequential, Model\r\nfrom tensorflow.keras.optimizers import Adam, RMSprop\r\nfrom tensorflow.keras import losses\r\nfrom tensorflow.keras.utils import to_categorical\r\nimport tensorflow.keras.backend as K\r\nimport scipy\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport utils.plots as plots\r\nfrom skimage.util import random_noise\r\nfrom IPython import display\r\n\r\ndef get_noisy_data(data):\r\n lst_noisy = []\r\n sigma = 0.155\r\n for image in data:\r\n noisy = random_noise(image, var=sigma ** 2)\r\n lst_noisy.append(noisy)\r\n return np.array(lst_noisy)\r\n\r\nclass ALOCC():\r\n def __init__(self, \r\n image_shape,\r\n latent_dim = 100, \r\n filters = (16, 64, 128, 256), \r\n kernel_size = 3, \r\n strides = 1, \r\n padding = \"same\",\r\n learning_rate = 0.002,\r\n r_alpha = 0.2,\r\n checkpoint_dir = None):\r\n\r\n self.image_shape = image_shape\r\n self.latent_dim = latent_dim\r\n self.filters = filters\r\n self.kernel_size = kernel_size\r\n self.strides = strides\r\n self.padding = padding\r\n self.learning_rate = learning_rate\r\n\r\n self.r_alpha = r_alpha\r\n self.checkpoint_dir = checkpoint_dir\r\n\r\n self.best_se = -1\r\n self.best_eval_loss = -1\r\n self.best_mse = -1\r\n self.__build_model() \r\n\r\n def __build_model(self):\r\n image_dims = self.image_shape\r\n optimizer = RMSprop(lr=self.learning_rate, clipvalue=1.0, decay=1e-8)\r\n self.discriminator = self.__build_discriminator(image_dims)\r\n self.discriminator.compile(optimizer=optimizer, loss='binary_crossentropy')\r\n self.generator = self.__build_generator(image_dims)\r\n img = Input(shape=image_dims)\r\n reconstructed_img = self.generator(img)\r\n self.discriminator.trainable = False\r\n validity = self.discriminator(reconstructed_img)\r\n self.adversarial_model = Model(img, [reconstructed_img, validity])\r\n self.adversarial_model.compile(loss=['binary_crossentropy', 'binary_crossentropy'],\r\n loss_weights=[self.r_alpha, 1],\r\n optimizer=optimizer)\r\n\r\n def __build_generator(self, input_shape):\r\n inputs = tf.keras.Input(shape=self.image_shape)\r\n x = inputs\r\n for f in self.filters:\r\n x = layers.Conv2D(\r\n filters=f, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding\r\n )(x)\r\n #x = layers.BatchNormalization(axis=self.image_shape[2])(x)\r\n x = layers.LeakyReLU()(x)\r\n x = layers.MaxPooling2D(pool_size=2)(x)\r\n self.volume_size = tf.keras.backend.int_shape(x)\r\n x = layers.Flatten()(x)\r\n x = layers.Dense(self.latent_dim)(x)\r\n enc = Model(inputs, x, name=\"encoder\")\r\n\r\n latent_inputs = keras.Input(shape=(self.latent_dim,))\r\n x = layers.Dense(np.prod(self.volume_size[1:]))(latent_inputs)\r\n x = layers.Reshape((self.volume_size[1], self.volume_size[2], self.volume_size[3]))(x)\r\n for f in self.filters[::-1]:\r\n x = layers.UpSampling2D(size=2)(x)\r\n x = layers.Conv2DTranspose(\r\n filters=f, kernel_size=self.kernel_size, strides=self.strides, 
padding=self.padding\r\n )(x)\r\n #x = layers.BatchNormalization(axis=self.image_shape[2])(x)\r\n x = layers.LeakyReLU()(x)\r\n x = layers.Conv2DTranspose(filters=self.image_shape[2], kernel_size=self.kernel_size, padding=self.padding)(\r\n x\r\n )\r\n #x = layers.BatchNormalization()(x)\r\n x = layers.LeakyReLU()(x)\r\n outputs = layers.Conv2DTranspose(self.image_shape[2], 3, activation=\"sigmoid\", padding=self.padding)(x)\r\n dec = keras.Model(latent_inputs, outputs, name=\"decoder\")\r\n\r\n return Model(inputs, dec(enc(inputs)), name=\"generator\")\r\n #return Model(inputs, outputs, name=\"generator\")\r\n\r\n # def __build_discriminator(self, input_shape):\r\n # image = Input(shape=input_shape, name='d_input')\r\n # x = layers.Conv2D(filters=16, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding)(image)\r\n # x = layers.BatchNormalization()(x)\r\n # x = layers.LeakyReLU()(x)\r\n # x = layers.MaxPooling2D(pool_size=2)(x)\r\n\r\n # x = layers.Conv2D(filters=32, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding)(x)\r\n # x = layers.BatchNormalization()(x)\r\n # x = layers.LeakyReLU()(x)\r\n # x = layers.MaxPooling2D(pool_size=2)(x)\r\n\r\n # x = Conv2D(filters=64, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding)(x)\r\n # x = layers.BatchNormalization()(x)\r\n # x = layers.LeakyReLU()(x)\r\n # x = layers.MaxPooling2D(pool_size=2)(x)\r\n\r\n # x = layers.Flatten()(x)\r\n # x = layers.Dense(1, activation='sigmoid')(x)\r\n\r\n # return Model(image, x, name='discriminator')\r\n\r\n def __build_discriminator(self, input_shape):\r\n image = Input(shape=input_shape, name='d_input')\r\n cla_model = tf.keras.applications.MobileNetV2(include_top=False, weights=None, input_shape=input_shape, pooling=\"avg\")\r\n\r\n return Model(image, cla_model(image), name='discriminator')\r\n \r\n def train(self, dataset, epochs=100, batch_size=32):\r\n sample_count = 10\r\n sample = dataset[0:sample_count]\r\n sample_inputs = np.array(sample).astype(np.float32)\r\n predict_data = np.array(sample_inputs, dtype=np.float32)\r\n\r\n plt_shape = (self.image_shape[0], self.image_shape[1])\r\n if self.image_shape[2] > 1:\r\n plt_shape = (\r\n self.image_shape[0],\r\n self.image_shape[1],\r\n self.image_shape[2],\r\n )\r\n\r\n counter = 1\r\n # Record generator/R network reconstruction training losses.\r\n plot_epochs = []\r\n plot_g_recon_losses = []\r\n plot_mse = []\r\n\r\n # Load traning data, add random noise.\r\n sample_w_noise = get_noisy_data(dataset)\r\n\r\n # Adversarial ground truths\r\n ones = np.ones((batch_size, 1))\r\n zeros = np.zeros((batch_size, 1))\r\n\r\n for epoch in range(epochs):\r\n # Number of batches computed by total number of target data / batch size.\r\n batch_idxs = len(dataset) // batch_size\r\n \r\n for idx in range(0, batch_idxs):\r\n # Get a batch of images and add random noise.\r\n batch = dataset[idx * batch_size:(idx + 1) * batch_size]\r\n batch_noise = sample_w_noise[idx * batch_size:(idx + 1) * batch_size]\r\n batch_clean = dataset[idx * batch_size:(idx + 1) * batch_size]\r\n # Turn batch images data to float32 type.\r\n batch_images = np.array(batch).astype(np.float32)\r\n batch_noise_images = np.array(batch_noise).astype(np.float32)\r\n batch_clean_images = np.array(batch_clean).astype(np.float32)\r\n batch_fake_images = self.generator.predict(batch_noise_images)\r\n # Update D network, minimize real images inputs->D-> ones, noisy z->R->D->zeros loss.\r\n d_loss_real = 
self.discriminator.train_on_batch(batch_images, ones)\r\n d_loss_fake = self.discriminator.train_on_batch(batch_fake_images, zeros)\r\n\r\n # Update R network twice, minimize noisy z->R->D->ones and reconstruction loss.\r\n self.adversarial_model.train_on_batch(batch_noise_images, [batch_clean_images, ones])\r\n g_loss = self.adversarial_model.train_on_batch(batch_noise_images, [batch_clean_images, ones])\r\n last_g_loss = g_loss[1]\r\n val_loss = d_loss_real+d_loss_fake\r\n plot_epochs.append(epoch+idx/batch_idxs)\r\n plot_g_recon_losses.append(g_loss[1])\r\n \r\n # Create difference and MSE\r\n mse = []\r\n predictions = self.adversarial_model.predict(\r\n predict_data, batch_size=sample_count\r\n )[0]\r\n\r\n for pred_idx in range(0, len(predictions)):\r\n pred_image = predictions[pred_idx].reshape(plt_shape)\r\n diff = cv2.absdiff(sample_inputs[pred_idx], pred_image)\r\n mse.append(np.sum(diff * diff))\r\n\r\n mse = np.mean(mse)\r\n plot_mse.append(mse)\r\n #val_loss, val_gen_loss, val_dis_loss = self.adversarial_model.evaluate(sample_inputs, [sample_inputs, ones[0:sample_count]], sample_count, verbose=0)\r\n\r\n counter += 1\r\n if self.best_mse == -1 or mse < self.best_mse or val_loss-0.5 > 0.0:\r\n #self.best_se = last_se\r\n if self.best_mse == -1 or mse < self.best_mse:\r\n self.best_mse = mse\r\n self.save_weights()\r\n display.clear_output(wait=True)\r\n plots.plot_difference([predictions[0]], [sample_inputs[0]], self.image_shape, threshold=0.0, mask_color=\"Reds_r\")\r\n\r\n if val_loss-0.5 > 0.0:\r\n self.save_weights(\"disc_best.h5\")\r\n msg = 'Epoch:[{0}] --> dis_loss: {1:>0.3f}, recon_loss:{2:>0.3f}, mse:{3:>0.3f}'.format(epoch, val_loss, g_loss[1], mse)\r\n print(msg)\r\n return plot_epochs, plot_g_recon_losses, plot_mse\r\n\r\n def save_weights(self, model_name=\"best.h5\"):\r\n if self.checkpoint_dir == None:\r\n return\r\n os.makedirs(self.checkpoint_dir, exist_ok=True)\r\n self.adversarial_model.save_weights(os.path.join(self.checkpoint_dir, model_name))\r\n \r\n def load_weights(self, model_name=\"best.h5\"):\r\n file_path = os.path.join(self.checkpoint_dir, model_name)\r\n if os.path.exists(file_path) == False:\r\n return\r\n self.adversarial_model.load_weights(file_path)\r\n","sub_path":"src/models/anomaly_detection/ALOCC.py","file_name":"ALOCC.py","file_ext":"py","file_size_in_byte":10546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"500441092","text":"from pyspark import SparkContext\nsc = SparkContext()\n\n#checkin_id user_id session_id utc_time timezone_offset lat lon category subcategory\nval = 0\n\ndef filter_first(line):\n if (\"checkin_id\" in line):\n return False\n else:\n return True\n\ndef sumCheckins(x):\n val += int(x)\n\nraw_data = sc.textFile(\"dataset_TIST2015.tsv\")\n#raw_data = sc.textFile(\"dataset_TIST2015.tsv\")\n\n#mapping value second value with 1 to giv each ID a value of 1\n#then using reduceByKey to sum up the value of that ID with each matching ID\n#returning a list of ID's with count\ndata = raw_data.map(lambda line: line.split('\\t')) \\\n .filter(lambda line: filter_first(line))\n\nusers = data.map(lambda x: (x[1], 1)) \\\n .reduceByKey(lambda x,y: x+y)\n\ncheckins = users.map(lambda x: x[1]).sum()\n\nsessions = data.map(lambda x: (x[2], 1)) \\\n .reduceByKey(lambda x,y: x+y)\n\n\n#printing unique users\nprint(\"Num unique users: \" + str(users.count()))\nprint(\"Num checkin's: \" + str(checkins))\nprint(\"Num unique sessions: \" + str(sessions.count()))\n","sub_path":"scripts/unique.py","file_name":"unique.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"40902089","text":"from django.conf.urls.defaults import *\nfrom oswalpalash.views import api_view, api_fb,api_po,poke_api,ascii,gif\nurlpatterns = patterns('',\n (r'^', include('oswalpalash.urls')),\n \n)\nurlpatterns += patterns('',\n url(r'^football/(?P[\\w-]+)/?', api_view, name = \"api\"),\n url(r'^pokemon/(?P[\\w-]+)/?', poke_api, name = \"api\"),\n url(r'^pokemon', api_po),\n url(r'^football', api_fb),\n\turl(r'^ascii',ascii),\n\turl(r'^gif',gif),\n )\n","sub_path":"Django_AppEngine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"442435866","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.decomposition import PCA\n\nTOPIC_5_BIGRAM = './topic_modeling_data/topic-5-bigram-dis-vector.csv'\nTOPIC_5 = './topic_modeling_data/topic-5-dis-vector.csv'\nTOPIC_222_BIGRAM = './topic_modeling_data/topic-222-bigram-dis-vector.csv'\nTOPIC_222 = './topic_modeling_data/topic-222-dis-vector.csv'\n\nTOPIC_5_BIGRAM_NEW = './topic_modeling_data/topic_modeling_new/topic-5-bigram-new-dis-vector.csv'\nTOPIC_5_NEW = './topic_modeling_data/topic_modeling_new/topic-5-new-dis-vector.csv'\nTOPIC_222_BIGRAM_NEW = './topic_modeling_data/topic_modeling_new/topic-222-bigram-new-dis-vector.csv'\nTOPIC_222_NEW = './topic_modeling_data/topic_modeling_new/topic-222-new-dis-vector.csv'\n\nTEST_PREDICT_FILE = './dataset/topic-test-222-dis-vector.csv'\nSUBMISSION_FILE = './submission/disaster_topic_modelling.csv'\nSAMPLE_SUBMISSION_FILE = './dataset/sample_submission.csv'\n\ndef read_csv(file_name):\n print(str(file_name))\n print('------------------------------')\n data = pd.read_csv(file_name)\n\n # create dataframe\n train_df = pd.DataFrame(data)\n\n return train_df\n\ndf = read_csv(TOPIC_222_NEW)\n# print(df)\n\ny = df['target']\nX = df.drop(columns=['Unnamed: 0', 'target'])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=True)\n\n# classification model\n# these 3 models, especially BernoulliNB does not work well with these dataset\n# clf = BernoulliNB()\n# clf = MultinomialNB()\n# clf = SVC(kernel='linear')\n\n# clf = GaussianNB()\nclf = RandomForestClassifier(n_jobs=3, n_estimators=500, verbose=True)\n# clf = LogisticRegression(class_weight='balanced', solver='newton-cg')\n# clf = AdaBoostClassifier(n_estimators=500)\n\nclf.fit(X_train, y_train)\n\nprint(clf)\ny_pred = clf.predict(X_test)\nprint(accuracy_score(y_pred, y_test))\nprint(classification_report(y_pred, y_test))\n\n\n# prepare file to submission to kaggle\ntest = read_csv(TEST_PREDICT_FILE)\nsample_sub= read_csv(SAMPLE_SUBMISSION_FILE)\ntest = test.drop(columns=['Unnamed: 0'])\n\nsample_sub['target'] = clf.predict(test)\nsample_sub.to_csv(SUBMISSION_FILE,index=False)","sub_path":"disaster_topic_modeling.py","file_name":"disaster_topic_modeling.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"638849751","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 26 17:39:52 2017\n\n@author: Varun\n\"\"\"\nimport pandas as pd\nimport xgboost as xgb\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\nprint(\"XGBoost:\")\n\nsamples = pd.read_csv('FINAL.csv')\nfeatures = ['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']\n\nX_train, X_test, y_train, y_test = train_test_split(samples[['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']],samples['FGM'],test_size = 0.25, random_state=10)\n\nmodel0 = xgb.XGBClassifier()\nkfold = KFold(n_splits = 10, random_state = 7)\nresults = cross_val_score(model0, X_train, y_train, cv = kfold)\nprint(\"(all shots) Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nmodel0.fit(X_train, y_train)\nimportances = model0.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(X_train.shape[1]):\n print(\"%d. %s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\ndef corr_class(ground_truth, predictions):\n mat=confusion_matrix(ground_truth,predictions)\n return (mat[0][0]+mat[1][1]*1.0)/np.sum(mat)\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X_train.shape[1]), importances[indices],\n color=\"r\", align=\"center\")\nplt.xticks(range(X_train.shape[1]))\nplt.xlim([-1, X_train.shape[1]])\nplt.show()\n\nfpr, tpr, thresholds = roc_curve(model0.predict(X_test), y_test)\nauc = roc_auc_score(model0.predict(X_test), y_test)\n\nplt.figure()\nlw = 2\nplt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('XGBoost ROC')\nplt.legend(loc=\"lower right\")\n# This is the ROC curve\nplt.show() \n\nprint(\"Test True Positive Rate: \", corr_class(y_test, model0.predict(X_test)))\n\n\n\ntwoptsamples = samples[samples['PTS_TYPE'] == 0]\n\nX2_train, X2_test, y2_train, y2_test = train_test_split(twoptsamples[['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']],twoptsamples['FGM'],test_size = 0.25, random_state=10)\n\nmodel1 = xgb.XGBClassifier()\nkfold = KFold(n_splits = 10, random_state = 7)\nresults = cross_val_score(model1, X2_train, y2_train, cv = kfold)\nprint(\"(2 pt shots) Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nmodel1.fit(X2_train, y2_train)\nimportances = model1.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(X2_train.shape[1]):\n print(\"%d. 
%s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X2_train.shape[1]), importances[indices],\n color=\"r\", align=\"center\")\nplt.xticks(range(X2_train.shape[1]), indices)\nplt.xlim([-1, X2_train.shape[1]])\nplt.show()\n\nthreeptsamples = samples[samples['PTS_TYPE'] == 1]\n\nX3_train, X3_test, y3_train, y3_test = train_test_split(threeptsamples[['LOCATION','SHOT_NUMBER','GAME_CLOCK','SHOT_CLOCK','TOUCH_TIME','SHOT_DIST','CLOSE_DEF_DIST','FG%','DBPM','DAYS_SINCE_START','1','2','3','4','5','6']],threeptsamples['FGM'],test_size = 0.25, random_state=10)\n\nmodel2 = xgb.XGBClassifier()\nkfold = KFold(n_splits = 10, random_state = 7)\nresults = cross_val_score(model2, X3_train, y3_train, cv = kfold)\nprint(\"(3 pt shots) Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\nmodel2.fit(X3_train, y3_train)\nimportances = model2.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# Print the feature ranking\nprint(\"Feature ranking:\")\n\nfor f in range(X3_train.shape[1]):\n print(\"%d. %s (%f)\" % (f + 1, features[indices[f]], importances[indices[f]]))\n\n# Plot the feature importances of the forest\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X3_train.shape[1]), importances[indices],\n color=\"r\", align=\"center\")\nplt.xticks(range(X3_train.shape[1]), indices)\nplt.xlim([-1, X3_train.shape[1]])\nplt.show()\n\n","sub_path":"XGBoost.py","file_name":"XGBoost.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"624156667","text":"import io\nimport json\nimport flask\nfrom flask import Flask, jsonify, request\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision import models\nfrom PIL import Image\n\nprint(flask.__version__)\nprint(torchvision.__version__)\n\n\napp = Flask(__name__)\nimagenet_class_index = json.load(open(\"/Users/zfwang/machinelearning/deeplearning/src/src_pytorch/deploy/deploy_flask/imagenet_class_index.json\"))\nmodel = models.densenet121(pretrained = True)\nmodel.eval()\n\n\n# 准备图像\ndef transform_image(image_bytes):\n my_transforms = transforms.Compose([\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n image = Image.open(io.BytesIO(image_bytes))\n return my_transforms(image).unsqueeze(0)\n\n# 预测\ndef get_prediction(image_bytes):\n tensor = transform_image(image_bytes = image_bytes)\n outputs = model.forward(tensor)\n _, y_hat = outputs.max(1)\n predicted_idx = str(y_hat.item())\n return imagenet_class_index[predicted_idx]\n\n# API\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n file = request.files['file']\n img_bytes = file.read()\n class_id, class_name = get_prediction(image_bytes = img_bytes)\n return jsonify({\n 'class_id': class_id, \n 'class_name': class_name\n })\n\n\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"src/src_pytorch/deploy/deploy_flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"12656704","text":"import sys \nsys.path.append(\"../\")\nimport bencode\nfrom twisted.internet import reactor\nfrom twisted.internet import task\nimport socket\nimport PeerProtocol\nimport PeerFactory\nimport Peer\nfrom Client import RequestClient\n\nINTERVAL_CONNECT_PEER = 1/10\nINTERVAL_ADD_REQUEST = 1/300\nINTERVAL_SEND_REQUEST = 1/200\n# PEER_LISTEN_TCP_PORT = 6788\n# CLIENT_UDP_PORT = 56788\nimport random\nPEER_LISTEN_TCP_PORT = random.randint(6000, 7000)\nCLIENT_UDP_PORT = random.randint(50000, 57000)\n\ndef readMetafileFromFile(filename):\n return bencode.decode(open(filename, 'rb').read())\n\ndef main():\n metafile = readMetafileFromFile('../test.torrent')\n peer = Peer.Peer(PEER_LISTEN_TCP_PORT, reactor, metafile)\n reqClient = RequestClient(\n peer,\n PEER_LISTEN_TCP_PORT,\n clientIpstr = '127.0.0.1',\n # clientIpstr=socket.gethostbyname(socket.gethostname()),\n clientPort=CLIENT_UDP_PORT,\n protocol_id=1,\n info_hash=peer._getInfoHash(),\n peer_id=peer._getPeerID(),\n downloaded=0,\n left=0,\n uploaded=0,\n event=0,\n key=0,\n num_want=0)\n\n reactor.adoptDatagramPort(reqClient.portSocket.fileno(),\n socket.AF_INET, reqClient)\n\n reactor.listenTCP(PEER_LISTEN_TCP_PORT, peer.Serverfactory)\n # loopConnectPeer = task.LoopingCall(peer.tryConnectPeer)\n loopAddRequest = task.LoopingCall(peer.tryAddRequest)\n loopSendRequest = task.LoopingCall(peer.trySendRequest)\n\n reactor.callLater(INTERVAL_CONNECT_PEER, peer.tryConnectPeer)\n # loopConnectPeer.start(INTERVAL_CONNECT_PEER)\n loopAddRequest.start(INTERVAL_ADD_REQUEST)\n loopSendRequest.start(INTERVAL_SEND_REQUEST)\n\n reactor.run()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Bittorrent/another/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"177485599","text":"\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom networks.blocks import ConvBatchNormRelu2D, ConvGroupNormRelu2D\n\n# classical convolutional neural network implementation\nclass CNN(nn.Module):\n\n def __init__(self, input_size, conv_channels, fc_channels, kernel_size=3, group_norm=False):\n super(CNN, self).__init__()\n\n self.input_size = input_size\n self.conv_channels = conv_channels\n self.fc_channels = fc_channels\n self.kernel_size = kernel_size\n\n self.conv_features = nn.Sequential()\n self.fc_features = nn.Sequential()\n\n # convolutional layers\n in_channels = input_size[0]\n data_size = input_size[1]\n for i, out_channels in enumerate(conv_channels):\n if group_norm:\n self.conv_features.add_module('conv%d' % (i + 1), ConvGroupNormRelu2D(in_channels, out_channels, kernel_size=kernel_size))\n else:\n self.conv_features.add_module('conv%d' % (i + 1), ConvBatchNormRelu2D(in_channels, out_channels, kernel_size=kernel_size))\n in_channels = out_channels\n data_size /= 2\n\n # full connections\n in_channels = conv_channels[-1]*data_size*data_size\n for i, out_channels in enumerate(fc_channels):\n if i==len(fc_channels)-1:\n fc = nn.Sequential(nn.Linear(int(in_channels), out_channels))\n else:\n fc = nn.Sequential(nn.Linear(int(in_channels), out_channels),\n nn.BatchNorm1d(out_channels),\n nn.ReLU())\n self.fc_features.add_module('linear%d' % (i + 1), fc)\n in_channels = out_channels\n\n def forward(self, inputs):\n\n outputs = inputs\n for i in range(len(self.conv_channels)):\n outputs = getattr(self.conv_features, 'conv%d' % (i + 1))(outputs)\n outputs = F.max_pool2d(outputs, kernel_size=2)\n\n outputs = outputs.view(outputs.size(0),-1)\n for i in range(len(self.fc_channels)):\n outputs = getattr(self.fc_features, 'linear%d' % (i + 1))(outputs)\n\n return outputs","sub_path":"networks/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"652188804","text":"import numpy as np\nimport random\n\n\nclass SphericalNodes():\n\t\n\tdef __init__(self,radius,node_count,interval):\n\t\tnodes = 0\n\t\tslope = 0\n\t\tif node_count > 100000:\n\t\t\tprint('Warning: such a high node count will likely result in a mesh too complex to converge')\n\t\t\n\t\twhile nodes 0 else []\n\n def is_valid(self, word):\n if word is None or word.isspace() or len(word) == 0:\n return False\n return True\n\n\nwordList = [\n \"ant\", \"anthology\", \"antagonist\", \"antonym\",\n \"fun\", \"function\", \"factory\",\n \"trie\", \"trigger\", \"trigonometry\", \"tripod\"\n]\n\ntrie = Trie()\n\nfor word in wordList:\n trie.insert(word)\n\nprint(trie.auto_complete('')) # Result = []\nprint(trie.auto_complete(None)) # Result = []\nprint(trie.auto_complete(' ')) # Result = []\nprint(trie.auto_complete('123')) # Result = []\nprint(trie.auto_complete('an')) # Result = ['ant', 'anthology', 'antagonist', 'antonym']\nprint(trie.auto_complete('ant')) # Result = ['ant', 'anthology', 'antagonist', 'antonym']\nprint(trie.auto_complete('anto')) # Result = ['antonym']\nprint(trie.auto_complete('trig')) # Result = ['trigger', 'trigonometry']\nprint(trie.auto_complete('fun')) # Result = ['fun', 'function']\n\n\n\n","sub_path":"Problem Vs Algorithm/Autocomplete with tries/autocomplete.py","file_name":"autocomplete.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"336940959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 10 23:40:28 2019\n\n@author: caspe\n\"\"\"\n\nimport pandas as pd\nfrom fancyimpute import IterativeImputer\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV, KFold, ParameterGrid, ParameterSampler\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nimport lightgbm as lgb\nfrom nested_cv import nested_cv\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ntrain = pd.read_csv('./data/train.csv')\ntest = pd.read_csv('./data/test.csv')\n\ndef fill_ii(df):\n df_filled_ii = pd.DataFrame(IterativeImputer().fit_transform(df.values))\n df_filled_ii.columns = df.columns\n df_filled_ii.index = df.index\n\n return df_filled_ii\n\ndef data_engineering(train, test):\n train = train.drop(train.index[0])\n \n cc_data = pd.concat([train, test], sort=True)\n cc_data = cc_data.drop(['Id', 'SalePrice','Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)\n \n train[\"SalePrice\"] = np.log1p(train[\"SalePrice\"])\n y = train['SalePrice']\n \n cc_data = pd.get_dummies(cc_data, prefix_sep='_')\n \n cc_data = fill_ii(cc_data)\n \n X_train = cc_data[:train.shape[0]]\n X_test = cc_data[train.shape[0]:]\n \n return X_train,X_test,y\n\nX,X_test,y = data_engineering(train,test)\n\nmodels_to_run = [RandomForestRegressor(), xgb.XGBRegressor(), lgb.LGBMRegressor()]\nmodels_param_grid = [ \n { # 1st param grid, corresponding to RandomForestRegressor\n 'max_depth': [3, None],\n 'n_estimators': np.random.randint(100,1000,20)\n }, \n { # 2nd param grid, corresponding to XGBRegressor\n 'colsample_bytree': np.linspace(0.3, 0.5),\n 'n_estimators': np.random.randint(100,1000,20)\n },\n { # 3rd param grid, corresponding to LGBMRegressor\n 'learning_rate': [0.05],\n 'n_estimators': np.random.randint(100,1000,20),\n 'num_leaves': np.random.randint(10,30,10),\n 'reg_alpha' : (1,1.2),\n 'reg_lambda' : (1,1.2,1.4)\n }\n ]\n\n# Allocate inner arrays for each algorithm being run\nouter_score = [ [] for i in range(len(models_to_run)) ]\nbest_inner_score = [ [] for i in range(len(models_to_run)) ]\nbest_params = [ [] for i in range(len(models_to_run)) ]\n\n# Define parameters for function and run different algorithms in a loop\n# If sqrt_of_score = True, the default scoring will be RMSE\nfor i,model in enumerate(models_to_run):\n outer_score[i], best_inner_score[i], best_params[i] = nested_cv(X, y, model, models_param_grid[i], \n 5, 5, sqrt_of_score = True, do_recursive_feature_elimination=True)\n# Print the output of nested_cv function\nfor i,results in enumerate(zip(outer_score, best_inner_score, best_params)):\n print('Outer scores, inner score and best params for model {0}: \\n{1}\\n{2}\\n{3}\\n'\n .format(type(models_to_run[i]).__name__,results[0],results[1],results[2]))","sub_path":"Experimental/nestedCV_innerloop.py","file_name":"nestedCV_innerloop.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"162973336","text":"# #[?] N명의 점수 중에서 80점 이상인 점수의 합계\n\n# # 합계 알고리즘(Sum Algorithm): 주어진 범위에 주어진 조건에 해당하는 자료들의 합계\n\n# # [1] Input : n명의 점수\n\n# In[2]:\n\n\nscores = [ 100, 75, 50, 37, 90, 95 ]\n\n\n# In[17]:\n\n\nsum = 0 # 합계가 저장될 그릇\n\n\n# In[18]:\n\n\nN = len(scores) # 의사코드(슈도코드)\n\n\n# # [2] Process : 합계 알고리즘 영역 : 주어진 범위에 주어진 조건(필터링)\n\n# In[19]:\n\n\nfor i in range(0,N): # 주어진 범위\n if scores[i] >= 80: # 주어진 조건\n sum = sum + scores[i] # 처리\n\n\n# # [3] Output\n\n# In[20]:\n\n\nprint(f\"{N}명의 점수 중 80점 이상의 총점: {sum}\") # 결과 출력\n\n# [!] 디버거 사용하여 디버깅 사용하기 : F9 -> F5 -> F11 -> F5\n","sub_path":"Python Algorithm Learning/SumAlgorithm/SumAlgorithm.py","file_name":"SumAlgorithm.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"123063397","text":"from typing import List\n\nfrom admin import admin_site\nfrom sources import models\nfrom sources.admin.filters import AttributeeFilter\nfrom sources.admin.source_admins.source_admin import SourceAdmin, SourceForm\n\n\nclass TextualSourceForm(SourceForm):\n \"\"\"Form for adding/updating books.\"\"\"\n\n model = models.TextualSource\n\n class Meta:\n model = models.TextualSource\n exclude = []\n\n\nclass TextualSourceAdmin(SourceAdmin):\n \"\"\"Admin for textual sources.\"\"\"\n\n form = TextualSourceForm\n list_display = ['pk', 'html', 'detail_link', 'date_string']\n list_filter = ['verified', AttributeeFilter]\n\n def get_fields(self, request, model_instance=None):\n \"\"\"Return reordered fields to be displayed in the admin.\"\"\"\n fields: List = list(super().get_fields(request, model_instance))\n # Fields to display at the top, in order\n top_fields = ('full_string', 'creators', 'title')\n # Fields to display at the bottom, in order\n bottom_fields = (\n 'volume',\n 'number',\n 'page_number',\n 'end_page_number',\n 'container',\n 'description',\n 'citations',\n )\n index: int = 0\n for top_field in top_fields:\n if top_field in fields:\n fields.remove(top_field)\n fields.insert(index, top_field)\n index += 1\n for bottom_field in bottom_fields:\n if bottom_field in fields:\n fields.remove(bottom_field)\n fields.append(bottom_field)\n return fields\n\n\nadmin_site.register(models.Piece, TextualSourceAdmin)\n","sub_path":"sources/admin/source_admins/textual_source_admin.py","file_name":"textual_source_admin.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"590937717","text":"#N students take k apples and distribute them among each other evenly. The remaining (the undicisible ) part remains in\n#the basket. How many apples will each student get ? How many apples will remain in the basket ? The program reads the\n#number N and K. It should print the two answers for the question above.\n\nno_students=int(input(\"enter the number of students: \"))\nno_apples=int(input(\"enter the number of apples: \"))\n\nD = no_apples // no_students\nB = no_apples % no_students\n\nprint(f\"each student get {D} apples\")\nprint(f\"the remaining apples are {B}\")\n","sub_path":"venv/addition/apples.py","file_name":"apples.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"2012401","text":"from __future__ import division\r\nimport numpy as np\r\nimport random\r\nimport math\r\n\r\nmonopoly_board = [\"Go\", \"brown_1\", \"cc_1\", \"brown_2\", \"income_tax\", \"station_1\", \"light_blue_1\", \"chance_1\", \"light_blue_2\", \"light_blue_3\", \"prison_field\",\r\n\"pink_1\", \"electric_c\", \"pink_2\", \"pink_3\", \"station_2\", \"orange_1\", \"cc_2\", \"orange_2\", \"orange_3\", \"free_parking\",\r\n\"red_1\", \"chance_2\", \"red_2\", \"red_3\", \"station_3\", \"yellow_1\", \"yellow_2\", \"water_works\", \"yellow_3\", \"go_to_jail\",\r\n\"green_1\", \"green_2\", \"cc_3\", \"green_3\", \"station_4\", \"chance_3\", \"dark_blue_1\", \"luxury_tax\", \"dark_blue_2\"]\r\nchance_card_stack = [0, 24, 11, 999, 10, 5, 15, 39, 1, 777, 777, 777, 777, 777, 777, 777]\r\ncc_card_stack = [0, 777, 777, 777, 10, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777]\r\n\r\ndef advance_to_next_station(pos):\r\n if (pos < 5 or pos > 35):\r\n return 5\r\n elif pos < 15:\r\n return 15\r\n elif pos < 25:\r\n return 25\r\n elif pos < 35:\r\n return 35\r\n\r\n\r\ndef draw_card(special_field, pos):\r\n if \"cc\" in special_field:\r\n print(\" - community chest card has been drawn - \")\r\n num1 = np.random.randint(0,len(cc_card_stack))\r\n #print(\"random number out of stack: \" + str(num1))\r\n advance_to = cc_card_stack[num1]\r\n if advance_to == 777:\r\n advance_to = pos\r\n print(\" - no need to move - \")\r\n return 0\r\n else:\r\n print(\"- move forward \" + str(advance_to-pos) + \" steps - \")\r\n return advance_to - pos\r\n elif \"chance\" in special_field:\r\n print(\" - chance card has been drawn - \")\r\n num2 = np.random.randint(0,len(chance_card_stack))\r\n advance_to = chance_card_stack[num2]\r\n if advance_to == 12: # nearest utility\r\n print(\"Advance to the next utility!\")\r\n if pos <= 12:\r\n return 12 - pos\r\n else:\r\n return 28 - pos\r\n elif advance_to == 15: # nearest station\r\n print(\"Advance to the next station!\")\r\n advance_to = advance_to_next_station(pos)\r\n return (advance_to - pos)%40\r\n elif advance_to == 999: # go back 3 spaces\r\n advance_to = pos-3\r\n print(\"Go back 3 spaces!\")\r\n return - 3\r\n elif advance_to == 777: # no need to move\r\n advance_to = pos\r\n print(\" - no need to move - \")\r\n return 0 # don't move anywhere\r\n else: # go straight to position XYZ\r\n print(\"Advance to \" + monopoly_board[advance_to] + \" !\")\r\n return (advance_to - pos)%40\r\n\r\n\r\nclass card:\r\n def __init__(self, card_type, pos):\r\n self.card_type = card_type\r\n self.position = pos\r\n self.step_change = draw_card(card_type, pos)\r\n self.advance_to = (self.step_change + pos)%40\r\n","sub_path":"code/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"632032183","text":"\"\"\"Directory management utilities in Python.\n\n @author Vaishak K Nair (19MCMI08)\n\n\n\"\"\"\n\n\nimport os\nimport shutil\n\ndef processChoice():\n \"\"\"Process the outermost choice list.\n \"\"\"\n if choice == 1:\n createDir()\n elif choice == 2:\n modifyDir()\n elif choice == 3:\n navIntoDir()\n elif choice == 4:\n listDirs()\n else:\n print(\"Invalid choice\")\n\ndef createDir():\n \"\"\"Read a directory name from user and create it.\n \"\"\"\n print(\"Enter name for directory:\", end=\" \")\n name = input()\n try:\n os.mkdir(name)\n except FileExistsError:\n print(f\"Directory '{name}' already exists!\")\n else:\n print(f\"Created directory named '{name}'\")\n\ndef modifyDir():\n \"\"\"Display directory modification options.\n\n Read user choice and call appropriate functions.\n \"\"\"\n name = input(\"Enter the directory to be modified: \")\n while(True):\n \n choice = int(input(\"\"\"1) Rename directory.\n2) Copy directory to another.\n3) Move directory.\n4) Delete directory.\n5) Exit from modify mode.\\n\"\"\"))\n if(choice == 1):\n renameDir(name)\n elif(choice == 2):\n copyDir(name)\n elif(choice == 3):\n moveDir(name)\n elif(choice == 4):\n deleteDir(name)\n else:\n break\n\ndef renameDir(name):\n \"\"\"Rename the specified directory to a name given by user.\n \"\"\"\n newName = input(\"Enter new name for directory: \")\n try:\n os.rename(name, newName)\n except OSError as er:\n print(f\"Couldn't rename {name} to {newName}. Reason: {er.strerror}\")\n else:\n print(f\"Directory '{name}' successfully renamed to '{newName}'.\")\n\ndef copyDir(name):\n \"\"\"Copy the contents of a directory into the specified directory.\n \"\"\"\n targetDir = input(\"Enter target directory: \")\n try:\n shutil.copytree(name, targetDir)\n except FileExistsError as error:\n print(f\"Directory '{name}' couldn't be copied to '{targetDir}'. Reason: {error.strerror}\")\n except FileNotFoundError as error:\n print(f\"Directory '{name}' couldn't be copied to '{targetDir}'. Reason: {error.strerror}\")\n else:\n print(f\"Directory '{name}' copied to '{targetDir}'\")\n \ndef moveDir(name):\n \"\"\"Move the specified directory into the directory given by the user.\n \"\"\"\n targetName = input(\"Enter target directory: \")\n shutil.move(name, targetName)\n print(f\"Moved '{name}' to '{targetName}'\")\n\n\ndef deleteDir(name):\n \"\"\"Delete the specified directory and its contents.\n \"\"\"\n try:\n shutil.rmtree(name)\n except Error as error:\n print(f\"Couldn't delete '{name}' Reason: {error}\")\n else:\n print(f\"Deleted directory '{name}'\")\n\ndef navIntoDir():\n \"\"\"Navigate into the directory specified by the user.\n \"\"\"\n while(True):\n choice = int(input(\"\"\"1) Go to Parent Directory.\n2) Navigate to specific directory.\n3) Exit from Navigate Mode.\\n\"\"\"))\n if(choice == 1):\n os.chdir(os.pardir)\n elif(choice == 2):\n targetDir = input(\"Enter target path: \")\n try:\n os.chdir(targetDir)\n except OSError as error:\n print(f\"Couldn't change directory to '{targetDir}'. 
Reason: {error.strerror}\")\n else:\n print(f\"Current working directory changed to '{targetDir}'\")\n else:\n break\n \ndef listDirs():\n \"\"\"List the contents of a directory.\n\n Long listing is also available.\n \"\"\"\n while(True):\n choice = int(input(\"\"\"Enter your choice for method of listing :\n1) List of directories.\n2) List of directories and their details.\n3) Exit from List Mode.\\n\"\"\"))\n if(choice == 1):\n for fileName in os.listdir():\n print(fileName)\n elif(choice == 2):\n for dirEntry in os.scandir():\n stat = dirEntry.stat()\n print(\"d\" if dirEntry.is_dir() else \"-\", stat.st_mode, stat.st_uid, stat.st_gid,\n stat.st_size, stat.st_atime, dirEntry.name)\n else:\n break\n\nif __name__ == \"__main__\":\n print(\"-------------Implementing Directory Management------------\")\n while(True):\n print(\"Press the following to :\")\n print(\"1) Create a new directory.\")\n print(\"2) Modify a directory.\")\n print(\"3) Navigate into directory.\")\n print(\"4) Listing directories.\")\n print(\"5) Exit.\")\n choice = int(input())\n if(choice == 5):\n break\n processChoice()\n\n\n\n\n","sub_path":"A3E2/DirectoryManagement.py","file_name":"DirectoryManagement.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"591129567","text":"#------------------------------------------#\r\n# Title: CDInventory.py (Assignment_07)\r\n# Desc: CD Inventory Program.\r\n# Change Log: \r\n# 2/20/21, Eric Hoyle, consolidated code into functions under classes\r\n# updated main to incorporate new class and function calls. \r\n# 2/27/21, Eric Hoyle, Incorporated exception handling and data serialization.\r\n# Further consolidation of main into functions. Minor corrections\r\n# function arguments.\r\n# DBiesinger, 2030-Jan-01, Created File\r\n#------------------------------------------#\r\nimport pickle\r\n# -- DATA -- #\r\nstrChoice = '' # User input\r\nlstTbl = [] # list of lists to hold data\r\ndicRow = {} # dictionary of data row\r\ndatFileName = 'CDInventory.dat' # data storage file\r\nobjFile = None # file object\r\ncdData = None\r\n\r\n# -- PROCESSING -- #\r\nclass DataProcessor:\r\n \"\"\"Processing data supplied by the user\"\"\"\r\n \r\n @staticmethod \r\n def table_append(cdData, table):\r\n \"\"\"Appends new cd entry as dictionary to a list of dictionaries\r\n \r\n Args: \r\n cdData: Information aboout the CD \r\n table: The list that the new dictionary is appended to\r\n \r\n Returns: \r\n List of dictionaries with new dictionary appended to the list\r\n \r\n \"\"\"\r\n dicRow = {'ID': cdData[0], 'Title': cdData[1], 'Artist': cdData[2]}\r\n table.append(dicRow)\r\n return table\r\n\r\n @staticmethod\r\n \r\n def cd_remove(intIDDel, table):\r\n \"\"\"Removes the dictionary with ID key value specified by user in \r\n intIDDel from the list of dictionaries.\r\n \r\n Args:\r\n intIDDel (int): value for key ID specified by the user\r\n table: list of dictionaries the entry is removed from.\r\n \r\n Returns:\r\n Boolean value of cd removal status.\r\n \"\"\"\r\n intRowNr = -1\r\n blnCDRemoved = False\r\n # for row in lstTbl:\r\n for row in table:\r\n intRowNr += 1\r\n if row['ID'] == intIDDel:\r\n # del lstTbl[intRowNr]\r\n del table[intRowNr]\r\n blnCDRemoved = True\r\n break\r\n return blnCDRemoved\r\n \r\n \r\nclass FileProcessor:\r\n \"\"\"Processing the data to and from text file\"\"\"\r\n\r\n @staticmethod\r\n def read_file(file_name, table):\r\n \"\"\"Function to manage data ingestion from file to a list of \r\n dictionaries\r\n\r\n Reads the data from file identified by file_name into a 2D table\r\n (list of dicts) table one line in the file represents one dictionary \r\n row in table.\r\n\r\n Args:\r\n file_name (string): name of file used to read the data from\r\n table (list of dict): 2D data structure (list of dicts) that holds\r\n the data during runtime\r\n\r\n Returns:\r\n None.\r\n \"\"\"\r\n try: \r\n table.clear() # this clears existing data and allows to load data from file\r\n with open(file_name, 'rb') as dataread:\r\n picdata = pickle.load(dataread)\r\n for line in picdata:\r\n table.append(line)\r\n except FileNotFoundError as e:\r\n print('\\n{:*^66}'.format((e.__doc__).upper()),\r\n '\\n{:^66}'.format(' WARNING: Data not loaded').upper())\r\n\r\n @staticmethod\r\n def write_file(file_name, table):\r\n \"\"\"Function to manage data output from a list of dictionaries to a file \r\n\r\n Writes the data from a 2D table (lstTbl) to a file identified by \r\n file_name, one dictionary per row.\r\n \r\n Args:\r\n file_name (string): name of file used to read the data from\r\n table (list of dict): 2D data structure (lstTbl) that holds the \r\n data during runtime\r\n\r\n Returns:\r\n None.\r\n \"\"\"\r\n try:\r\n with open(file_name, 'wb') as datastore:\r\n pickle.dump(table, 
datastore)\r\n except FileNotFoundError as e:\r\n print('\\n{:*^66}'.format((e.__doc__).upper()),\r\n '\\n{:^66}'.format(' WARNING: Data not saved').upper())\r\n\r\n# -- PRESENTATION (Input/Output) -- #\r\n\r\nclass IO:\r\n \"\"\"Handling Input / Output\"\"\"\r\n @staticmethod\r\n def add_cd():\r\n \"\"\"Ask user for new ID, CD Title and Artist\r\n \r\n Args: \r\n None.\r\n \r\n Returns:\r\n list of information (ID, Title, and Artist) for a new CD entry\"\"\"\r\n \r\n print('Please enter info for the CD you would like to add:\\n')\r\n strID = ''\r\n n=3\r\n while strID == '':\r\n \r\n try:\r\n strID = int(input('Enter ID: ').strip())\r\n except ValueError:\r\n print('\\n* Don\\'t be a dummy! ID must be a number. Please try again *\\n'.upper())\r\n n-=1\r\n if n==0:\r\n input('You seem to be pretty dense. Let\\'s get you back to the main menu.')\r\n break\r\n continue\r\n strTitle = input('What is the CD\\'s title? ').strip()\r\n strArtist = input('What is the Artist\\'s name? ').strip()\r\n cdData =[strID, strTitle, strArtist]\r\n return cdData\r\n \r\n \r\n @staticmethod\r\n def print_menu():\r\n \"\"\"Displays a menu of choices to the user\r\n\r\n Args:\r\n None.\r\n\r\n Returns:\r\n None.\r\n \"\"\"\r\n\r\n print('\\n\\n')\r\n print('{:-^66}'.format(' Menu '),'\\n{:<}'.format('[l] Load Inventory from'),datFileName,\r\n '\\n{:<30}'.format('[a] Add CD'),'\\n{:<30}'.format('[i] Display Current Inventory'),\r\n '\\n{:<30}'.format('[d] Delete CD from Inventory'),\r\n '\\n{:<}'.format('[s] Save Inventory to'),datFileName,\r\n '\\n{:<30}'.format('[x] Exit'),\r\n '\\n{:-^66}'.format('-'))\r\n\r\n @staticmethod\r\n def menu_choice():\r\n \"\"\"Gets user input for menu selection\r\n\r\n Args:\r\n None.\r\n\r\n Returns:\r\n choice (string): a lower case sting of the users input out of the choices l, a, i, d, s or x\r\n\r\n \"\"\"\r\n choice = ' '\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n choice = input('Which operation would you like to perform? [l, a, i, d, s or x]: ').lower().strip()\r\n print() \r\n return choice\r\n\r\n @staticmethod\r\n def show_inventory(table):\r\n \"\"\"Displays current inventory table\r\n\r\n\r\n Args:\r\n table (list of dict): 2D data structure (list of dicts) that holds the data during runtime.\r\n\r\n Returns:\r\n None.\r\n\r\n \"\"\"\r\n print('\\n\\n')\r\n print('{:=^66}'.format(' The Current Inventory '))\r\n print('{:<6}{:30}{:30}'.format('ID','Title','Artist'))\r\n print('{:-^66}'.format('-'))\r\n for row in table:\r\n print('{:<6}{:30}{:30}'.format(*row.values()))\r\n print('{:=^66}'.format('='))\r\n \r\n @staticmethod\r\n def cd_removed_conf(removed):\r\n \"\"\" uses the status (True/False) of the boolean flag in the cd_remove\r\n function to return a printed statement of the status to the user.\r\n \r\n Args:\r\n None\r\n \r\n Returns:\r\n None\r\n \"\"\"\r\n print()\r\n if removed:\r\n print('The CD was removed\\n')\r\n else:\r\n print('Could not find this CD!\\n')\r\n\r\n @staticmethod\r\n def del_choice():\r\n n=3\r\n while n > 0:\r\n try:\r\n delID = int(input('Which ID would you like to delete? ').strip())\r\n except ValueError:\r\n print('\\n* Don\\'t be a dummy! ID must be a number. Please try again *\\n'.upper())\r\n n-=1\r\n if n==0:\r\n input('You seem to be pretty dense. Let\\'s get you back to the main menu.')\r\n break\r\n continue\r\n else:\r\n return delID\r\n\r\n# 1. When program starts, read in the currently saved Inventory\r\nFileProcessor.read_file(datFileName, lstTbl)\r\n\r\n# 2. 
start main loop\r\nwhile True:\r\n # 2.1 Display Menu to user and get choice\r\n IO.print_menu()\r\n strChoice = IO.menu_choice()\r\n\r\n # 3. Process menu selection\r\n try:\r\n if strChoice == 'x':# 3.1 process exit first\r\n break\r\n # 3.2 process load inventory\r\n if strChoice == 'l':\r\n print('WARNING: If you continue, all unsaved data will be lost and the Inventory re-loaded from file.')\r\n strYesNo = input('Type \\'yes\\' to continue and reload from {}. \\nPress any key to cancel: '.format(datFileName))\r\n if strYesNo.lower() == 'yes':\r\n print('reloading...')\r\n FileProcessor.read_file(datFileName, lstTbl)\r\n IO.show_inventory(lstTbl)\r\n else:\r\n input('canceling... Inventory data NOT reloaded. Press [ENTER] to continue to the menu.')\r\n IO.show_inventory(lstTbl)\r\n continue\r\n # 3.3 process add a CD\r\n elif strChoice == 'a':\r\n cdData = IO.add_cd()\r\n lstTbl = DataProcessor.table_append(cdData, lstTbl)\r\n IO.show_inventory(lstTbl)\r\n continue \r\n # 3.4 process display current inventory\r\n elif strChoice == 'i':\r\n IO.show_inventory(lstTbl)\r\n continue\r\n # 3.5 process delete a CD\r\n elif strChoice == 'd':\r\n # 3.5.1 get Userinput for which CD to delete\r\n # 3.5.1.1 display Inventory to user\r\n IO.show_inventory(lstTbl)\r\n # 3.5.1.2 ask user which ID to remove\r\n intIDDel = IO.del_choice()\r\n # 3.5.2 search thru table and delete CD\r\n removed = DataProcessor.cd_remove(intIDDel, lstTbl)\r\n IO.cd_removed_conf(removed)\r\n IO.show_inventory(lstTbl)\r\n continue\r\n # 3.6 process save inventory to file\r\n elif strChoice == 's':\r\n # 3.6.1 Display current inventory and ask user for confirmation to save\r\n IO.show_inventory(lstTbl)\r\n strYesNo = input('Save this inventory to file? [y/n] ').strip().lower()\r\n # 3.6.2 Process choice\r\n if strYesNo == 'y':\r\n # 3.6.2.1 save data\r\n FileProcessor.write_file(datFileName, lstTbl)\r\n else:\r\n input('The inventory was NOT saved to file. Press [ENTER] to return to the menu.')\r\n continue\r\n # 3.7 catch-all should not be possible, as user choice gets vetted in IO, but to be save:\r\n else:\r\n print('General Error')\r\n except Exception as e: #Exception for exceptions the propogate beyong function level\r\n print(e.__doc__)\r\n\r\n\r\n\r\n","sub_path":"CDInventory.py","file_name":"CDInventory.py","file_ext":"py","file_size_in_byte":10890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"346793334","text":"import os\nimport sys\nsys.path.append('..')\nsys.path.append('../..')\nimport argparse\nimport utils\nfrom student_utils_sp18 import *\nfrom dijkstra import single_source_dijkstra_path_length\nfrom min_dominating_set import min_weighted_dominating_set\n\n\n\n###### Helper Functions ######\n\ndef graph_creator(adjacency_matrix, number_of_kingdoms):\n\tedge_list = []\n\tfor i in range(number_of_kingdoms):\n\t\tfor j in range(i):\n\t\t\tweight = adjacency_matrix[i][j]\n\t\t\tif weight == \"x\":\n\t\t\t\tcontinue\n\t\t\tedge_list.append((i,j, weight))\n\tG = nx.Graph()\n\tnodelist = range(number_of_kingdoms)\n\tG.add_weighted_edges_from(edge_list, nodelist=nodelist)\n\treturn G\n\n\n\n##### Graph Solve Object Class #######\nclass GraphSolver:\n\n\tdef __init__(self, input_file):\n\t\tinput_data = utils.read_file(input_file)\n\t\tself.number_of_kingdoms, self.list_of_kingdom_names, self.starting_kingdom, self.adjacency_matrix = data_parser(input_data)\n\t\t\n\n\t\tself.source_index = self.list_of_kingdom_names.index(self.starting_kingdom)\n\t\t\n\t\tself.G = Graph(self.adjacency_matrix, self.number_of_kingdoms, self.source_index)\n\t\t\n\t\tself.dijk = self.G.get_dijkstra()\n\n\t\tself.kingdom_dict = dict()\n\t\tself.c_n = []\n\n\t\tfor i in range(self.number_of_kingdoms):\n\t\t\tself.c_n.append(self.adjacency_matrix[i][i])\n\t\t\tself.kingdom_dict[i] = self.list_of_kingdom_names[i]\n\n\t\tself.unconq = set(range(self.number_of_kingdoms))\n\n\t\tself.dominating_set = self.get_dominating_set()\n\t\tprint(self.dominating_set)\n\n\tdef get_conquer_cost(self, node_index):\n\n\t\treturn self.adjacency_matrix[node_index][node_index]\n\n\tdef get_total_neighbor_cost(self, node_index):\n\t\ttotal = 0\n\t\tfor neighbor in self.G.get_neighbors(node_index):\n\t\t\tif neighbor == self.source_index:\n\t\t\t\tcontinue\n\t\t\ttotal += self.get_conquer_cost(neighbor)\n\n\t\treturn total \n\n\tdef get_dom_weight(self):\n\t\tlst = []\n\t\tfor i in range(self.number_of_kingdoms):\n\t\t\tlst.append(self.get_conquer_cost(i))\n\t\treturn lst\n\n\tdef get_dominating_set(self):\n\t\treturn min_weighted_dominating_set(self.G.graph, self.get_dom_weight())\n\n\n\n####### Graph Object Class ####\nclass Graph:\n\tdef __init__(self, adjacency_matrix, number_of_kingdoms, source_index):\n\t\tself.graph = graph_creator(adjacency_matrix, number_of_kingdoms)\n\t\tself.source = source_index\n\n\n\tdef get_dijkstra(self):\n\t\treturn single_source_dijkstra_path_length(self.graph, self.source)\n\n\tdef get_neighbors(self, node):\n\t\treturn self.graph.neighbors(node)\n\n\tdef get_neighbors_levels(self, node, level):\n\t\tneighbors = set()\n\t\tcurr_level = []\n\t\twhile level > 0:\n\t\t\tcurr_level = []\n\n\tdef get_dominating_set():\n\t\treturn 0\n\nsolver = GraphSolver(\"small_test.in\")\n\n\n\n\n","sub_path":"solver_draft.py","file_name":"solver_draft.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"627458465","text":"from flask import Flask, render_template, request, jsonify\nimport sqlite3\n\napp = Flask(__name__)\n\nconnection = sqlite3.connect('database.db')\nprint('Opened database Successfully')\n\nconnection.execute('CREATE TABLE IF NOT EXISTS posts (title TEXT, post TEXT)')\n\nprint('Table created Successfully')\nconnection.close()\n\n@app.route('/')\ndef route():\n\treturn render_template('home.html')\n\n@app.route('/addnew')\ndef addnew():\n\treturn render_template('newmovie.html')\n\n\n@app.route('/addmovie', methods=['POST'])\ndef addmovie():\n\tconnection = sqlite3.connect('database.db')\n\tcursor = connection.cursor()\n\tprint('hi')\n\ttry:\n\t\tname = request.form['name']\n\t\tyear = request.form['year']\n\t\tgenre = request.form['genre']\n\t\tcursor.execute('INSERT INTO movies (name, year, genre) VALUES (?,?,?)', (name, year, genre))\n\t\tconnection.commit()\n\t\tmessage = 'Record succesfully added'\n\texcept:\n\t\tconnection.rollback()\n\t\tmessage = 'error in insert operation'\n\tfinally: \n\t\treturn render_template('result.html', message = message)\n\t\tconnection.close()\n\t\t\n\n\t\t\n\n\n@app.route('/movies')\ndef movies():\n\tconnection = sqlite3.connect('database.db')\n\tcursor = connection.cursor()\n\tcursor.execute('SELECT * FROM movies')\n\tmovie_list = cursor.fetchall()\n\tconnection.close()\n\treturn jsonify(movie_list)\n\napp.run(debug = True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"3806708","text":"# dic='{\"name\":\"alex\"}'\n# f=open(\"hello\",\"w\")\n# f.write(dic)\n\n# f_read=open(\"hello\",\"r\")\n# data=f_read.read()\n# print(type(data))\n# data=eval(data)\n# print(data[\"name\"])\n\n# import json\n#\n#\n# dic={'name':'alex'}#---->{\"name\":\"alex\"}----->'{\"name\":\"alex\"}'\n# i=8 #---->'8'\n# s='hello' #---->\"hello\"------>'\"hello\"'\n# l=[11,22] #---->\"[11,22]\"\n#\n# f=open(\"new_hello\",\"w\")\n\n# dic_str=json.dumps(dic)\n# f.write(dic_str) #json.dump(dic,f)\n\n\n\n# f_read=open(\"new_hello\",\"r\")\n# data=json.loads(f_read.read()) # data=json.load(f)\n\n#\n# print(data[\"name\"])\n# print(data)\n# print(type(data))\n\n# print(s)\n# print(type(s))\n\n\n# data=json.dumps(dic)\n#\n# print(data) #{\"name\": \"alex\"}\n# print(type(data))\n\n\n#注意:\n# import json\n#\n# with open(\"Json_test\",\"r\") as f:\n# data=f.read()\n# data=json.loads(data)\n# print(data[\"name\"])\n\n#----------------------pickle-------\nimport pickle\n\ndic = {'name': 'alvin', 'age': 23, 'sex': 'male'}\n\nprint(type(dic)) # \n\n# j = pickle.dumps(dic)\n# print(type(j)) # \n#\n# f = open('序列化对象_pickle', 'wb') # 注意是w是写入str,wb是写入bytes,j是'bytes'\n# f.write(j) # -------------------等价于pickle.dump(dic,f)\n#\n# f.close()\n# # -------------------------反序列化\nimport pickle\n\nf = open('序列化对象_pickle', 'rb')\n\ndata = pickle.loads(f.read()) # 等价于data=pickle.load(f)\n\nprint(data['age'])\n# # -------------------------shelve模块---------\nimport shelve\n\nf = shelve.open(r'shelve1') # 目的:将一个字典放入文本 f={}\n#\n# f['stu1_info']={'name':'alex','age':'18'}\n# f['stu2_info']={'name':'alvin','age':'20'}\n# f['school_info']={'website':'oldboyedu.com','city':'beijing'}\n# f.close()\n\nprint(f.get('stu1_info')['age'])\n\n\n\n# dic={}\n#\n# dic[\"name\"]=\"alvin\"\n# dic[\"info\"]={\"name\":\"alex\"}\n\n","sub_path":"Python/day22-os,json,re等模块/json&pickle.py","file_name":"json&pickle.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"91257055","text":"import os\nimport mysql.connector\nfrom flask import Blueprint\nfrom flask import request\nfrom flask import jsonify\nfrom flask import abort\n\nfrom models.emergency import add_emergency\n\nMIN_LUX = 401\nMAX_LUX = 1000\nMAX_VOC = 0.7\nMIN_DEGREE = 15\nMAX_DEGREE = 30\nMAX_HUMIDITY = 60\n\nenv_data_blueprint = Blueprint('env_data', __name__)\n\n@env_data_blueprint.route(\"/add\", methods=[\"POST\"]) #Add a new record\ndef add():\n fields = [\"room_id\", \"lux\", \"voc\", \"degree\", \"humidity\"]\n data = request.json\n\n for field in fields:\n if (not field in data):\n return abort(400)\n\n if (add_env_data(data[\"room_id\"], data[\"lux\"], data[\"voc\"], data[\"degree\"], data[\"humidity\"])):\n return jsonify({\"message\" : \"OK\"})\n else:\n return abort(500)\n\n@env_data_blueprint.route(\"/\")\ndef get_latest():\n result = get_latest_env_data()\n if(result is not None):\n return jsonify(result)\n else:\n return abort(500)\n\n@env_data_blueprint.route(\"/series/\", methods=[\"GET\"])\ndef get_series():\n room_id = request.args.get(\"room_id\", default=None, type=int)\n field = request.args.get(\"field\", default=None, type=str)\n start = request.args.get(\"start\", default=None, type=str)\n end = request.args.get(\"end\", default=None, type=str)\n\n if (room_id is not None) and (field is not None):\n result = get_env_data_series(room_id, field, start, end)\n if(result is not None):\n return jsonify({\"values\" : result})\n else:\n return abort(500)\n else:\n return abort(400)\n\n\n\ndef add_env_data(room_id: int, lux: int, voc: float, degree: float, humidity: int) -> bool:\n try:\n database = mysql.connector.connect(user = os.getenv(\"DATABASE_USER\"), database = os.getenv(\"DATABASE_NAME\"), password = os.getenv(\"DATABASE_PASSWORD\"))\n cursor = database.cursor()\n\n values = (lux, voc, degree, humidity, room_id)\n sql = (\"\"\"INSERT INTO environmental_data (tmstp, lux, voc, degree, humidity, room_id) VALUES (NOW(), %d, %0.1f, %d, %d, %d)\"\"\" % values)\n cursor.execute(sql)\n database.commit()\n id = cursor.lastrowid\n database.close()\n\n emergency_flag = False\n emergency_string = []\n if ((lux < MIN_LUX) and (lux > 0)):\n emergency_flag = True\n emergency_string.append(\"lux-\")\n if (lux > MAX_LUX):\n emergency_flag = True\n emergency_string.append(\"lux+\")\n if (voc > MAX_VOC):\n emergency_flag = True\n emergency_string.append(\"voc+\")\n if (humidity > MAX_HUMIDITY):\n emergency_flag = True\n emergency_string.append(\"humidity+\")\n if ((degree < MIN_DEGREE) and (degree > 0)):\n emergency_flag = True\n emergency_string.append(\"degree-\")\n if (degree > MAX_DEGREE):\n emergency_flag = True\n emergency_string.append(\"degree+\")\n\n if (emergency_flag):\n tags = ';'.join(emergency_string)\n return add_emergency(0, 0, tags, id, None, None)\n else:\n return True\n except Exception as e:\n print(e)\n if (database.is_connected()):\n database.close()\n return False\n\ndef get_latest_env_data() -> list:\n try:\n database = mysql.connector.connect(user = os.getenv(\"DATABASE_USER\"), database = os.getenv(\"DATABASE_NAME\"), password = os.getenv(\"DATABASE_PASSWORD\"))\n cursor = database.cursor()\n\n cursor.execute(\"SELECT * FROM latest_env_data\")\n columns = [column[0] for column in cursor.description]\n data = []\n for row in cursor.fetchall():\n data.append(dict(zip(columns, row)))\n\n room_list = []\n for element in data:\n env_data = {'id' : element['id'], 'tmstp' : element['tmstp'], 'lux' : element['lux'], 'voc' : element['voc'], 'degree' : 
element['degree'], 'humidity' : element['humidity']}\n room = {'id' : element['room_id'], 'name' : element['name_room'], 'env_data' : env_data}\n room_list.append(room)\n\n database.close()\n return room_list\n except Exception as e:\n print(e)\n if (database.is_connected()):\n database.close()\n return None\n\ndef get_env_data_series(room_id: int, field: str, start: str, end: str) -> list:\n try:\n database = mysql.connector.connect(user = os.getenv(\"DATABASE_USER\"), database = os.getenv(\"DATABASE_NAME\"), password = os.getenv(\"DATABASE_PASSWORD\"))\n cursor = database.cursor()\n\n if (start != None and end != None):\n sql = \"\"\"SELECT %s, tmstp FROM pepperiot.environmental_data WHERE room_id = %d AND tmstp BETWEEN \"%s\" AND \"%s\";\"\"\" % (field, room_id, start, end)\n else:\n sql = \"\"\"SELECT %s, tmstp FROM pepperiot.environmental_data WHERE room_id = %d AND tmstp > DATE_SUB(NOW(), INTERVAL 2 MONTH) AND tmstp <= NOW();\"\"\" % (field, room_id)\n\n cursor.execute(sql)\n columns = [column[0] for column in cursor.description]\n data = []\n for row in cursor.fetchall():\n data.append(dict(zip(columns, row)))\n\n series = []\n for element in data:\n tmstp = element[\"tmstp\"]\n hour = (\"%s:%s\" % (tmstp.hour, tmstp.minute))\n series.append({\"hour\": hour, \"value\": element[field]})\n\n database.close()\n return series\n except Exception as e:\n print(e)\n if (database.is_connected()):\n database.close()\n return None","sub_path":"server/models/env_data.py","file_name":"env_data.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"598213763","text":"import requests\nfrom lxml import etree\nimport csv\nimport time\nimport sql\n# 首页url\nhome_page_url = 'http://www.socom.cn'\n# 详情页url\ndetail_url = 'http://www.socom.cn/company/16001195.html'\ndef get_html(url):\n try:\n resp = requests.get(url)\n if resp.status_code == 200:\n return resp\n else:\n return None\n except TimeoutError:\n get_html(url) \n\n\n \n except ConnectionError:\n get_html(url)\n\n# 获取到城市列表\ndef parse_home_page(home_page_url):\n # /html/body/div[5]/div[2]/a[1]\n citys_url_list = []\n resp = get_html(home_page_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n num = len(root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"provinceBox\"]'))\n for i in range(1, num + 1):\n province = root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"provinceBox\"][{}]/a/text()'.format(i))[0]\n # [-452:-4]\n citys = root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"cityBox\"][{}]/a/text()'.format(i))\n citys_url = root.xpath('//body/div[@class=\"contentBox\"][4]/div[@class=\"cityBox\"][{}]/a/@href'.format(i))\n # print(citys)\n citys = []\n for url in citys_url:\n citys.append(home_page_url + url)\n citys_url_list.append(citys)\n return citys_url_list\n\n# 判断地址是不是最终地址(省 -> 地级市 -> 县级市)\ndef city_is_end(city_url):\n resp = get_html(city_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n province = len(root.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"contentBox\", \" \" )) and (((count(preceding-sibling::*) + 1) = 3) and parent::*)]//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"cityBox\", \" \" ))]/a/text()'))\n print(province)\n if province == 35:\n return True\n else:\n return False\n\n# 获取县级市\ndef get_city_part(city_url):\n resp = get_html(city_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n city_parts= root.xpath('//body/div[@class=\"contentBox\"][1]/div[@class=\"cityBox\"]/a/@href')\n city_parts_url = []\n for part in city_parts:\n city_parts_url.append(home_page_url + part)\n return city_parts_url\n else:\n return None\n \n# 获取最终地区的企业分类\ndef get_part_url(city_url):\n resp = get_html(city_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n corps = root.xpath('//div[@class=\"contentBox\"][2]/div[@class=\"cityBox\"]/a[@class=\"countyBox\"]/@href')\n corps_url = []\n for part in corps:\n corps_url.append(home_page_url + part)\n return corps_url\n else:\n return None\n\n \n# 获取一个分类的所有企业的链接\ndef get_url_of_corp(part_url):\n resp = get_html(part_url)\n if resp:\n html = resp.text\n root = etree.HTML(html)\n parts = root.xpath('//div[@class=\"contentBox\"][3]/div[@class=\"cityBox\"]/a/@href')\n parts_url = []\n for part in parts:\n parts_url.append(home_page_url + part)\n return parts_url\n else:\n return None\n\n# 获取企业分类进入的url\ndef get_all_detail_url(home_page_url):\n # urls_list = parse_home_page(home_page_url)\n # for urls in urls_list:\n # for url in urls:\n # if city_is_end(url):\n # print(url)\n # else:\n # urls.extend(get_city_part(url))\n # urls.remove(url)\n # return urls_list\n last_city_list = []\n urls_list = sum(parse_home_page(home_page_url), [])\n # print(urls_list)\n for url in urls_list:\n print(url, end='\\t')\n if city_is_end(url):\n print('到头了')\n last_city_list.append(url)\n urls_list.remove(url)\n continue\n else:\n urls_list.extend(get_city_part(url))\n print('获取县级市')\n urls_list.remove(url)\n return last_city_list\n\n# 提取详情页的数据\ndef parser_detail(resp, db):\n detail = {}\n if resp:\n html = resp.text\n root = 
etree.HTML(html)\n info = root.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"cityBox\", \" \" ))]//div[(((count(preceding-sibling::*) + 1) = 1) and parent::*)]/text()') if root.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"cityBox\", \" \" ))]//div[(((count(preceding-sibling::*) + 1) = 1) and parent::*)]/text()') else None\n # print(info)\n if info:\n detail['公司名称'] = root.xpath('//div[@class=\"contentBox\"][2]/div[@class=\"provinceBox\"]/text()')[0]\n detail['地址'] = info[0].strip().split(':')[-1]\n detail['电话'] = info[1].strip().split(':')[-1]\n detail['传真'] = info[2].strip().split(':')[-1]\n detail['手机'] = info[3].strip().split(':')[-1]\n detail['网址'] = info[4].strip().split(':')[-1]\n detail['邮箱'] = info[5].strip().split(':')[-1]\n detail['联系人'] = info[6].strip().split(':')[-1]\n detail['公司人数'] = info[7].strip().split(':')[-1]\n detail['注册资金'] = info[8].strip().split(':')[-1]\n detail['经济类型'] = info[9].strip().split(':')[-1]\n detail['公司产品'] = info[10].strip().split(':')[-1]\n detail['公司简介'] = info[11].strip().split(':')[-1]\n # with open('sqw.csv', 'a', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # print([info for info in detail.values()])\n # writer.writerow([info for info in detail.values()])\n sql.insert_detail(db, detail)\n return detail\n else:\n return None\n\n\n\ndef main():\n db = sql.init()\n # with open('sqw.csv', 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['公司名称', '地址', '电话', '传真', '手机', '网址', '邮箱', '联系人', '公司人数', '注册资金', '经济类型', '公司产品', '公司简介'])\n detail_urls = get_all_detail_url(home_page_url)\n # 遍历所有的城市\n for url in detail_urls:\n print('下载', url)\n part_url = get_part_url(url)\n # 遍历所有的分类\n for part in part_url:\n print('分类信息', part)\n corp_url = get_url_of_corp(part)\n # 遍历所有的企业\n for corp in corp_url:\n print('公司链接', corp)\n detail = parser_detail(get_html(corp), db)\n time.sleep(1)\n # detail = parser_detail(get_html(url))\n # writer.writerow([info for info in detail.values()])\n # print(city_is_end('http://www.socom.cn/xinjiang/kelamayi/baijiantan/'))\n\nif __name__ == '__main__':\n main()","sub_path":"SQWSpider/sqwSpider.py","file_name":"sqwSpider.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"178415269","text":"import json\nimport requests\n\nfrom modules import settings\nfrom modules.sdr_util import make_pairs\n\nSDR_BUDGET = settings['SDR_BUDGET']\nONEFORGE_API_KEY = settings['ONEFORGE_API_KEY']\n\n\ndef tset():\n pairs = make_pairs('USD')\n print(json.dumps(pairs))\n\n params = {\n \"pairs\": \",\".join(pairs),\n \"api_key\": ONEFORGE_API_KEY\n }\n print(json.dumps(params))\n\n quotes = requests.get(\"https://forex.1forge.com/1.0.3/quotes\", params=params).json()\n print(json.dumps(quotes))\n\n prices = {}\n for q in quotes:\n prices[q['symbol'][3:]] = q['price']\n\n prices['USD'] = 1\n print(json.dumps(prices))\n\n equivalents = {}\n for c, a in SDR_BUDGET.items():\n e = prices[c] * a\n equivalents[c] = e\n\n print(json.dumps(equivalents))\n\n\nif __name__ == '__main__':\n tset()\n","sub_path":"priceserver/modules/sdr/wip/1forge.py","file_name":"1forge.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"161775377","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass CostFunction:\n \"\"\"\n note:\n we call a chromosome, solution vector.\n \"\"\"\n\n def __init__(self, dimensions, lower_bound, upper_bound, name):\n self.__dimensions = dimensions\n # maximum and minimum of response boundary in each dimension:\n self.__lower_bound = lower_bound\n self.__upper_bound = upper_bound\n self.__costFunction_name = name # only a name for plotting\n\n @property\n def dimensions(self):\n return self.__dimensions\n\n @property\n def lower_bound(self):\n return self.__lower_bound\n\n @property\n def upper_bound(self):\n return self.__upper_bound\n\n @staticmethod\n def _discrete_decoding(original_function):\n \"\"\"\n Decorator function that decode continues space to discrete space by sorting.\n for example consider given solution_vector [5.1, 6.3, 4.008, 9.1, 6.02] to the original function.\n\n This decorator make the original function to work with discrete version of the given solution_vector which in this\n example is [2 0 4 1 3].\n This can be used in discrete problems like N-Queen or TSP.\n \"\"\"\n\n def wrapper(self, solution_vectors: np.ndarray, *args, **kwargs):\n if len(solution_vectors.shape) > 1:\n solution_vectors = np.argsort(solution_vectors, axis=1).reshape(-1, self.dimensions)\n else:\n solution_vectors = np.argsort(solution_vectors).reshape(1, self.dimensions).flatten()\n return original_function(self, solution_vectors, *args, **kwargs)\n return wrapper\n\n def compute_cost(self, solution_vectors):\n \"\"\"\n temporary docstring\n :param solution_vectors:\n :return:\n \"\"\"\n raise NotImplementedError\n\n def visual_result(self, solution_vector):\n raise NotImplementedError\n\n def plot_cost_vs_iteration(self, costs):\n plt.plot(costs)\n plt.title(self.__costFunction_name)\n plt.xlabel('iteration')\n plt.ylabel('Cost')\n plt.show()\n\n def print_step_result(self, solution_vector, iteration: int = \"\"):\n \"\"\"\n Use this to print result in each iteration.\n This prints a simple result of the last solution (an chromosome) that its cost has been computed.\n \"\"\"\n cost = self.compute_cost(solution_vector)\n print(f\"solution {solution_vector} with the cost: {cost} in the iteration {iteration}\")\n\n\nclass Sphere(CostFunction):\n\n def __init__(self):\n # set Sphere parameters:\n super().__init__(dimensions=5, lower_bound=-10, upper_bound=+10, name=\"Sphere\")\n\n def compute_cost(self, solution_vectors: np.ndarray):\n solution_vectors = np.array(solution_vectors)\n solution_vectors = solution_vectors.reshape(-1, self.dimensions)\n cost = np.sum(solution_vectors ** 2, axis=1)\n return cost\n\n def visual_result(self, solution_vector):\n pass\n\n\nclass NQueen(CostFunction):\n\n def __init__(self, num_of_queen: int = 8):\n super().__init__(dimensions=num_of_queen, lower_bound=0, upper_bound=1, name=\"N Queen\")\n\n @CostFunction._discrete_decoding\n def visual_result(self, solution_vector):\n \"\"\"\n Show a representation of N Queen problem with the given solution.\n :param solution_vector: solution got by get_print_solution method. 
its a agent_row or (chromosome)\n :return:\n \"\"\"\n size = len(solution_vector)\n for row in range(size):\n line = \" \"\n for col in range(size):\n if solution_vector[row] == col:\n line += \"👑 \"\n else:\n line += \"⬜ \"\n print(line)\n\n @CostFunction._discrete_decoding\n def compute_cost(self, solution_vectors):\n\n if len(np.shape(solution_vectors)) == 1: # < if there is only one row in agents_rows, do>:\n return self.__compute_cost_of_a_row(solution_vectors)\n\n costs = []\n for agent in solution_vectors:\n # add computed fitness to the list of costs:\n costs = np.append(costs, self.__compute_cost_of_a_row(agent))\n return costs\n\n @CostFunction._discrete_decoding\n def print_step_result(self, solution_vector, iteration: int = \"\"):\n super().print_step_result(solution_vector, iteration)\n\n def __compute_cost_of_a_row(self, agent_row):\n # compute cost for an agent or chromosome\n\n x = list(range(self.dimensions))\n y = agent_row\n # y = np.argsort(agent_row) # change coding representation to discrete number. #todo: did you replaced?\n\n cost = 0\n for i in range(self.dimensions - 1):\n for j in range(i + 1, self.dimensions):\n if np.abs(x[i] - x[j]) == np.abs(y[i] - y[j]):\n cost = cost + 1\n return cost\n\n\nclass TravellingSalesmanProblem(CostFunction):\n\n def __init__(self, num_of_cities: int = None, distance_range: int = None):\n super().__init__(num_of_cities, lower_bound=0, upper_bound=1, name=\"TSP\")\n\n # virtual distance between cites:\n self.x_axises = None\n self.y_axises = None\n\n if distance_range is not None:\n self.distance_range = distance_range\n self.__generate_cities()\n else:\n self.distance_range = None\n\n def __generate_cities(self):\n self.x_axises = np.random.randint(0, self.distance_range, size=self.dimensions)\n self.y_axises = np.random.randint(0, self.distance_range, size=self.dimensions)\n\n self.__compute_distance()\n # self.plot_cities()\n\n def __compute_distance(self):\n # compute distance:s\n self.distance_matrix = np.zeros(\n [self.dimensions, self.dimensions]) # make a empty n×n matrix, __dimension = number of cities\n\n # compute euclidean distance between cities and store it in distance matrix:\n for row in range(0, self.dimensions - 1): # this was dimensions - 1\n for column in range(row + 1, self.dimensions): # this was row + 1\n self.distance_matrix[row, column] = np.sqrt(\n np.exp2(self.x_axises[row] - self.x_axises[column]) + np.exp2(\n self.y_axises[row] - self.y_axises[column])) # upper triangular matrix\n # diagonal is zero:\n # if row == column:\n # self.distance_matrix[column, row] = np.inf\n self.distance_matrix[column, row] = self.distance_matrix[row, column] # and lower triangular matrix..\n # is the same is the upper.\n # join x and y axises, first row: x axises second is y axises:\n self.cities = np.append(self.x_axises.reshape(1, -1), self.y_axises.reshape(1, -1), axis=0)\n\n def plot_cities(self):\n plt.scatter(self.cities[0, :], self.cities[1, :], marker='o')\n plt.show()\n\n @CostFunction._discrete_decoding\n def compute_cost(self, solution_vectors):\n \"\"\"\n\n :param solution_vectors: matrix is a combination order to travel to cities.\n :return: cost of the given order.\n \"\"\"\n # change coding representation to discrete number:\n\n if self.distance_range is None:\n raise Exception(\"There are no cities; Initialize distance_range on \"\n \"object definition whether use create_cities() function to make your own cities.\")\n cost = 0\n\n one_agents = len(solution_vectors.shape) == 1 # do we have just one agent? 
(agents_rows contain only one row?)\n\n # adding the first element to the last. e.g. 5 > 4 > 3 > [5].\n if one_agents:\n # solution_vectors = np.argsort(solution_vectors)\n solution = np.append(solution_vectors, solution_vectors[0])\n\n # need a loop to travel to the all cities:\n solution = solution.astype(int) # todo: remove this\n for index in range(0, self.dimensions):\n i = solution[index] # distance of the first city to\n ii = solution[index + 1] # the second city is following:\n cost += self.distance_matrix[i, ii]\n else:\n # solution_vectors = np.argsort(solution_vectors, axis=1)\n solution = np.hstack((solution_vectors, solution_vectors[:, 0].reshape(-1, 1)))\n\n # need a loop to travel to the all cities:\n solution = solution.astype(int) # todo: remove this\n for index in range(0, self.dimensions):\n i = solution[:, index] # distance of the first city to\n ii = solution[:, index + 1] # the second city is following:\n cost += self.distance_matrix[i, ii]\n\n return cost\n\n @CostFunction._discrete_decoding\n def visual_result(self, solution_vector):\n \"\"\"\n Show a representation of TSP problem with the given solution.\n :param solution_vector: solution got by get_print_solution method. its a agent_row or (chromosome)\n \"\"\"\n # adding the first element to the last. e.g. 5 > 4 > 3 > [5]:\n solution = np.append(solution_vector, solution_vector[0])\n\n solution = solution.astype(int)\n\n plt.scatter(self.cities[0, :], self.cities[1, :], marker='o')\n # annotate_cities = np.arange(1, self.dimensions + 1)\n\n # add number annotate to cities:\n # for num in annotate_cities:\n # aplot.annotate(num, (self.cities[0, num], self.cities[1, num]))\n\n plt.plot(self.cities[0, solution], self.cities[1, solution])\n plt.show()\n\n @CostFunction._discrete_decoding\n def print_step_result(self, solution_vector, iteration: int = \"\"):\n super().print_step_result(solution_vector, iteration)\n\n\n\"\"\"\n def create_cities(self, x: list, y: list):\n create cities by user.\n :param x: x vector axis parameters\n :param y: y vector axis parameters\n :return:\n\n if len(x) != len(y):\n raise Exception(\"x and y must be same size\")\n self.x_axises = np.array(x)\n self.y_axises = np.array(y)\n\n num_of_cities = len(x)\n self.dimensions = num_of_cities\n\n # maximum and minimum of response boundary in each __dimension:\n self.min_boundary = 0\n self.max_boundary = num_of_cities - 1\n\n self.distance_range = True\n\n self.__compute_distance()\n\"\"\"\n","sub_path":"grasshopper optimization algorithm/cost_functions.py","file_name":"cost_functions.py","file_ext":"py","file_size_in_byte":10520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"405627422","text":"import sys\nimport os\nimport config\nfrom xml.etree import ElementTree\n\n\nclass shottask(object):\n\t\n\ttasks=[]\n\txmlfile=\"\"\n\n\t_shot=\"shot\"\n\t_dstpath=\"dstpath\"\n\t_srcpath=\"srcpath\"\n\t_synpath=\"synpath\"\n\n\tdef __init__(self, xmlfile):\n\t\tself.xmlfile=xmlfile\n\t\tif not os.path.isfile(xmlfile):\n\t\t\tmessage=\"% not exist!\" %xmlfile\n\t\t\tconfig.log.e(message)\n\t\t\tprint(message)\n\n\n\tdef read_attrib(self, dicts, node):\n\t\tattrib=node.attrib\n\t\tdicts[self._shot]=node.tag\n\t\tfor key in attrib:\n\t\t\tdicts[key]=attrib[key]\n\n\tdef read_xml(self):\n\t\ttext=open(self.xmlfile).read()\n\n\t\troot=ElementTree.fromstring(text)\n\t\t#print(root)\n\n\t\tfor child in root:\n\t\t\tshot={}\n\t\t\tself.read_attrib(shot, child)\n\t\t\tself.tasks.append(shot)\n\n\t\tprint(self.tasks)\n\t\treturn self.tasks\n\n\nif __name__=='__main__':\n\tfilename=sys.argv[1]\n\tst=shottask(filename)\n\ttask=st.read_xml()\n\n","sub_path":"pipeline/readshottask.py","file_name":"readshottask.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"471718087","text":"_base_ = [\n '../fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py'\n]\n\nmodel = dict(\n type='Distilling_Fcos',\n \n distill = dict(\n teacher_cfg='./configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py',\n teacher_model_path='/workspace/S/duzhixing/workspace/model/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth',\n \n distill_warm_step=500,\n distill_feat_weight=0.01,\n distill_cls_weight=0.05,\n # distill_bbox_weight=0.002,\n \n stu_feature_adap=dict(\n type='ADAP',\n in_channels=256,\n out_channels=256,\n num=5,\n kernel=3\n ),\n )\n)\n\n# # optimizer\n# optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\n# optimizer_config = dict(grad_clip=None)\n# # learning policy\n# lr_config = dict(\n# policy='step',\n# warmup='linear',\n# warmup_iters=500,\n# warmup_ratio=0.001,\n# step=[16, 22])\n# total_epochs = 24\n\n\nseed=520\n\nfind_unused_parameters=True\n\n\n\n","sub_path":"configs/distill_fcos/resnet50_resnet101_fcos_mstrain_max.py","file_name":"resnet50_resnet101_fcos_mstrain_max.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"298332975","text":"from Resources import *\n\n\nclass LifeSprite(pygame.sprite.Sprite):\n PW = None\n PH = None\n\n def __init__(self, loc):\n super().__init__()\n self.PW = lifeim.get_width()\n self.PH = lifeim.get_height()\n self.image = lifeim\n self.rect = self.image.get_rect()\n self.rect.center = loc\n self.rect.centery -= self.PH/2\n self.rect.centerx -= self.PW / 2\n self.mask = pygame.mask.from_surface(self.image)\n\n self.life = 1\n\n def update(self, val):\n if val:\n self.rect.centery += 1\n","sub_path":"LifeSprite.py","file_name":"LifeSprite.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"71316616","text":"'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n3 seconds per epoch on a Titan X Pascal.\n\nue.exec('mnistKeras.py')\n'''\n\nfrom __future__ import print_function\nimport numpy as np\n\nclass Logwrapper(object):\n def __init__(self):\n self.terminal = ue.log\n\n def write(self, message):\n ue.log(message)\n\n def flush(self):\n ue.log(\"\")\n\nimport unreal_engine as ue\nimport sys\n\nimport _thread as thread\n\n#wrap default logs so we get all print()\nsys.stdout = Logwrapper()\nsys.stderr = Logwrapper()\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\n\ndef train():\n print(\"Training started...\")\n\n np.random.seed(1337) # for reproducibility\n batch_size = 128\n nb_classes = 10\n nb_epoch = 12\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n # number of convolutional filters to use\n nb_filters = 32\n # size of pooling area for max pooling\n pool_size = (2, 2)\n # convolution kernel size\n kernel_size = (3, 3)\n\n # the data, shuffled and split between train and test sets\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n if K.image_dim_ordering() == 'th':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n X_train /= 255\n X_test /= 255\n ue.log('X_train shape:' + str(X_train.shape))\n ue.log(str(X_train.shape[0]) + 'train samples')\n ue.log(str(X_test.shape[0]) + 'test samples')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_test = np_utils.to_categorical(y_test, nb_classes)\n\n model = Sequential()\n\n model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],\n border_mode='valid',\n input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n print(X_train)\n\n print(Y_train)\n\n #model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,\n # verbose=1, validation_data=(X_test, Y_test))\n #score = model.evaluate(X_test, Y_test, verbose=0)\n #ue.log('Test score:' + str(score[0]))\n #ue.log('Test accuracy:' + str(score[1]))\n\n#start thread\nthread.start_new_thread(train, ())\n\n#train()","sub_path":"Content/Scripts/mnistKeras.py","file_name":"mnistKeras.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"594829156","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom cell import ConvLSTM\nimport numpy as np\nfrom tools.utils import gaussian_mask\n\n\nclass VideoSaliency(nn.Module):\n def __init__(self):\n super(VideoSaliency, self).__init__()\n\n self.size = 400\n\n ############### R1 ###############\n self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=(1, 1) )\n self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=(1, 1) )\n\n self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=(1, 1) )\n self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=(1, 1) )\n\n self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n\n self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n\n self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n\n self.fc6 = nn.Conv2d(512, 4096, kernel_size=4, dilation=4, padding=(6, 6) )\n self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, dilation=4 )\n self.fc8 = nn.Conv2d(4096, 1, kernel_size=1 )\n\n self.pool4_conv = nn.Conv2d(512, 128, kernel_size=3, padding=(1, 1) )\n self.pool4_fc = nn.Conv2d(128, 128, kernel_size=1 )\n self.pool4_ms_saliency = nn.Conv2d(128, 1, kernel_size=1 )\n\n ############### R2 ###############\n self.conv1_1_r2 = nn.Conv2d(4, 64, kernel_size=3, padding=(1, 1) )\n self.conv1_2_r2 = nn.Conv2d(64, 64, kernel_size=3, padding=(1, 1) )\n\n self.conv2_1_r2 = nn.Conv2d(64, 128, kernel_size=3, padding=(1, 1) )\n self.conv2_2_r2 = nn.Conv2d(128, 128, kernel_size=3, padding=(1, 1) )\n\n self.conv3_1_r2 = nn.Conv2d(128, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_2_r2 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n self.conv3_3_r2 = nn.Conv2d(256, 256, kernel_size=3, padding=(1, 1) )\n\n self.conv4_1_r2 = nn.Conv2d(256, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_2_r2 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n self.conv4_3_r2 = nn.Conv2d(512, 512, kernel_size=3, padding=(1, 1) )\n\n self.conv5_1_r2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_2_r2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n self.conv5_3_r2 = nn.Conv2d(512, 512, kernel_size=3, dilation=2, padding=(2, 2) )\n\n self.fc6_r2 = nn.Conv2d(512, 4096, kernel_size=4, dilation=4, padding=(6, 6) )\n self.fc7_r2 = nn.Conv2d(4096, 4096, kernel_size=1, dilation=4 )\n self.fc8_r2 = nn.Conv2d(4096, 1, kernel_size=1 )\n\n self.pool4_conv_r2 = nn.Conv2d(512, 128, kernel_size=3, padding=(1, 1) )\n self.pool4_fc_r2 = nn.Conv2d(128, 128, kernel_size=1 )\n self.pool4_ms_saliency_r2 = nn.Conv2d(128, 1, kernel_size=1 )\n\n self.convLSTM = ConvLSTM((400, 400), 4, [1], (3, 3), 1, batch_first=True, return_all_layers=False)\n self.c3d = nn.Conv3d(4, 1, kernel_size=3, padding=(1, 1, 1))\n\n self.pool4_saliency_ST = nn.Conv2d(2, 1, kernel_size=1 )\n self.fc8_saliency_ST = nn.Conv2d(2, 1, kernel_size=1 )\n\n self.loc_estimate = nn.Linear(2500, 4)\n\n self.attention_first = nn.Conv2d(6, 256, kernel_size=3, padding=(1, 1) )\n self.attention_second = nn.Conv2d(256, 6, kernel_size=1 )\n\n def 
forward(self, input, input_prior):\n\n ############### R1 ###############\n x = F.relu(self.conv1_1(input))\n x = F.relu(self.conv1_2(x))\n x = F.max_pool2d(x, 2)\n\n x = F.relu(self.conv2_1(x))\n x = F.relu(self.conv2_2(x))\n x = F.max_pool2d(x, 2)\n\n x = F.relu(self.conv3_1(x))\n x = F.relu(self.conv3_2(x))\n x = F.relu(self.conv3_3(x))\n x = F.max_pool2d(x, 2)\n\n x = F.relu(self.conv4_1(x))\n x = F.relu(self.conv4_2(x))\n x = F.relu(self.conv4_3(x))\n x = F.max_pool2d(x, 1)\n\n branch_pool4 = x.clone()\n\n x = F.relu(self.conv5_1(x))\n x = F.relu(self.conv5_2(x))\n x = F.relu(self.conv5_3(x))\n x = F.max_pool2d(x, 1)\n\n x = F.dropout(F.relu(self.fc6(x)), 0.5)\n x = F.dropout(F.relu(self.fc7(x)), 0.5)\n x = self.fc8(x)\n\n branch_pool4 = F.dropout(F.relu(self.pool4_conv(branch_pool4)), 0.5)\n branch_pool4 = F.dropout(F.relu(self.pool4_fc(branch_pool4)), 0.5)\n branch_pool4 = self.pool4_ms_saliency(branch_pool4)\n\n up_fc8 = F.upsample_bilinear(x, size=[self.size, self.size])\n up_pool4 = F.upsample_bilinear(branch_pool4, size=[self.size, self.size])\n\n ############### R2 ###############\n x_r2 = F.relu(self.conv1_1_r2(input_prior))\n x_r2 = F.relu(self.conv1_2_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 2)\n\n x_r2 = F.relu(self.conv2_1_r2(x_r2))\n x_r2 = F.relu(self.conv2_2_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 2)\n\n x_r2 = F.relu(self.conv3_1_r2(x_r2))\n x_r2 = F.relu(self.conv3_2_r2(x_r2))\n x_r2 = F.relu(self.conv3_3_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 2)\n\n x_r2 = F.relu(self.conv4_1_r2(x_r2))\n x_r2 = F.relu(self.conv4_2_r2(x_r2))\n x_r2 = F.relu(self.conv4_3_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 1)\n\n branch_pool4_r2 = x_r2.clone()\n\n x_r2 = F.relu(self.conv5_1_r2(x_r2))\n x_r2 = F.relu(self.conv5_2_r2(x_r2))\n x_r2 = F.relu(self.conv5_3_r2(x_r2))\n x_r2 = F.max_pool2d(x_r2, 1)\n\n x_r2 = F.dropout(F.relu(self.fc6_r2(x_r2)), 0.5)\n x_r2 = F.dropout(F.relu(self.fc7_r2(x_r2)), 0.5)\n x_r2 = self.fc8_r2(x_r2)\n\n branch_pool4_r2 = F.dropout(F.relu(self.pool4_conv_r2(branch_pool4_r2)), 0.5)\n branch_pool4_r2 = F.dropout(F.relu(self.pool4_fc_r2(branch_pool4_r2)), 0.5)\n branch_pool4_r2 = self.pool4_ms_saliency_r2(branch_pool4_r2)\n\n up_fc8_r2 = F.upsample_bilinear(x_r2, size=[self.size, self.size])\n up_pool4_r2 = F.upsample_bilinear(branch_pool4_r2, size=[self.size, self.size])\n\n rnn_inputs = torch.cat((up_pool4, up_pool4_r2, up_fc8, up_fc8_r2), 1)\n rnn_inputs = rnn_inputs.unsqueeze(0)\n rnn_list, state = self.convLSTM(rnn_inputs)\n rnn_output = rnn_list[0].squeeze(0)\n\n c3d_inputs = rnn_inputs.transpose(1, 2)\n c3d_output = self.c3d(c3d_inputs)\n c3d_output = c3d_output.transpose(1, 2)\n c3d_output = c3d_output.squeeze(0)\n\n pool4_saliency_cancat = torch.cat((branch_pool4, branch_pool4_r2), 1)\n pool4_saliency_ST = self.pool4_saliency_ST(pool4_saliency_cancat)\n up_pool4_ST = F.upsample_bilinear(pool4_saliency_ST, size=[self.size, self.size])\n\n fc8_saliency_cancat = torch.cat((x, x_r2), 1)\n fc8_saliency_ST = self.fc8_saliency_ST(fc8_saliency_cancat)\n up_fc8_ST = F.upsample_bilinear(fc8_saliency_ST, size=[self.size, self.size])\n\n # fc8_saliency_ST = F.upsample_bilinear(fc8_saliency_ST, size=[60, 60])\n pool4_saliency_ST = pool4_saliency_ST.view(pool4_saliency_ST.size(0), -1)\n\n local_poc = F.sigmoid(self.loc_estimate(pool4_saliency_ST))\n # local_poc = self.loc_estimate(fc8_saliency_ST)\n # cap_feats = self.generate_local_bbox(local_poc)\n cap_feats = self.generate_local_gaussian(local_poc)\n # rnn_output = F.upsample_bilinear(rnn_output, size=[self.size, 
self.size])\n\n # up_fc8_ST = up_fc8_ST + rnn_output\n global_saliency = torch.cat((up_pool4_ST, up_fc8_ST), 1)\n\n local_poo4_ST = torch.mul(up_pool4_ST, cap_feats)\n local_fc8_ST = torch.mul(up_fc8_ST, cap_feats)\n # local_rnn_output = torch.mul(rnn_output, cap_feats)\n local_saliency = torch.cat((local_poo4_ST, local_fc8_ST), 1)\n\n final_saliency = torch.cat((global_saliency, local_saliency, rnn_output, c3d_output), 1)\n\n #channel-wise attention\n atten_weights = F.relu(self.attention_first(final_saliency))\n atten_weights = F.softmax(self.attention_second(atten_weights))\n\n # atten_weights = F.upsample_bilinear(atten_weights, size=[480, 480])\n final_saliency = torch.mul(final_saliency, atten_weights)\n final_saliency = torch.mean(final_saliency, 1, keepdim=True)\n\n # return final_saliency, cap_feats, local_poc, cap_feats2\n return final_saliency, cap_feats, local_poc\n\n def generate_local_bbox(self, local_poc):\n size = 400\n points = local_poc.data.cpu().numpy()\n # points_val = np.zeros_like(points, dtype=points.dtype)\n cap_map_batch = np.zeros([points.shape[0], 1, size, size], dtype=np.float16)\n for i in range(0, points.shape[0]):\n point = points[i, :]\n if point[0] < point[2] and point[1] < point[3] \\\n and (point[2] - point[0]) < 0.95 \\\n and (point[3] - point[1]) < 0.95 \\\n and (point[2] - point[0]) > 0.05 \\\n and (point[3] - point[1]) > 0.05:\n # suitable point\n print(point)\n print('area:' + str((point[2] - point[0]) * (point[3] - point[1])))\n point = point * size\n point = point.astype(np.int16)\n cap_map = np.ones([point[2] - point[0], point[3] - point[1]], dtype=np.float16)\n cap_map = np.pad(cap_map, ([point[0], size - point[2]], [point[1], size - point[3]]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n else:\n # not suitable, choose center crop\n cap_map = np.ones([int(size / 2), int(size / 2)], dtype=np.float16)\n cap_map = np.pad(cap_map, ([int(size / 4), int(size / 4)], [int(size / 4), int(size / 4)]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n\n cap_map_batch = torch.from_numpy(cap_map_batch)\n cap_map_batch = cap_map_batch.type(torch.cuda.FloatTensor)\n\n return cap_map_batch\n\n def generate_local_gaussian(self, local_poc):\n size = 400\n points = local_poc.data.cpu().numpy()\n # points_val = np.zeros_like(points, dtype=points.dtype)\n cap_map_batch = np.zeros([points.shape[0], 1, size, size], dtype=np.float16)\n for i in range(0, points.shape[0]):\n point = points[i, :]\n if point[0] < point[2] and point[1] < point[3] \\\n and (point[2] - point[0]) < 0.95 \\\n and (point[3] - point[1]) < 0.95 \\\n and (point[2] - point[0]) > 0.05 \\\n and (point[3] - point[1]) > 0.05:\n # suitable point\n print(point)\n # print(':' + str((point[2] - point[0]) * (point[3] - point[1])))\n # point = point * size\n # point = point.astype(np.int16)\n center_x = (point[3] - point[1]) / 2 + point[1]\n center_y = (point[2] - point[0]) / 2 + point[0]\n print('center point:(' + str(center_x) + ',' + str(center_y) + ')')\n cap_map = gaussian_mask(center_x, center_y, sigma=0.75)\n # cap_map = np.pad(cap_map, ([point[0], size - point[2]], [point[1], size - point[3]]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n else:\n # not suitable, choose center gaussian\n cap_map = gaussian_mask(0.5, 0.5, sigma=0.75)\n # cap_map = np.pad(cap_map, ([int(size / 4), int(size / 4)], [int(size / 4), int(size / 4)]), 'constant')\n cap_map_batch[i, 0, :, :] = cap_map\n\n cap_map_batch = torch.from_numpy(cap_map_batch)\n cap_map_batch = 
cap_map_batch.type(torch.cuda.FloatTensor)\n\n return cap_map_batch","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"190423400","text":"#Autor: Cecilia Daniela Olivares Hernández, a01745727\r\n#Descripción: Calcula el pago semanal de un trabajador más sus horas extras y muestra su pago total.\r\n\r\n#Esta funcion calcula el pago de las horas normales\r\ndef calcularPagoNormal(horasNormales, horasExtras, pagoHora):\r\n pagoNormal = horasNormales * pagoHora\r\n return pagoNormal\r\n\r\n#Esta funcion calcula el pago de las horas extras\r\ndef calcularPagoExtra(horasNormales, horasExtras, pagoHora):\r\n pagoExtra = (horasExtras * pagoHora) + ((pagoHora * .65)* horasExtras)\r\n return pagoExtra\r\n\r\n#Funcion principal que resuelve el problema\r\ndef main():\r\n horasNormales = int(input(\"Teclea las horas normales trabajadas: \"))\r\n horasExtras = int(input(\"Teclea las horas extras trabajadas: \"))\r\n pagoHora = int(input(\"Teclea el pago por hora: \"))\r\n pagoNormal = calcularPagoNormal(horasNormales, horasExtras, pagoHora)\r\n pagoExtra = calcularPagoExtra(horasNormales, horasExtras, pagoHora)\r\n pagoTotal = pagoNormal + pagoExtra\r\n print(\"\"\"\r\nPago normal: \\x1b[1;30m $%.2f\"\"\" % (pagoNormal))\r\n print(\"\\x1b[0;mPago extra: \\x1b[1;30m $%.2f\" % (pagoExtra))\r\n print(\"-----------------------\")\r\n print(\"\\x1b[0;mPago total: \\x1b[1;30m $%.2f\" % (pagoTotal))\r\n\r\nmain()","sub_path":"Pago.py","file_name":"Pago.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"498078750","text":"import json\nimport sys\n\nfrom acpc_python_client import wrappers\nfrom acpc_python_client.data.action_type import ActionType\nfrom acpc_python_client.data.betting_type import BettingType\n\n_NUMBERS = (int, float)\n\n\ndef wrapper_to_str(wrapper_object, formatted=True, contents_only=False):\n if isinstance(wrapper_object, str):\n return wrapper_object\n elif isinstance(wrapper_object, _NUMBERS):\n return str(wrapper_object)\n elif isinstance(wrapper_object, bool):\n return 'true' if wrapper_object else 'false'\n elif hasattr(wrapper_object, '_length_'):\n # Object is special C array wrapper class\n return '[ %s ]' % ', '.join([wrapper_to_str(e, False, True) for e in wrapper_object])\n else:\n # Object is wrapped structure\n has_contents = hasattr(wrapper_object, 'contents')\n type_fields = wrapper_object.contents._fields_ if has_contents else wrapper_object._fields_\n\n # Create strings containing \"name\": value from fields on the object in json format\n attribute_names = [field[0] for field in type_fields]\n attribute_vals = [getattr(wrapper_object.contents if has_contents else wrapper_object, field[0])\n for field in type_fields]\n attribute_vals_strings = [wrapper_to_str(attr_val, False, True) for attr_val in attribute_vals]\n attribute_strings = ['\"%s\": %s' % attr for attr in zip(attribute_names, attribute_vals_strings)]\n\n # Pretty print it with json module\n json_string = '{ %s }' % ', '.join(attribute_strings)\n if formatted:\n try:\n json_object = json.loads(json_string)\n except:\n print('Unexpected error:', sys.exc_info()[0])\n print('Error while json parsing following json string:')\n print(json_string)\n raise\n json_string = json.dumps(json_object, sort_keys=False, indent=4)\n if contents_only:\n return json_string\n else:\n object_name = \\\n (wrapper_object._type_ if hasattr(wrapper_object, '_type_') else type(wrapper_object)).__name__\n return '%s: %s' % (object_name, json_string)\n\n\ndef action_type_enum_to_int(action_type):\n if action_type == ActionType.FOLD:\n return wrappers.a_fold\n elif action_type == ActionType.CALL:\n return wrappers.a_call\n elif action_type == ActionType.RAISE:\n return wrappers.a_raise\n else:\n raise ValueError('Unknown action type')\n\n\ndef action_type_int_to_enum(action_type_int):\n if action_type_int == wrappers.a_fold:\n return ActionType.FOLD\n elif action_type_int == wrappers.a_call:\n return ActionType.CALL\n elif action_type_int == wrappers.a_raise:\n return ActionType.RAISE\n else:\n raise ValueError('Unknown action type')\n\n\ndef betting_type_int_to_enum(betting_type):\n if betting_type == wrappers.limitBetting:\n return BettingType.LIMIT\n elif betting_type == wrappers.noLimitBetting:\n return BettingType.NO_LIMIT\n else:\n raise ValueError('Unknown betting type')\n","sub_path":"acpc-python-client-master/acpc_python_client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"44229202","text":"#!/usr/bin/env python3.7\nimport argparse\nimport time \nimport sys\n\nimport logging\nfrom colorlog import ColoredFormatter\nfrom ppadb.client import Client as AdbClient\n\nfrom PIL import Image\nfrom io import BytesIO\nfrom pyocr import pyocr\nfrom pyocr import builders\nimport yaml\n\nfrom multiprocessing import Pool\n\nimport tenacity\n\ndef create_console_handler(verbose_level):\n clog = logging.StreamHandler()\n formatter = ColoredFormatter(\n \"%(log_color)s[%(levelname)-8s%(module)s]%(reset)s \"\n \"%(white)s%(message)s\",\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n })\n clog.setFormatter(formatter)\n if verbose_level == 0:\n clog.setLevel(logging.WARN)\n elif verbose_level == 1:\n clog.setLevel(logging.INFO)\n else:\n clog.setLevel(logging.DEBUG)\n return clog\n\nclass TradeError(Exception):\n \"\"\"Raised when an error is detected\"\"\"\n def __init__(self, arg):\n self.strerror = arg\n self.args = {arg}\n\ndef scrap_screencap(dev_name, img, location):\n crop = img.crop(config[dev_name]['locations'][location])\n return tool.image_to_string(crop).replace(\"\\n\", \" \")\n\ndef tap(dev,location):\n x, y = config[dev.name]['locations'][location]\n dev.shell(\"input tap \" + str(x) + \" \" + str(y))\n logger.info(dev.name + ' | Tap location ' + str(location) + 'succeeded')\n\ndef check_known_errors(dev_name, img):\n errors= [\n (\"error_box\",[\"est trop loin\", \"expiration\", \"inconnue\"])\n ]\n for err_set in errors:\n box, msgs = err_set\n text = scrap_screencap(dev_name, img, box)\n for msg in msgs:\n if text in msg:\n return False\n return True\n\ndef waiting(location):\n time.sleep(config['waits'][location])\n\n@tenacity.retry(wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef clic_trade(dev):\n logger.info(\"Check and clic on trade button device {}\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'ECHANGER' in scrap_screencap(dev.name, img,\"trade_button_label\"):\n logger.info(dev.name + ' | TRADE button found')\n tap(dev,'trade_button')\n waiting('trade_button')\n return\n check_known_errors(dev.name, img)\n raise TradeError('Error Clic Trade {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef select_pokemon(dev):\n search_string = config[dev.name]['search_string']\n logger.info(\"Check device {} Pokemon selection screen\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'POKEMON' in scrap_screencap(dev.name, img,\"pokemon_to_trade_box\"):\n logger.info(dev.name + ' | Selection screen found')\n tap(dev,'search_button')\n waiting('location')\n dev.shell(\"input text \" + search_string)\n # tap 2 times the pokemon, once to get of keyboard entry, 2nd to select pokemon\n tap(dev,'first_pokemon')\n waiting('first_pokemon')\n tap(dev,'first_pokemon')\n return\n raise TradeError('Select Pokemon failed on {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef check_screen(dev):\n name_check = config[dev.name]['name_check']\n logger.info(\"Check device {} NEXT screen\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'SUIVANT' in scrap_screencap(dev.name, img,\"next_button_box\"):\n logger.info(dev.name + ' | Next screen found')\n #if name_check not in scrap_screencap(dev.name, img,\"name_at_next_screen_box\"):\n # raise 
namecheckfail\n tap(dev,'next_button')\n return\n raise TradeError('Select Pokemon failed on {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef confirm_screen(dev):\n logger.info(\"Check device {} CONFIRM screen\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n if 'CONFIRMER' in scrap_screencap(dev.name, img,\"confirm_button_box\"):\n logger.debug(dev.name + ' | Confirm screen found, doing checks')\n if ( '100' in scrap_screencap(dev.name, img,\"trade_star\") and\n config[dev.name]['search_string'] in scrap_screencap(dev.name, img,\"trade_name_box\")\n ):\n tap(dev,'confirm_button')\n return\n else:\n logger.warning(dev.name + ' | Confirm checks failed')\n raise TradeError('Confirm screen failed on {}'.format(dev.name))\n\n@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(5), reraise=True)\ndef trade_end(dev):\n logger.info(\"Check device {} trade ended\".format(dev.name))\n img = Image.open(BytesIO(dev.screencap()))\n weight_text = str(scrap_screencap(dev.name, img,\"weight_box\"))\n logger.debug('scap_weight: {}'.format(weight_text))\n if 'POIDS' in weight_text:\n logger.info(dev.name + ' | traded pokemon screen found')\n tap(dev,'close_pokemon_button')\n return\n weight_text = str(scrap_screencap(dev.name, img,\"weight_box_lucky\"))\n logger.debug('lucky scap_weight: {}'.format(weight_text))\n if 'POIDS' in weight_text:\n logger.warning('LUCKY Pokemon !!')\n logger.info(dev.name + ' | traded pokemon screen found')\n tap(dev,'close_pokemon_button')\n return\n raise TradeError('Trade ending failed on {}'.format(dev.name))\n\ndef do_trade(num, p):\n try:\n p.map(clic_trade, [dev_id1,dev_id2])\n p.map(select_pokemon, [dev_id1,dev_id2])\n waiting('next_button')\n\n p.map(check_screen, [dev_id1,dev_id2])\n waiting('confirm_button')\n\n p.map(confirm_screen, [dev_id1,dev_id2])\n waiting('trade_anim')\n\n p.map(trade_end, [dev_id1,dev_id2])\n waiting('trade_ends')\n\n except Exception as e:\n logger.error(\"ERROR: Canceling trade:\" + str(e))\n return False\n\n return True\n \n\nif __name__ == '__main__':\n # get params from command line\n parser = argparse.ArgumentParser(description='Pokemon GO trader')\n parser.add_argument('--config', type=str, default='config.yaml',\n help=\"Config file location.\")\n parser.add_argument('--stop-after', default=1, type=int,\n help='Stop after exchanging pokemon')\n args = parser.parse_args()\n\n # load params from config file\n with open(args.config, \"r\") as f:\n config = yaml.safe_load(f)\n tools = pyocr.get_available_tools()\n tool = tools[0]\n\n # magic number for randomizing crop \n i = 2\n\n verbose_level=1\n logger = logging.getLogger()\n if verbose_level > 0:\n logger.addHandler(create_console_handler(verbose_level))\n logger.setLevel(logging.DEBUG)\n\n # Connecting on local adb server\n try:\n client = AdbClient(host=\"127.0.0.1\", port=5037)\n except:\n logger.error(\"Unable to connect to adb server\")\n logger.error(\"Please check your configuration and run ``adb start-server''\")\n sys.exit(1)\n \n if len(client.devices()) < 2:\n logger.error(\"This program needs 2 phones connected with ADB\")\n \n # instanciate 2 pogo games\n dev_id1 = client.device(config['app1']['device_id'])\n dev_id1.name = 'app1'\n dev_id2 = client.device(config['app2']['device_id'])\n dev_id2.name = 'app2'\n\n\n # trading\n p = Pool(2)\n for trade in range(args.stop_after):\n logger.warning(\"Trade num {}/{} 
engaged\".format(str(trade+1),str(args.stop_after)))\n if not do_trade(trade, p):\n sys.exit(0)\n\n","sub_path":"trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"393246918","text":"#!/usr/bin/env python3\r\n#\r\n\r\n\r\n'''\r\n绑定回调,在task执行完成的时候可以获取执行的结果,回调的最后一个参数是future对象,通过该对象可以获取协程返回值。\r\n'''\r\n\r\nimport time\r\nimport asyncio\r\nimport functools\r\n\r\nnow = lambda: time.time()\r\n\r\n\r\nasync def do_some_work(x):\r\n print('waiting: ', x)\r\n return 'Done after {}s'.format(x)\r\n\r\n\r\n'''\r\n通过add_done_callback方法给task任务添加回调函数,当task(也可以说是coroutine)执行完成的时候,就会调用回调函数。\r\n并通过参数future获取协程执行的结果。这里我们创建 的task和回调里的future对象实际上是同一个对象\r\nwhat callback do is reading the task.result(), task is the subclass of Future. \r\n'''\r\n\r\n\r\n# callback without argv\r\ndef callback(future: asyncio.Future):\r\n print('callback: ', future.result())\r\n\r\n\r\nstart = now()\r\ncoroutine = do_some_work(2)\r\nloop = asyncio.get_event_loop()\r\ntask = asyncio.ensure_future(coroutine)\r\nprint(\"\\ncallback without argv:\")\r\nprint(task)\r\ntask.add_done_callback(callback)\r\nprint(task)\r\nloop.run_until_complete(task)\r\nprint('time:', now() - start)\r\n\r\n\r\n# callback with argv\r\ndef callback(argv, future: asyncio.Future):\r\n print('argv: {}, callback: {}'.format(argv, future.result()))\r\n\r\n\r\nstart = now()\r\ncoroutine = do_some_work(2)\r\nloop = asyncio.get_event_loop()\r\ntask = asyncio.ensure_future(coroutine)\r\nprint(\"\\ncallback with argv:\")\r\nprint(task)\r\ntask.add_done_callback(functools.partial(callback, 123))\r\nprint(task)\r\nloop.run_until_complete(task)\r\nprint('time:', now() - start)\r\n\r\nloop.close()\r\n","sub_path":"basic_/asyncio_/03_asyncio_callback.py","file_name":"03_asyncio_callback.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"119948999","text":"\n\nimport json\nimport qiskit as q\nimport qiskit_aqua\nfrom math import ceil, log2\nfrom typing import Dict, Tuple\n\nfrom .grover import Grover\nfrom .simulator import Simulator\nfrom .load_graph import load_graph\nfrom .get_logger import get_logger\n\nfrom .encode_graph import *\n\n\nlogger = get_logger(__name__) \n\n\nclass EdgeFinder():\n\n def __init__(self):\n logger.info(\"#\"*100)\n self.__name__ = str(self.__class__).split(\".\")[-1][:-2]\n logger.info(\"Starting workflow of %s\"%(self.__name__))\n self.graph = load_graph()\n self.n_of_qbits = ceil(log2(max((e[0] for e in self.graph))))\n logger.info(\"To encode a vertex we need %d qbits \"%self.n_of_qbits)\n self._setup_registers()\n self._setup_circuit()\n\n def _setup_registers(self):\n\n # Quantum registers\n self.start = q.QuantumRegister(self.n_of_qbits, name=\"start\")\n self.end = q.QuantumRegister(self.n_of_qbits, name =\"end\")\n self.weight= q.QuantumRegister(self.n_of_qbits, name =\"weight\")\n self.flags = q.QuantumRegister(3, name =\"flags\")\n\n self.ancillas_dim = (3 * self.n_of_qbits) - 1\n self.ancillas = q.QuantumRegister(self.ancillas_dim, name=\"ancillas\")\n\n self.qregisters = [self.start, self.end, self.weight, self.flags, self.ancillas]\n\n logger.info(\"The simulation needs {} qbits\".format(sum((register.size for register in self.qregisters))))\n\n # Setup the classical registers to save the result of the measurements\n self.classical_start = q.ClassicalRegister(self.n_of_qbits, 'classical_start')\n self.classical_end = q.ClassicalRegister(self.n_of_qbits, 'classical_end')\n self.classical_weight= q.ClassicalRegister(self.n_of_qbits, 'classical_weight')\n self.classical_flags = q.ClassicalRegister(3, 'classical_flags')\n \n self.cregisters = [self.classical_start, self.classical_end, self.classical_weight, self.classical_flags]\n\n\n def _setup_circuit(self):\n self.circuit = q.QuantumCircuit(*self.qregisters)\n\n def _add_measure_gates(self):\n self.circuit.add(*self.cregisters)\n logger.info(\"Adding measure gates\")\n for q, c in zip(self.qregisters, self.cregisters):\n self.circuit.measure(q, c)\n\n def initialize_circuit(self):\n \"\"\"Prepare the initial superposition\"\"\"\n for register in [self.start, self.end, self.weight]:\n self.circuit.h(register)\n\n\n def setup_oracle(self):\n self.oracle = None\n raise NotImplementedError(\"This method is met to be overvritten by a subclass therfore is not callable.\")\n\n def get_MLE(self, counts : Dict[int, int]) -> Tuple[int, int, int]:\n \"\"\"Find the most frequent result with the flags setted to 1 (MLE)\"\"\"\n logger.info(\"Finding the Most Likley result\")\n \n # Convert it to a list\n result_list = [list(reversed(encoding.split(\" \"))) + [times] for encoding, times in counts.items()]\n\n result_list.sort(key=lambda x: x[-1], reverse=False)\n\n logger.debug(\"The results are: \")\n\n result_list = [(int(values[0],base=2),int(values[1],base=2),int(values[2],base=2),int(values[3],base=2),int(values[5])) for values in result_list]\n\n for values in result_list:\n logger.debug(\"\\t{:d} -> {:d} w: {} flags: {:02b} times: {:d}\".format(*values))\n\n MLE = max(result_list,key=lambda x: x[-1]) # if x[-2] == 1 else 0\n\n logger.info(\"The Most Likley result is {} -> {} w: {} flags: {:02b} times: {}\".format(*MLE))\n return MLE\n\n def run(self, n_of_shots : int = 100*(2**7), local : bool = False):\n\n self.initialize_circuit()\n\n self.setup_oracle()\n oracle_circuit_path = self.__name__ + \"_oracle.qasm\"\n 
logger.info(\"Saving the oracle circuit to %s\"%oracle_circuit_path)\n with open(oracle_circuit_path, \"w\") as f:\n f.write(self.oracle.qasm())\n\n image_path = self.__name__ + \".png\"\n logger.info(\"Saving the oracle as an image at %s\"%(image_path))\n self.oracle.draw(filename=image_path, output=\"mpl\")\n\n logger.info(\"Checking if the oracle is correct:\")\n simulator = Simulator()\n oracle_sym_results = simulator.symbolic_simulation(self.circuit + self.oracle)\n\n oracle_sym_path = self.__name__ + \"_oracle_sym.log\"\n logger.info(\"Saving the oracle symbolic symulation at %s\"%(oracle_sym_path))\n with open(oracle_sym_path, \"w\") as f:\n for value in oracle_sym_results:\n f.write(value + \"\\n\")\n\n self.circuit = Grover([self.start, self.end, self.flags], self.ancillas).run(self.circuit, self.oracle, number_of_expected_results=4)\n\n self._add_measure_gates()\n\n logger.info(\"The final circuit has {} gates and is {} depth.\".format(self.circuit.size, self.circuit.depth))\n\n circuit_path = self.__name__ + \".qasm\"\n logger.info(\"Saving the circuit to %s\"%circuit_path)\n with open(circuit_path, \"w\") as f:\n f.write(self.circuit.qasm())\n\n logger.info(\"Starting a batch of %d simulations.\"%n_of_shots)\n\n if local:\n results = simulator.parallel_simulation(self.circuit, n_of_shots)\n else:\n results = simulator.distribuited_simulation(self.circuit, n_of_shots, [(\"::1\",10000,4,False), (\"169.254.75.69\",10001,8,True)])\n\n results_path = self.__name__ + \".json\"\n logger.info(\"Saving the results to %s\"%results_path)\n simulator.save_results(results_path)\n\n return self.get_MLE(results)\n","sub_path":"Minimum_Spanning_Tree/qiskit_implementation/weight_encoding/edge_finder/edge_finder.py","file_name":"edge_finder.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"96712061","text":"from setuptools import setup, Extension\n\nimport os\nimport sys\nimport setuptools\nimport glob\n\n__version__ = '0.2.0'\n\nextra_compile_args_dict = {\n 'linux' : ['-w', '-ftemplate-backtrace-limit=0', '-std=c++14'],\n 'linux2' : ['-w', '-ftemplate-backtrace-limit=0', '-std=c++14'],\n 'darwin' : ['-w', '-ftemplate-backtrace-limit=0', '-std=c++14', '-stdlib=libc++'],\n}\n\next_modules = [\n Extension(\n \"_sparsepy\",\n glob.glob('src/*.cpp'),\n include_dirs = ['lib/parallel-hashmap', 'lib/pybind11/include', 'lib/cereal/include'],\n language = 'c++',\n extra_compile_args = extra_compile_args_dict[sys.platform],\n extra_link_args = ['-lz'],\n define_macros = [('DOCTEST_CONFIG_DISABLE', None)]\n )\n]\n\nsetup(\n name = 'sparsepy',\n version = __version__,\n author = 'Adam Moyer',\n author_email = 'atom.moyer@gmail.com',\n url = None,\n description = 'A Fast and Memory Efficient Hash Map for Python',\n packages = ['sparsepy'],\n package_dir={'sparsepy': 'sparsepy'},\n package_data={},\n ext_modules = ext_modules,\n install_requires = ['pytest', 'pytest-timeout', 'pytest-memprof', 'pybind11'],\n include_package_data=True,\n zip_safe = False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"372926632","text":"class Solution:\n def fill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:\n neighbours=[[0,-1],[-1,0],[0,1],[1,0]]\n prevColor=image[sr][sc]\n image[sr][sc]=newColor\n cell=[sr,sc]\n for i in neighbours:\n r=cell[0]+i[0]\n c=cell[1]+i[1]\n if(r>=0 and r=0 and c List[List[int]]:\n if(image[sr][sc]==newColor):\n return image\n self.fill(image,sr,sc,newColor)\n return image\n \n \n \n","sub_path":"FloodFill.py","file_name":"FloodFill.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"450136482","text":"#!/usr/bin/python\nimport logging\nimport os.path\nimport sys\nfrom optparse import OptionParser\nimport pdb\n\n\npathname = os.path.dirname(sys.argv[0])\nsys.path.append(pathname)\n\nimport callMPsfunctions\n\n\n \ninOptions = OptionParser()\ninOptions.add_option(\"-i\", \"--vcffile\", dest=\"bsFile\", help=\"VCF file for methylation calls\", type=\"string\")\ninOptions.add_option(\"-o\", \"--output\", dest=\"outFile\", help=\"Output file with the probability scores\", type=\"string\")\ninOptions.add_option(\"-v\", \"--verbose\", dest=\"logDebug\", help=\"show verbose debugging output\", action=\"store_true\", default=False)\n(options, args) = inOptions.parse_args()\n\n\n\ncallMPsfunctions.setLog(options.logDebug)\n\n\nif not options.bsFile:\n callMPsfunctions.die(\"input file not given!\")\nif not options.outFile:\n callMPsfunctions.die(\"output file not given!\")\nif not os.path.isfile(options.bsFile):\n callMPsfunctions.die(\"input bs vcf file does not exist: \" + options.bsFile)\n\n\ncallMPsfunctions.getMPsVCF(options.bsFile, options.outFile, options.logDebug)\n\n\n\n\n\n\n\n","sub_path":"004.bsseq_pipeline/002.callMPs/01.callMPs_fromVCF.py","file_name":"01.callMPs_fromVCF.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"136328484","text":"from matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n\r\ndef plot_rectangles(long_x, x_labels, resources, y_label, title, legends, save_direction=None):\r\n fig, ax = plt.subplots()\r\n l_s = len(resources)\r\n width = ((100/l_s)/100)-0.005 # el -0.005 es un ajuste para que las barras no se peguen\r\n ind = np.arange(long_x)\r\n rects = []\r\n w = 0\r\n colors = ['r', 'g', 'b', 'y', 'c', 'm']\r\n for i in range(l_s):\r\n rects.append(ax.bar(ind + w, resources[i], width, color=colors[i]))\r\n w += width\r\n ax.set_ylabel(y_label)\r\n ax.set_title(title)\r\n ax.set_xticks((ind + (width*l_s)/2))\r\n ax.set_xticklabels(x_labels)\r\n lab = []\r\n for c in rects:\r\n lab.append(c[0])\r\n ax.legend((lab), legends)\r\n\r\n def autolabel(rect):\r\n # attach some text labels\r\n for r in rect:\r\n height = r.get_height()\r\n ax.text(r.get_x() + r.get_width() / 2., 0.05 * height,\r\n '%.3f' % height,\r\n ha='center', va='bottom')\r\n\r\n for rec in rects:\r\n autolabel(rec)\r\n plt.show() if save_direction is None else plt.savefig(save_direction+'/means.png')\r\n\r\n\r\ndef plot_single_rec(long_x, x_labels, resources, y_label, title):\r\n fig, ax = plt.subplots()\r\n width = 0.25\r\n ind = np.arange(long_x)\r\n rects = ax.bar(ind, resources, width, color='r')\r\n ax.set_ylabel(y_label)\r\n ax.set_title(title)\r\n ax.set_xticks(ind + width)\r\n ax.set_xticklabels(x_labels)\r\n\r\n def autolabel(rect):\r\n # attach some text labels\r\n for r in rect:\r\n height = r.get_height()\r\n ax.text(r.get_x() + r.get_width() / 2., 1.05 * height,\r\n '%i' % height,\r\n ha='center', va='bottom')\r\n\r\n autolabel(rects)\r\n plt.show()","sub_path":"AnalysisCV/auxiliar/Plotters.py","file_name":"Plotters.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"67849664","text":"from basehandler import BaseHandler\nfrom storybooklib import MAX_PLAYERS\n\nclass WaitingToStart(BaseHandler):\n def get(self):\n if not self.user:\n self.render(u'login_screen')\n else:\n game_id = self.request.get('game_id')\n user_id = self.user.user_id\n self.render(u'waiting_to_start', game_id=game_id, MAX_PLAYERS=MAX_PLAYERS, user_id=user_id)\n\n return\n","sub_path":"waitingtostart.py","file_name":"waitingtostart.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"35007009","text":"class Phone():\n def __init__(self,brand,color,phone_number):\n self.brand=brand\n self.color=color\n self.number=phone_number\n self.on=False\n self.sended_messages=0\n def turn_on(self):\n self.on=True\n def turn_off(self):\n self.on=False\n def send_sms(self,other_number,sms):\n if self.on and len(other_number)>=9:\n print(\"Wiadomość została wysłana\")\n self.sended_messages+=1\n else:\n print(\"Włącz telefon lub wprowadź poprawny numer telefonu\") \n def __str__(self):\n if self.on:\n kom=\"włączony\"\n else:\n kom=\"wyłączony\" \n return (f\"Telefon marki {self.brand} o numerze {self.number} jest {kom} , wysłano {self.sended_messages} SMS \\n\") \ntel1=Phone(\"Samsung\",\"white\",\"123456789\") \ntel1.turn_on()\ntel1.send_sms(\"789456123\",\"Hi\")\nprint(tel1) \ntel1.turn_off()\ntel1=Phone(\"Apple\",\"black\",\"789456123\") \ntel1.turn_on()\ntel1.send_sms(\"78945\",\"Hi\")\nprint(tel1) \ntel1.turn_off()\n \n","sub_path":"07-ObjectOrientedProgramming/14_phone.py","file_name":"14_phone.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"298800611","text":"import numpy as np\nimport torch\nfrom ax.service.managed_loop import optimize\n\nfrom model import AutoEncoder\nfrom main import load_dataset\n\ndef train(dataloader, parameters, device):\n model = AutoEncoder(input_dim=1900, nlayers=parameters.get('nlayers', 5), latent=100)\n model = model.to(device)\n\n model.train()\n train_loss = 0\n\n optimizer = torch.optim.Adam(model.parameters(), lr=parameters.get('lr', 1e-5), \n weight_decay=parameters.get('weight_decay', 0.))\n loss_func = torch.nn.MSELoss()\n\n for epoch in range(parameters.get('epochs', 1000)):\n for index, (data, ) in enumerate(dataloader, 1):\n optimizer.zero_grad()\n output = model(data)\n loss = loss_func(output, data)\n train_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n return model\n\ndef test(dataloader, model):\n model.eval()\n test_loss = 0\n\n loss_func = torch.nn.MSELoss()\n\n for index, (data, ) in enumerate(dataloader, 1):\n with torch.no_grad():\n output = model(data)\n loss = loss_func(output, data)\n test_loss += loss.item()\n\n return test_loss / index\n\ndef train_test(parameterization):\n dtype = torch.float\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n batch_size = 100\n train_dataloader, test_dataloader = load_dataset('data/aponc_sda.npz', batch_size, device)\n\n net = train(train_dataloader, parameterization, device)\n\n return test(test_dataloader, net)\n\ndef tune():\n best_parameters, values, experiment, model = optimize(\n parameters=[\n {'name': 'lr', 'type': 'range', 'bounds': [1e-6, 0.4], 'log_scale': True},\n {'name': 'weight_decay', 'type': 'range', 'bounds': [0.0, 1.0], 'log_scale': False},\n {'name': 'nlayers', 'type': 'range', 'bounds': [2, 6], 'log_scale': False},\n #{'name': 'momentum', 'type': 'range', 'bounds': [0.0, 1.0]},\n ],\n evaluation_function=train_test,\n objective_name='mse_loss',\n )\n\n print(best_parameters)\n print('means, covariances', values)\n\n return experiment\n\ndef best(experiment):\n df = experiment.fetch_data().df\n best_arm_name = df.arm_name[df['mean'] == df['mean'].min()].values[0]\n best_arm = experiment.arms_by_name[best_arm_name]\n\n print(best_arm)\n\n dtype = torch.float\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n batch_size = 100\n train_dataloader, test_dataloader = load_dataset('data/aponc_sda.npz', batch_size, device)\n\n combined_train_test_set = torch.utils.data.ConcatDataset([\n train_dataloader.dataset, \n test_dataloader.dataset,\n ])\n\n combined_train_test_loader = torch.utils.data.DataLoader(\n combined_train_test_set, \n batch_size=batch_size, \n shuffle=True,\n )\n\n net = train(train_dataloader, best_arm.parameters, device)\n\n test_mse_loss = test(test_dataloader, net)\n\n print('MSE loss (test set): %f' % (test_mse_loss))\n\ndef main():\n torch.manual_seed(123)\n\n experiment = tune()\n best(experiment)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"407409006","text":"\"\"\"Each ListNode holds a reference to its previous node\nas well as its next node in the List.\"\"\"\nclass ListNode:\n def __init__(self, value, prev=None, next=None):\n self.value = value\n self.prev = prev\n self.next = next\n\n \"\"\"Wrap the given value in a ListNode and insert it\n after this node. Note that this node could already\n have a next node it is point to.\"\"\"\n def insert_after(self, value):\n current_next = self.next\n self.next = ListNode(value, self, current_next)\n if current_next:\n current_next.prev = self.next\n\n \"\"\"Wrap the given value in a ListNode and insert it\n before this node. Note that this node could already\n have a previous node it is point to.\"\"\"\n def insert_before(self, value):\n current_prev = self.prev\n self.prev = ListNode(value, current_prev, self)\n if current_prev:\n current_prev.next = self.prev\n\n \"\"\"Rearranges this ListNode's previous and next pointers\n accordingly, effectively deleting this ListNode.\"\"\"\n def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev\n\n\n\"\"\"Our doubly-linked list class. It holds references to\nthe list's head and tail nodes.\"\"\"\nclass DoublyLinkedList:\n def __init__(self, node=None):\n self.head = node\n self.tail = node\n self.length = 1 if node is not None else 0\n\n def __len__(self):\n return self.length\n\n \"\"\"Wraps the given value in a ListNode and inserts it \n as the new head of the list. Don't forget to handle \n the old head node's previous pointer accordingly.\"\"\"\n def add_to_head(self, value):\n # creating node\n new_node = ListNode(value)\n # add value reflecting # of items in LL\n self.length += 1\n # link node to head if list is empty. Becomes both head and tail\n if not self.head and not self.tail:\n self.head = new_node\n self.tail = new_node\n # list is populated\n else:\n # update the locations of head and tail\n # current head is being linked to new_head\n new_node.next = self.head\n # updating old head to have a prev link\n self.head.prev = new_node\n # updating new head to new-node\n self.head = new_node\n\n \"\"\"Removes the List's current head node, making the\n current head's next node the new head of the List.\n Returns the value of the removed Node.\"\"\"\n def remove_from_head(self):\n # store value before we delete the node, so it can be returned\n value = self.head.value\n # delete head\n self.delete(self.head)\n # return value of deleted node\n return value\n\n \"\"\"Wraps the given value in a ListNode and inserts it \n as the new tail of the list. Don't forget to handle \n the old tail node's next pointer accordingly.\"\"\"\n def add_to_tail(self, value):\n new_node = ListNode(value)\n # add value reflecting # of items in LL\n self.length += 1\n # link node to head if list is empty. 
Becomes both head and tail\n if not self.head and not self.tail:\n self.head = new_node\n self.tail = new_node\n # list is populated\n else:\n # update the locations of head and tail\n # opposite of add_new_head\n # the prev-prop of new node will be linking to previous tail\n new_node.prev = self.tail\n # current tail's next-prop will link to new node being added\n self.tail.next = new_node\n # updating new tail to new-node\n self.tail = new_node\n\n \"\"\"Removes the List's current tail node, making the \n current tail's previous node the new tail of the List.\n Returns the value of the removed Node.\"\"\"\n def remove_from_tail(self):\n # opposite of remove from head\n value = self.tail.value\n self.delete(self.tail)\n return value\n\n \"\"\"Removes the input node from its current spot in the \n List and inserts it as the new head node of the List.\"\"\"\n def move_to_front(self, node):\n # self refers to list\n # node is value passed in as arg\n # -----\n # don't perform if node already is head\n if node is self.head:\n return\n # store node value so is safe to delete and available for adding to head\n value = node.value\n # delete node from current location\n self.delete(node)\n # add to head\n self.add_to_head(value)\n\n \"\"\"Removes the input node from its current spot in the \n List and inserts it as the new tail node of the List.\"\"\"\n def move_to_end(self, node):\n # opposite of move_to_front \n if node is self.tail:\n return\n value = node.value\n self.delete(node)\n self.add_to_tail(value)\n\n \"\"\"Removes a node from the list and handles cases where\n the node was the head or the tail\"\"\"\n def delete(self, node):\n # TODO: Catch errors if list is empty or node is not in list\n\n # assuming node is in list\n # reduce # of items in DLL\n self.length -= 1\n # if head & tail, ass\n if self.head is self.tail:\n self.head = None\n self.tail = None\n # if head\n elif node is self.head:\n # next node after head becomes new head\n self.head = self.head.next\n node.delete()\n\n # if tail\n # opposite of head\n elif node is self.tail: \n self.tail = self.tail.prev\n node.delete()\n\n # if regular node, call existing delete function\n else:\n node.delete()\n \"\"\"Returns the highest value currently in the list\"\"\"\n def get_max(self):\n # start at head\n if not self.head:\n return None\n # store the head.value in max_val\n max_value = self.head.value\n # create a var for iteration, beginning with first value (head node)\n current = self.head\n # iterate through each node and compare\n while current:\n if current.value > max_value:\n max_value = current.value\n current = current.next\n\n return max_value\n \n # if i > max_val, max_val = i\n # return max_val\n\n\n","sub_path":"doubly_linked_list/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"236785941","text":"\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt \n\nimport sys\nimport time\nimport os \nimport pathlib\nimport pickle\n\nCWD = pathlib.Path.cwd()\n\nsys.path.append('../../ml4seas/')\n\n# scipy \nimport numpy as np \nimport pandas as pd \nimport xarray as xr\n\n# tensorflow and keras \nimport tensorflow as tf\n\ntf.debugging.set_log_device_placement(True)\n\nimport tensorflow.keras as keras\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import Model\nimport tensorflow.keras.backend as K\n\n# import NN utilities \nfrom NN import *\n\n# checks if the GPU is available \nif len(tf.config.list_physical_devices('GPU')) >= 1: \n compute = 'GPU'\nelse: \n compute = 'CPU'\n\nprint(f\"using the {compute}\")\n\n# ----------------------------------------------------------------------------\n# define parameters here \nbatch_size=32\npadd = 8\ninput_shape = (181, 360, 1) # last for the number of channels \nresize_shape = (176, 360) # to be evenly divided by the padd\nn_epochs = 20 # number of epochs \ndpath = pathlib.Path('/media/nicolasf/END19101/data/GCMs/processed/hindcasts/CDS/ECMWF/T2M/')\n# ----------------------------------------------------------------------------\n\n### list the files \nlfiles = list(dpath.glob(\"ECMWF_T2M_seasonal_anomalies_????_??.nc\"))\nlfiles.sort()\n\n# opens the dataset \ndset = xr.open_mfdataset(lfiles, concat_dim='time', combine='nested', parallel=True)\n\n### selects the training set \ndset_train = dset.sel(time=slice('1993','2010'))\n\n### selects the validation set \ndset_val = dset.sel(time=slice('2011',None))\n\n### select the correct lead time (3 = e.g. SON for A initialisation)\ndset_train = dset_train[['t2m']].sel(step=3)\ndset_val = dset_val[['t2m']].sel(step=3)\n\ndset_train = dset_train.stack(instance=('time','member'))\ndset_val = dset_val.stack(instance=('time','member'))\n\n### get the repeated datetimes (will be useful to sample repeatedly in Yds)\n\nrdatetimes_train = dset_train.indexes[\"instance\"].get_level_values(0)\nrdatetimes_val = dset_val.indexes[\"instance\"].get_level_values(0)\n\n# transpose \ndset_train = dset_train.transpose('instance','lat','lon')\ndset_val = dset_val.transpose('instance','lat','lon')\n\n### Generate data for tensorflow \ndata_train = XrDataGenerator(dset_train, dset_train, {'t2m':None}, 't2m', norm=True, batch_size=batch_size, mean=None, std=None, shuffle=True, load=False)\ndata_val = XrDataGenerator(dset_val, dset_val, {'t2m':None}, 't2m', norm=True, batch_size=batch_size, mean=data_train.mean, std=data_train.std, shuffle=True, load=False)\n\n# ----------------------------------------------------------------------------\n### build the model \n\n# encoder \n\n# Input placeholder\noriginal = Input(shape=input_shape)\n\n# Resize to have dimensions divisible by 8\nresized = ResizeLayer(newsize=(176,360))(original)\n\n# # Wrap-around in longitude for periodic boundary conditions\n\npadded = PeriodicPadding2D(padd)(resized)\n\n# Encoding layers\nx = Conv2D(16, (3, 3), padding='same')(padded)\nx = LeakyReLU()(x)\nx = MaxPooling2D((2, 2), padding='same')(x)\nx = Conv2D(8, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\nx = MaxPooling2D((2, 2), padding='same')(x)\nx = Conv2D(8, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\n\nencoded = MaxPooling2D((2, 2), padding='same')(x)\n\n### decoder \n\n# Decoding layers\nx = Conv2D(8, (3, 3), padding='same')(encoded)\nx = LeakyReLU()(x)\nx = UpSampling2D((2, 2))(x)\nx = Conv2D(8, (3, 3), 
padding='same')(x)\nx = LeakyReLU()(x)\nx = UpSampling2D((2, 2))(x)\nx = Conv2D(16, (3, 3), padding='same')(x)\nx = LeakyReLU()(x)\nx = UpSampling2D((2, 2))(x)\ndecoded = Conv2D(1, (3, 3), padding='same')(x)\n\n# Strip the longitude wrap-around\npruned = PrunePeriodicPadding2D(padd)(decoded)\n\noutsize = ResizeLayer(newsize=input_shape[:2])(pruned)\n\nautoencoder = Model(original,outsize)\n\n### run ID \nrun_id = time.strftime(\"run_%Y_%m_%d-%H_%M_%S\")\n\n### ----------------------------------------------------------------------------\n### callbacks \n\n# checkpoints \n\ncheckpoint_cb = keras.callbacks.ModelCheckpoint(f\"./autoencoder_checkpoint_{run_id}_{compute}.h5\", save_best_only=True)\n\n# early stopping \n\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)\n\nroot_logdir = os.path.join(os.curdir, \"my_logs\")\n\ndef get_run_logdir(run_id):\n return os.path.join(root_logdir, run_id)\n\nrun_logdir = get_run_logdir(run_id)\n\nkeras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# tensorboard callback \ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir, profile_batch=0)\n\n### ---------------------------------------------------------------------------------------------------------------------\n### compile \n\nautoencoder.compile(optimizer='adam', loss='mean_squared_error')\n\nautoencoder.summary(line_length=120)\n\n### ---------------------------------------------------------------------------------------------------------------------\n### fit \n\n# ### Note: run tensorboard with: \n# \n# ```\n# tensorboard --logdir=./my_logs --port=6006\n# ```\n\n\nhistory = autoencoder.fit(data_train, validation_data=data_val, epochs=n_epochs, callbacks=[checkpoint_cb, early_stopping_cb, tensorboard_cb])\n\n### ---------------------------------------------------------------------------------------------------------------------\n### save model \n\nsaved_model = CWD / f\"saved_autoencoder_{run_id}_{n_epochs}_epochs_{compute}\" \n\nkeras.models.save_model(autoencoder, saved_model)\n\n### save history \nsaved_history = CWD / f\"saved_history_{run_id}_{n_epochs}_epochs_{compute}.pkl\" \n\npickle.dump(autoencoder.history.history, open(saved_history, \"wb\"))\n\n### ---------------------------------------------------------------------------------------------------------------------\n### Some plots \n\ni = 10\n\nX = data_val[0][0][i:i+1,:,:,:]\n\npred = autoencoder.predict(X)\n\npred = pred.squeeze()\n\nf, axes = plt.subplots(nrows=2, figsize=(10,16))\n\naxes = axes.flatten() \n\nax = axes[0]\n\nim = ax.imshow(data_val[0][0][i,::-1,:,0], vmin=-5, vmax=5, cmap=plt.cm.RdBu_r)\n\nax = axes[1]\n\nim = ax.imshow(pred[::-1,:], vmin=-5, vmax=5, cmap=plt.cm.RdBu_r) \n\nf.savefig(f'./preds_vs_inputs_{run_id}.png', dpi=200, bbox_inches='tight')\n\nf, ax = plt.subplots()\npd.DataFrame(history.history).plot(ax=ax, marker='o')\nax.grid(ls=':')\n\nf.savefig(f'./history_{run_id}.png', dpi=200, bbox_inches='tight')","sub_path":"features-extractors/dimensionality_reduction/CONV_AE/Convolutional_AE.py","file_name":"Convolutional_AE.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"78666593","text":"# -*- coding: utf-8 -*-\nfrom openerp import api, fields, models\n\nclass WebsiteSupportTicketInheritTimesheets(models.Model):\n\n _inherit = \"website.support.ticket\"\n \n timesheet_ids = fields.One2many('website.support.ticket.timesheet', 'wst_id', string=\"Timesheet\")\n\n @api.multi\n def invoice_client(self):\n self.ensure_one()\n\n invoiced_state = self.env['ir.model.data'].sudo().get_object('website_support_timesheets', 'website_ticket_state_invoiced')\n self.state = invoiced_state\n \n invoice_account = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1).id\n new_invoice = self.env['account.invoice'].create({'name': '', 'type': 'out_invoice', 'partner_id': self.partner_id.id, 'account_id': invoice_account, 'comment': 'Support Ticket #' + str(self.id) + \" \" + self.subject.encode(\"UTF-8\") })\n \n for timesheet in self.timesheet_ids:\n time_string = \"\"\n \n if timesheet.hours == 1:\n time_string += \"1 hour\"\n else:\n time_string += str(timesheet.hours) + \" hours\"\n\n time_string += \" and \"\n\n if timesheet.minutes == 1:\n time_string += \"1 minute\"\n else:\n time_string += str(timesheet.minutes) + \" minutes\" \n \n new_invoice.invoice_line_ids.create({'invoice_id': new_invoice.id, 'name': 'Support Ticket Service (' + time_string + ')', 'account_id': invoice_account, 'price_unit': '0'})\n \n return {\n\t 'name':\"Support Ticket Invoice\",\n\t 'view_mode': 'form',\n\t 'view_type': 'form',\n\t 'res_model': 'account.invoice',\n\t 'type': 'ir.actions.act_window',\n\t 'res_id': new_invoice.id,\n\t }\n \nclass WebsiteSupportTicketTimesheet(models.Model):\n\n _name = \"website.support.ticket.timesheet\"\n \n wst_id = fields.Many2one('website.support.ticket', string=\"Support Ticket\")\n hours = fields.Integer(string=\"Hours\")\n minutes = fields.Integer(sring=\"Minutes\")\n project_id = fields.Many2one('project.project', string=\"Project\")","sub_path":"website_support_timesheets/models/website_support_ticket.py","file_name":"website_support_ticket.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"532619402","text":"import glob\nimport os\nimport cv2\nimport torch\nfrom albumentations import Compose, Resize,Lambda\nimport segmentation_models_pytorch as smp\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\n\n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nLOAD_MODEL_DEPLOY_PATH = \"./model_ear/best_model_ear_v1_43.pth\"\nENCODER = 'resnet18'\nENCODER_WEIGHTS = 'imagenet'\nCLASSES = ['ear']\nACTIVATION = 'sigmoid'\nDEVICE = \"cpu\"\n\ndef get_validation_augmentation():\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\n test_transform = [\n Resize(height=320, width=480, always_apply=True),\n ]\n return Compose(test_transform)\n\ndef to_tensor(x, **kwargs):\n return x.transpose(2, 0, 1).astype('float32')\n\ndef get_preprocessing(preprocessing_fn):\n \"\"\"Construct preprocessing transform\n Args:\n preprocessing_fn (callbale): data normalization function \n (can be specific for each pretrained neural network)\n Return:\n transform: albumentations.Compose\n \"\"\"\n _transform = [\n Lambda(image=preprocessing_fn),\n Lambda(image=to_tensor),\n ]\n return Compose(_transform)\n\nif __name__ == \"__main__\":\n\n # create segmentation model with pretrained encoder\n model = smp.Unet(\n encoder_name=ENCODER,\n encoder_weights=ENCODER_WEIGHTS,\n classes=len(CLASSES),\n activation=ACTIVATION,\n )\n\n model = torch.load(LOAD_MODEL_DEPLOY_PATH, map_location=DEVICE)\n model.eval()\n model.to(DEVICE)\n \n preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)\n preprocessing = get_preprocessing(preprocessing_fn)\n\n data_dir = [\"./test-images\"]\n\n data_samples = []\n for _dir in data_dir:\n # JPEG\n _list_tif = glob.glob(_dir + '/*.jpg')\n data_samples += _list_tif\n\n for path in data_samples:\n\n img = cv2.imread(path)\n img = cv2.resize(img, (480,320))\n h, w = img.shape[:2]\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n with torch.no_grad():\n \n #tensor_img = my_transforms(image=image)['image'].unsqueeze(0)\n #predictions = model.forward(tensor_img.to(DEVICE))\n \n sample = preprocessing(image=image)\n image = sample['image']\n\n x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)\n \n pr_mask = model.predict(x_tensor)\n pr_mask = (pr_mask.squeeze().cpu().numpy().round())\n \n cv2.imshow('Ear Mask',pr_mask)\n cv2.imshow('Ear Image',img)\n cv2.waitKey(0)\n\n cv2.destroyAllWindows()\n","sub_path":"Deploy_ear_segmentation_image.py","file_name":"Deploy_ear_segmentation_image.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"262767637","text":"from io import BytesIO\nfrom PIL import Image\n\n\nclass CropShrinkImageMixin:\n\n def shrink_image(self, field_name, resize_shape):\n img: Image = Image.open(getattr(self, field_name))\n img.thumbnail(self.get_shrinked_size(field_name, resize_shape), Image.ANTIALIAS)\n image_file = BytesIO()\n img.save(image_file, 'png')\n getattr(self, field_name).file = image_file\n\n def get_shrinked_size(self, field_name, resize_shape):\n actual_img_width, actual_img_height = getattr(self, field_name).width, getattr(self, field_name).height\n ratio = min(resize_shape[0] / actual_img_width, resize_shape[1] / actual_img_height)\n return int(actual_img_width * ratio), int(actual_img_height * ratio)\n\n def crop_image(self, field_name, resize_shape):\n img: Image = Image.open(getattr(self, field_name))\n new_width = resize_shape[0]\n new_height = resize_shape[1]\n left = (img.width - new_width) / 2\n top = (img.height - new_height) / 2\n right = (img.width + new_width) / 2\n bottom = (img.height + new_height) / 2\n img = img.crop((left, top, right, bottom))\n image_file = BytesIO()\n img.save(image_file, format='png')\n getattr(self, field_name).file = image_file\n","sub_path":"blog/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"641573321","text":"from __future__ import unicode_literals\n\nfrom uuid import uuid4\n\nfrom carteblanche.mixins import NounView\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.list import ListView\n\n# from core.verbs import NounView\nimport core.models as cm\nimport core.forms as cf\nfrom actstream import action\nimport actstream.models as am\nfrom django.contrib.auth import login, logout\nimport decimal\nimport forms_builder.forms.models as fm\nfrom django.views.generic.edit import CreateView, UpdateView\nimport datetime, time\nfrom dateutil.relativedelta import relativedelta\nfrom django.utils import timezone\n\nfrom utils.utils import retrieve_leaf_dimensions\n\n# do weird stuff to mAake user names nou usernames show up\n\n\ndef user_new_unicode(self):\n return self.get_full_name()\n\n\n# Replace the __unicode__ method in the User class with out new implementation\nUser.__unicode__ = user_new_unicode\n\n\ndef decimal_default(obj):\n if isinstance(obj, decimal.Decimal):\n return float(obj)\n\n\nclass SiteRootView(NounView):\n def get_noun(self, **kwargs):\n siteroot = cm.SiteRoot()\n return siteroot\n\n\nclass MessageView(SiteRootView, TemplateView):\n template_name = 'base/messages.html'\n message = 'Message goes here.'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(MessageView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['message'] = self.message\n return context\n\n\nclass LandingView(SiteRootView, TemplateView):\n template_name = 'base/bootstrap.html'\n\n def get(self, request, **kwargs):\n # if the user has no payment methods, redirect to the view where one can be created\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse(viewname='location_list', current_app='core'))\n else:\n return super(LandingView, self).get(request, **kwargs)\n\n logout(self.request)\n\n\nclass BootstrapView(TemplateView):\n template_name = 'grid.html'\n\n\nclass AjaxableResponseMixin(object):\n \"\"\"\n Mixin to add AJAX support to a form.\n Must be used with an object-based FormView (e.g. 
CreateView)\n \"\"\"\n\n def render_to_json_response(self, context, **response_kwargs):\n data = json.dumps(context)\n response_kwargs['content_type'] = 'application/json'\n return HttpResponse(data, **response_kwargs)\n\n def form_invalid(self, form):\n response = super(AjaxableResponseMixin, self).form_invalid(form)\n if self.request.is_ajax():\n return self.render_to_json_response(form.errors, status=400)\n else:\n return response\n\n def form_valid(self, form):\n # We make sure to call the parent's form_valid() method because\n # it might do some processing (in the case of CreateView, it will\n # call form.save() for example).\n response = super(AjaxableResponseMixin, self).form_valid(form)\n if self.request.is_ajax():\n data = {\n 'pk': self.noun.pk,\n }\n return self.render_to_json_response(data)\n else:\n return response\n\n\nclass UserCreateView(SiteRootView, FormView):\n model = User\n template_name = 'base/form.html'\n form_class = cf.RegistrationForm\n\n def form_valid(self, form):\n user = User.objects.create_user(uuid4().hex[:30], form.cleaned_data['email'],\n form.cleaned_data['password1'])\n user.first_name = form.cleaned_data['first_name']\n user.last_name = form.cleaned_data['last_name']\n user.save()\n self.object = user\n locations = form.cleaned_data['locations']\n for l in locations:\n l.members.add(user)\n l.save()\n return super(UserCreateView, self).form_valid(form)\n\n def get_success_url(self):\n action.send(self.request.user, verb='created user', action_object=self.object,\n target=self.request.user)\n return reverse(viewname='make_new_user', current_app='core')\n\n def get_success_message(self, cleaned_data):\n first_name = cleaned_data['first_name']\n last_name = cleaned_data['last_name']\n locations = list(cleaned_data['locations'])\n location_names = \"\"\n if len(locations) > 0:\n for l in locations[:-1]:\n location_names += l.title + \", \"\n location_names += \" and \" + locations[-1].title + \".\"\n else:\n location_names = \"no locations.\"\n return first_name + \" \" + last_name + \" now has an account. 
They are assigned to \" + location_names + \" Make another new user or return to the indicator.\"\n\n\nclass ProgressListView(SiteRootView, TemplateView):\n template_name = 'base/progress.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(ProgressListView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['locations'] = cm.Location.objects.filter(\n title__icontains=self.kwargs['tag']).order_by('title')\n return context\n\n\nclass UserLoginView(SiteRootView, FormView):\n template_name = 'base/form.html'\n form_class = cf.LoginForm\n success_url = '/'\n\n def form_valid(self, form):\n user = form.user_cache\n login(self.request, user)\n form.instance = user\n\n if self.request.is_ajax():\n context = {\n 'status': 'success',\n 'userid': user.id,\n 'sessionid': self.request.session.session_key\n }\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n else:\n return super(UserLoginView, self).form_valid(form)\n\n def form_invalid(self, form):\n response = super(UserLoginView, self).form_invalid(form)\n if self.request.is_ajax():\n return self.render_to_json_response({\"errors\": form.errors, \"status\": \"failure\",})\n else:\n return response\n\n def render_to_json_response(self, context, **response_kwargs):\n data = json.dumps(context)\n response_kwargs['content_type'] = 'application/json'\n return HttpResponse(data, **response_kwargs)\n\n\nclass UserView(NounView):\n def get_noun(self, **kwargs):\n user = User.objects.get(id=self.kwargs['pk'])\n coreuser = cm.CoreUser(user)\n user.required_verbs = coreuser.verb_classes\n user.get_verbs = coreuser.get_verbs\n user.get_available_verbs = coreuser.get_available_verbs\n user.conditions = coreuser.conditions\n return user\n\n\nclass UserDetailView(UserView, TemplateView):\n model = User\n template_name = 'base/bootstrap.html'\n\n\nclass UserPasswordResetView(UserView, FormView):\n model = User\n template_name = 'base/form.html'\n form_class = cf.PasswordResetForm\n\n def form_valid(self, form):\n user = User.objects.get(id=self.kwargs['pk'])\n password = form.cleaned_data['password1']\n user.set_password(password)\n user.save()\n return super(UserPasswordResetView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse(viewname='user_list', current_app='core')\n\n def get_success_message(self, cleaned_data):\n return \"Password reset.\"\n\n\nclass UserLogoutView(SiteRootView, TemplateView):\n template_name = 'bootstrap.html'\n\n def get(self, request, **kwargs):\n # if the user has no payment methods, redirect to the view where one can be created\n logout(self.request)\n return HttpResponseRedirect(reverse(viewname='location_list', current_app='core'))\n\n\nclass UserListView(SiteRootView, TemplateView):\n template_name = 'user/list.html'\n\n def get_context_data(self, **kwargs):\n context = super(UserListView, self).get_context_data(**kwargs)\n users = User.objects.filter(is_active=True).order_by('first_name', 'last_name')\n context['users'] = users\n locationusers = []\n for u in users:\n u.locations_volatile = u.location_set.all().order_by('title')\n locationusers.append(u)\n context['locationusers'] = locationusers\n return context\n\n\nfrom django.views.generic.edit import DeleteView\nfrom django.core.urlresolvers import reverse_lazy\n\n\nclass UserDeactivateView(UserView, DeleteView):\n model = User\n template_name = 
'user/deactivate.html'\n success_url = reverse_lazy('user_list')\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n Replaces the delete() method, deactivates the user instead\n \"\"\"\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.is_active = False\n self.object.save()\n return HttpResponseRedirect(success_url)\n\n\nclass UserUpdateView(UserView, UpdateView):\n model = User\n template_name = 'base/form.html'\n\n def get_form_class(self):\n return cf.get_user_form_class(self.get_noun())\n\n def form_valid(self, form):\n user = self.get_noun()\n new_locations = form.cleaned_data['locations']\n current_locations = user.location_set.all()\n for l in current_locations:\n if l not in new_locations:\n l.members.remove(user)\n for l in new_locations:\n if l not in current_locations:\n l.members.add(user)\n return super(UserUpdateView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse('user_list')\n\n\nclass LocationCreateView(SiteRootView, CreateView):\n model = cm.Location\n template_name = 'base/form.html'\n fields = '__all__'\n form_class = cf.LocationForm\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationCreateView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n def get_success_url(self):\n action.send(self.request.user, verb='created location', action_object=self.object)\n return reverse(viewname='location_detail', args=(self.object.id,), current_app='core')\n\n\nclass LocationListView(SiteRootView, TemplateView):\n model = cm.Location\n template_name = 'overview/map.html'\n\n def get_context_data(self, **kwargs):\n context = super(LocationListView, self).get_context_data(**kwargs)\n\n output = []\n if self.request.user.is_staff:\n locations = cm.Location.objects.all().order_by('title')\n else:\n locations = self.request.user.location_set.all()\n\n if self.request.is_ajax():\n for l in locations:\n blob = {\n 'id': l.id,\n 'lattitude': l.position.latitude,\n 'longitude': l.position.longitude,\n 'title': l.title,\n 'indicator_ids': l.get_indicator_ids()\n }\n output.append(blob)\n context['locations'] = output\n else:\n context['locations'] = locations\n dimensions_qs = cm.Dimension.objects.select_related('parent') \\\n .all().order_by('name')\n # '#' stands for no parent(root) in jstree plugin\n context['dimensions'] = map(lambda obj: dict(\n id=obj.id,\n parent=obj.parent.id if obj.parent else '#',\n text=obj.name,\n name=obj.name, # alias\n icon='no-icon' # class for avoiding icon\n ), dimensions_qs)\n context['stream'] = []\n # context['stream'] = am.Action.objects.all()[:40]\n return context\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n context = self.get_context_data(**kwargs)\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return super(LocationListView, self).get(request, *args, **kwargs)\n\n\nclass PlainLocationListView(SiteRootView, TemplateView):\n model = cm.Location\n template_name = 'overview/map.html'\n\n def get_context_data(self, **kwargs):\n context = super(PlainLocationListView, self).get_context_data(**kwargs)\n dimension_id = self.request.GET.get('dimension', None)\n output = []\n if self.request.user.is_staff:\n locations = 
cm.Location.objects.all()\n else:\n locations = self.request.user.location_set.all()\n\n if dimension_id:\n locations = locations.filter(dimensionpath__dimension=dimension_id)\n\n locations = locations.order_by('title')\n\n if self.request.is_ajax():\n for l in locations:\n blob = {\n 'id': l.id,\n 'lattitude': l.position.latitude if l.position else '0',\n 'longitude': l.position.longitude if l.position else '0',\n 'title': l.title,\n }\n output.append(blob)\n context['locations'] = output\n else:\n context['locations'] = locations\n context['stream'] = []\n # context['stream'] = am.Action.objects.all()[:40]\n return context\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n context = self.get_context_data(**kwargs)\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return super(PlainLocationListView, self).get(request, *args, **kwargs)\n\n\nclass LocationListStreamView(SiteRootView, ListView):\n model = am.Action\n template_name = 'overview/map.html'\n paginate_by = 10\n context_object_name = 'stream'\n queryset = am.Action.objects.all().select_related('actor', 'action_object', 'target')\n\n\nclass LocationView(NounView):\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['pk'])\n\n def get_context_data(self, **kwargs):\n context = super(LocationView, self).get_context_data(**kwargs)\n context[\"background_image_url\"] = self.get_noun().get_background_image_url()\n return context\n\n\nclass LocationUpdateView(LocationView, UpdateView):\n model = cm.Location\n template_name = 'base/form.html'\n success_url = '/'\n form_class = cf.LocationForm\n\n def get_success_url(self):\n action.send(self.request.user, verb='updated location', action_object=self.get_noun())\n return reverse(viewname='location_detail', args=(self.noun.id,), current_app='core')\n\n\nclass LocationDetailView(LocationView, TemplateView):\n model = cm.Location\n template_name = 'location/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(LocationDetailView, self).get_context_data(**kwargs)\n most_recent_image = self.noun.get_most_recent_image()\n if most_recent_image != None:\n context[\"most_recent_image_url\"] = most_recent_image.get_file_url()\n # context[\"stream\"] = self.noun.get_action_stream()[:40]\n context['stream'] = []\n return context\n\n\nclass LocationDetailStreamView(LocationView, ListView):\n model = am.Action\n template_name = 'location/detail.html'\n paginate_by = 10\n context_object_name = 'stream'\n\n def get_queryset(self, **kwargs):\n return self.noun.get_action_stream().select_related('actor', 'action_object', 'target')\n\n\nclass LocationPhotoListView(LocationView, ListView):\n template_name = 'location/photos.html'\n model = cm.Image\n paginate_by = 5\n\n def get_queryset(self):\n return self.get_noun().images.all()\n\n\nclass LocationIndicatorListlView(LocationView, TemplateView):\n model = cm.Location\n template_name = 'location/indicators.html'\n\n def get_context_data(self, **kwargs):\n context = super(LocationIndicatorListlView, self).get_context_data(**kwargs)\n # context['stream'] = self.noun.get_action_stream()[:40]\n context['stream'] = []\n context['indicators'] = self.noun.indicators.all().order_by('form_number', 'title')\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n return context\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationIndicatorListlView, self).get(request, *args, 
**kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n return supes\n\n\ntry:\n import xlwt\n\n XLWT_INSTALLED = True\n XLWT_DATETIME_STYLE = xlwt.easyxf(num_format_str='MM/YYYY')\nexcept ImportError:\n XLWT_INSTALLED = False\nfrom io import BytesIO\nfrom forms_builder.forms.utils import slugify\n\nimport re\n\n\nclass EntriesFilterView(SiteRootView, FormView):\n model = cm.Location\n template_name = 'base/form.html'\n form_class = cf.SavedFilterForm\n worksheet_names = {}\n\n def sanitize_worksheet_name(self, incoming):\n stripped_name = re.sub(r'[\\W_]+', ' ', incoming[:31])\n if stripped_name in self.worksheet_names:\n self.worksheet_names[stripped_name] += 1;\n return stripped_name[:25] + \" \" + str(self.worksheet_names[stripped_name])\n else:\n self.worksheet_names[stripped_name] = 1\n return stripped_name\n\n def add_indicator_to_workbook(self, indicator, workbook, columns, saved_filter):\n sheet = workbook.add_sheet(self.sanitize_worksheet_name(indicator.get_title()))\n for c, col in enumerate(columns):\n sheet.write(0, c, col)\n for r, row in enumerate(indicator.get_filtered_entries(saved_filter, csv=True)):\n for c, item in enumerate(row):\n if isinstance(item, datetime.datetime):\n item = item.replace(tzinfo=None)\n sheet.write(r + 2, c, item, XLWT_DATETIME_STYLE)\n else:\n sheet.write(r + 2, c, item)\n\n return workbook\n\n def form_valid(self, form):\n try:\n show_hidden_fields = form.cleaned_data['show_hidden']\n except Exception as e:\n show_hidden = False\n\n try:\n indicator = form.cleaned_data['indicator']\n columns = indicator.get_column_headers(show_hidden=show_hidden)\n except Exception as e:\n indicator = None\n\n if form.cleaned_data['export'] == True:\n response = HttpResponse(mimetype=\"application/vnd.ms-excel\")\n fname = \"%s-%s.xls\" % (\"QI Data Export\", slugify(now().ctime()))\n attachment = \"attachment; filename=%s\" % fname\n response[\"Content-Disposition\"] = attachment\n queue = BytesIO()\n workbook = xlwt.Workbook(encoding='utf8')\n if indicator == None:\n for i in cm.Indicator.objects.all().order_by(\"form_number\"):\n columns = i.get_column_headers(show_hidden=show_hidden)\n workbook = self.add_indicator_to_workbook(i, workbook, columns,\n form.cleaned_data)\n else:\n workbook = self.add_indicator_to_workbook(indicator, workbook, columns,\n form.cleaned_data)\n workbook.save(queue)\n data = queue.getvalue()\n response.write(data)\n return response\n else:\n context = {\n \"columns\": columns,\n \"entries\": indicator.get_filtered_entries(form.cleaned_data, csv=False,\n show_hidden=show_hidden),\n \"available_verbs\": self.noun.get_available_verbs(self.request.user),\n \"filter\": form.cleaned_data\n }\n return render_to_response('indicator/entries.html',\n context,\n context_instance=RequestContext(self.request))\n\n def get_form_kwargs(self):\n kwargs = super(EntriesFilterView, self).get_form_kwargs()\n kwargs['ajax_location'] = True\n return kwargs\n\n\nclass ScoresDetailView(SiteRootView, FormView):\n template_name = 'overview/scores.html'\n form_class = cf.DateForm\n\n def form_valid(self, form):\n the_date = form.cleaned_data['date']\n\n return HttpResponseRedirect(reverse(viewname='scores_date_list',\n kwargs={'month': the_date.month, 'year': the_date.year},\n current_app='core'))\n\n def get_context_data(self, **kwargs):\n context = super(ScoresDetailView, 
self).get_context_data(**kwargs)\n NOT_ASSIGNED_STRING = \"N/A\"\n NO_DATA_STRING = \"N/D\"\n try:\n month = int(self.kwargs['month'])\n year = int(self.kwargs['year'])\n except Exception as e:\n d = datetime.datetime.now()\n month = d.month\n year = d.year\n queryset = cm.Indicator.objects.all().order_by(\"form_number\")\n columns = list(queryset.values_list('title', flat=True))\n indicator_ids = list(queryset.values_list('id', flat=True))\n # get all scores for this month\n rows = {}\n for l in cm.Location.objects.select_related('indicators').all():\n l_assignments = l.get_indicator_ids()\n # rows[l.id] = [l.title]+([NOT_ASSIGNED_STRING]*len(columns))\n l_cols = []\n for lc in indicator_ids:\n # if the column is assigned to this l, fill it with N/D\n if lc in l_assignments:\n l_cols.append(NO_DATA_STRING)\n else:\n # else fill it with N/A\n l_cols.append(NOT_ASSIGNED_STRING)\n rows[l.id] = [l.title] + (l_cols)\n # add space to the begininbg of columns for the location names\n columns = [\"Location\"] + columns\n for s in cm.Score.objects.filter(month=str(month), year=year):\n # add the score object to the table if it exists\n indicator_index = indicator_ids.index(s.indicator.id) + 1\n if type(rows[s.location.id][indicator_index]) == unicode:\n rows[s.location.id][indicator_index] = s\n else:\n rows[s.location.id][indicator_index].merge(s)\n\n this_month = datetime.date(year, month, 1)\n\n # raise Exception(rows)\n context['this_month'] = this_month\n context['last_month'] = this_month - relativedelta(months=1)\n context['next_month'] = this_month + relativedelta(months=1)\n context['columns'] = columns\n context['entries'] = rows.values()\n return context\n\n\nclass LocationImageCreateView(LocationView, CreateView):\n model = cm.Image\n template_name = 'base/form.html'\n fields = ['original_file']\n\n def get_form(self, form_class):\n return cf.ImageForm(self.request.POST or None, self.request.FILES or None,\n initial=self.get_initial())\n\n def form_valid(self, form):\n return super(LocationImageCreateView, self).form_valid(form)\n\n def get_success_url(self):\n self.noun.images.add(self.object)\n action.send(self.request.user, verb='uploaded image', action_object=self.object,\n target=self.noun)\n return reverse(viewname='location_detail', args=(self.noun.id,), current_app='core')\n\n\nclass IndicatorCreateView(SiteRootView, CreateView):\n model = cm.Indicator\n template_name = 'base/form.html'\n form_class = cf.IndicatorForm\n\n def form_valid(self, form):\n new_form = fm.Form.objects.create(title=form.cleaned_data['title'][0:50])\n location_field = fm.Field.objects.create(form=new_form, field_type=1, label=\"Location\",\n visible=False, order=-2)\n location_field = fm.Field.objects.create(form=new_form, field_type=1, label=\"User\",\n visible=False, order=-1)\n location_field = fm.Field.objects.create(form=new_form, field_type=13, label=\"Score\",\n visible=False, order=0)\n form.instance.form = new_form\n self.instance = form.instance\n # action.send(self.request.user, verb='created', action_object=self.object, target=self.object)\n return super(IndicatorCreateView, self).form_valid(form)\n\n def get_success_url(self):\n action.send(self.request.user, verb='created indicator', action_object=self.instance)\n return reverse(viewname='field_create', args=(self.instance.id,), current_app='core')\n\n\nclass IndicatorView(NounView):\n def get_noun(self, **kwargs):\n return cm.Indicator.objects.get(id=self.kwargs['pk'])\n\n\nclass IndicatorUpdateView(IndicatorView, UpdateView):\n model = 
cm.Indicator\n template_name = 'base/form.html'\n success_url = '/'\n form_class = cf.IndicatorForm\n\n def get_success_url(self):\n self.get_noun().updated_at = datetime.datetime.now()\n action.send(self.request.user, verb='updated indicator', action_object=self.get_noun())\n return reverse(viewname='indicator_detail', args=(self.noun.id,), current_app='core')\n\n\nclass IndicatorDetailView(IndicatorView, TemplateView):\n model = cm.Indicator\n template_name = 'indicator/list.html'\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorDetailView, self).get_context_data(**kwargs)\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n # context['stream'] = self.noun.get_action_stream()[:40]\n context['stream'] = []\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n return context\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorDetailView, self).get_context_data(**kwargs)\n indicators = []\n indicators.append(self.noun.get_serialized())\n context['indicators'] = indicators\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n\n return context\n\n def get(self, request, *args, **kwargs):\n supes = super(IndicatorDetailView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass IndicatorListView(SiteRootView, TemplateView):\n model = cm.Indicator\n template_name = 'indicator/list.html'\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorListView, self).get_context_data(**kwargs)\n indicators = []\n for l in cm.Indicator.objects.all().order_by('form_number'):\n blob = l.get_serialized()\n indicators.append(blob)\n context['indicators'] = indicators\n context['ILLEGAL_FIELD_LABELS'] = cm.ILLEGAL_FIELD_LABELS\n return context\n\n def get(self, request, *args, **kwargs):\n supes = super(IndicatorListView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass FieldCreateView(IndicatorView, FormView):\n model = fm.Field\n template_name = 'base/form.html'\n\n def get_form(self, form_class):\n return cf.FieldForm(self.request.POST or None, self.request.FILES or None,\n initial=self.get_initial())\n\n def form_valid(self, form):\n form.instance.form = self.noun.form\n form.instance.required = False\n self.object = form.instance.save()\n self.instance = form.instance\n return super(FieldCreateView, self).form_valid(form)\n\n def get_success_url(self):\n action.send(self.request.user, verb='created field', action_object=self.instance,\n target=self.noun)\n return reverse(viewname='field_create', args=(self.noun.id,), current_app='core')\n\n def get_success_message(self, cleaned_data):\n return \"Your field was created. 
Make another new field or return to the indicator.\"\n\n\nclass FieldUpdateView(IndicatorView, UpdateView):\n model = fm.Field\n template_name = 'base/form.html'\n success_url = '/'\n\n def get_noun(self, **kwargs):\n return cm.Indicator.objects.get(id=self.kwargs['indicator_pk'])\n\n def get_object(self):\n output = get_object_or_404(fm.Field, id=self.kwargs[\"pk\"])\n return output\n\n def get_form(self, form_class):\n return cf.FieldForm(self.request.POST or None, self.request.FILES or None,\n initial=self.get_initial(), instance=self.get_object())\n\n def get_success_url(self):\n action.send(self.request.user, verb='updated field', action_object=self.get_object(),\n target=self.noun)\n return reverse(viewname='indicator_detail', args=(self.noun.id,), current_app='core')\n\n\nimport json\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.views.generic.base import TemplateView\n\nfrom forms_builder.forms.forms import FormForForm\nfrom forms_builder.forms.models import Form\n\nfrom forms_builder.forms.signals import form_invalid, form_valid\n\nfrom django.contrib import messages\n\n\nclass IndicatorRecordCreateView(LocationView, TemplateView):\n template_name = \"base/form.html\"\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def prep_form(self, form):\n # form.fields.__delitem__('location')\n # form.fields.__delitem__('user')\n return form\n\n def get_context_data(self, **kwargs):\n context = super(IndicatorRecordCreateView, self).get_context_data(**kwargs)\n published = Form.objects.published(for_user=self.request.user)\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n form = indicator.get_form()\n form = self.prep_form(form)\n context[\"form\"] = form\n context[\"indicator\"] = indicator\n return context\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n # throw an error if this user is not authorized\n # TODO: find a way to do this with carteblanche\n if (self.request.user.is_staff != True) and (\n self.noun.members.filter(id=self.request.user.id).count() == 0):\n raise Exception(\n \"You tried to create a record with a location you're not assigned to. 
You must be an Admin or a member of \" + self.noun.title + \" to create a new record.\")\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n builder_form_object = indicator.get_builder_form_object()\n form = FormForForm(builder_form_object, RequestContext(request),\n request.POST or None,\n request.FILES or None)\n if not form.is_valid():\n form_invalid.send(sender=request, form=self.form_for_form)\n else:\n # Attachments read must occur before model save,\n # or seek() will fail on large uploads.\n attachments = []\n for f in form.files.values():\n f.seek(0)\n attachments.append((f.name, f.read()))\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n location = get_object_or_404(cm.Location, id=kwargs[\"location_pk\"])\n form.cleaned_data[\"user\"] = request.user.get_full_name()\n form.cleaned_data[\"location\"] = location.__str__()\n entry = form.save()\n form_valid.send(sender=request, form=form, entry=entry)\n form = self.prep_form(form)\n score = indicator.score_entry(entry)\n context = self.get_context_data(**kwargs)\n if score >= indicator.passing_percentage:\n messages.success(request, 'Passing score of ' + str(score))\n action.send(self.request.user, verb='entered passing record',\n action_object=context.get(\"indicator\"), target=self.noun)\n else:\n messages.error(request, 'Not passing score of ' + str(score))\n action.send(self.request.user, verb='entered failing record',\n action_object=context.get(\"indicator\"), target=self.noun)\n return HttpResponseRedirect(reverse(viewname='indicator_record_create',\n args=(kwargs['location_pk'], kwargs['pk'],),\n current_app='core'))\n\n context = {\"builder_form_object\": builder_form_object, \"form\": form}\n return self.render_to_response(context)\n\n def render_to_response(self, context, **kwargs):\n if self.request.is_ajax():\n json_context = json.dumps({\n \"errors\": context[\"form_for_form\"].errors,\n \"form\": context[\"form_for_form\"].as_p(),\n \"message\": context[\"form\"].response,\n })\n return HttpResponse(json_context, content_type=\"application/json\")\n return super(IndicatorRecordCreateView, self).render_to_response(context, **kwargs)\n\n\nform_detail = IndicatorRecordCreateView.as_view()\n\nfrom forms_builder.forms.utils import now\n\n\nclass IndicatorRecordUploadView(LocationView, FormView):\n template_name = 'base/form.html'\n form_class = cf.JSONUploadForm\n success_url = '/'\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def form_valid(self, form):\n try:\n # throw an error if this user is not authorized\n # TODO: find a way to do this with carteblanche\n if (self.request.user.is_staff != True) and (\n self.noun.members.filter(id=self.request.user.id).count() == 0):\n raise Exception(\n \"You tried to synchronize a record with a location you're not assigned to. You must be an Admin or a member of \" + self.noun.title + \" to upload a new record.\")\n json_string = form.cleaned_data['json']\n\n data = json.loads(json_string, parse_float=decimal.Decimal)\n day = 1\n try:\n day = int(data.get(\"day\"))\n except Exception as e:\n pass\n new_entry_time = timezone.datetime(year=int(data.get(\"year\")),\n month=int(data.get(\"month\")), day=day)\n\n # create field entries for incoming data. 
Don't save them until we're done\n fieldEntries = []\n for f in data.get(\"values\"):\n field_id = f.get(\"field_id\")\n new_value = f.get(\"value\")\n if new_value == True:\n new_value = u\"True\"\n elif new_value == False:\n new_value = u\"False\"\n new_fieldEntry = fm.FieldEntry(value=new_value, field_id=field_id)\n fieldEntries.append(new_fieldEntry)\n if fieldEntries.__len__() > 0:\n # if there are entries, create a new record\n form_id = fm.Field.objects.get(id=field_id).form_id\n new_record = fm.FormEntry(entry_time=new_entry_time, form_id=form_id)\n new_record.save()\n for f in fieldEntries:\n # connect the entries to the record\n f.entry_id = new_record.id\n f.save()\n # create entries for location and user data\n score = float(data.get(\"score\"))\n builder_form = fm.Form.objects.get(id=form_id)\n new_locationEntry = fm.FieldEntry(value=self.get_noun().__str__(),\n field_id=builder_form.fields.get(\n label=\"Location\").id, entry_id=new_record.id)\n new_locationEntry.save()\n new_userEntry = fm.FieldEntry(value=self.request.user.get_full_name(),\n field_id=builder_form.fields.get(label=\"User\").id,\n entry_id=new_record.id)\n new_userEntry.save()\n new_scoreEntry = fm.FieldEntry(value=score,\n field_id=builder_form.fields.get(label=\"Score\").id,\n entry_id=new_record.id)\n new_scoreEntry.save()\n\n # take the score from the json and create an action\n indicator = cm.Indicator.objects.get(form__id=form_id)\n if score == 100:\n messages.success(self.request, 'Passing score of ' + str(score))\n action.send(self.request.user, verb='PASS ' + str(score),\n action_object=indicator, target=self.noun)\n else:\n messages.error(self.request, 'Not passing score of ' + str(score))\n action.send(self.request.user, verb='FAIL ' + str(score),\n action_object=indicator, target=self.noun)\n context = {\n \"status\": \"success\",\n \"record_id\": new_record.id\n }\n except Exception as e:\n context = {\n \"status\": \"failure\",\n \"error\": e\n }\n messages.error(self.request, e)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n else:\n\n return super(IndicatorRecordUploadView, self).form_valid(form)\n\n def get(self, request, *args, **kwargs):\n\n supes = super(IndicatorRecordUploadView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\n'''\nincoming json looks like:\n{\n \"title\":\"blah blah blah\",\n \"scores\":[\n {\n \"percentage\":100.00,\n \"indicator_id\":0,\n \"location_id\":0,\n \"passing\":true,\n \"total_record_count\":0,\n \"passing_record_count\":0\n }\n ]\n}\n'''\n\n\nclass LocationScoreUploadView(LocationView, FormView):\n template_name = 'base/form.html'\n form_class = cf.JSONUploadForm\n success_url = '/'\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def form_valid(self, form):\n json_string = form.cleaned_data['json']\n\n try:\n data = json.loads(json_string, parse_float=decimal.Decimal)\n new_scores = []\n for s in data.get(\"scores\"):\n # print type(s)\n # check to make sure the location matches\n\n if int(s.get(\"location_id\")) != self.noun.id:\n raise Exception(\"wrong score for this location\")\n indicator_id = s.get(\"indicator_id\")\n indicator = 
cm.Indicator.objects.get(id=indicator_id)\n # create but don't save untill all are created\n t = datetime.datetime(year=s.get(\"year\"), month=s.get(\"month\"), day=1)\n new_score = cm.Score(indicator=indicator, passing=s.get(\"passing\"),\n entry_count=s.get(\"total_record_count\"),\n passing_entry_count=s.get(\"passing_record_count\"),\n month=str(s.get(\"month\")), year=s.get(\"year\"),\n score=s.get(\"percentage\"), location=self.noun,\n user=self.request.user, datetime=t)\n new_scores.append(new_score)\n if settings.CACHING:\n self.noun.invalidate_cached_series(indicator)\n # if nothing blew up, lets save these and invalidate the cached series data\n for s in new_scores:\n s.save()\n context = {\n \"status\": \"success\",\n \"score_id\": 0\n }\n except Exception as e:\n context = {\n \"status\": \"failure\",\n \"error\": e\n }\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n else:\n return super(LocationScoreUploadView, self).form_valid(form)\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationScoreUploadView, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass LocationIndicatorVisualize(LocationView, TemplateView):\n template_name = \"location/visualize.html\"\n\n def get_noun(self, **kwargs):\n return cm.Location.objects.get(id=self.kwargs['location_pk'])\n\n def get(self, request, *args, **kwargs):\n supes = super(LocationIndicatorVisualize, self).get(request, *args, **kwargs)\n context = self.get_context_data(**kwargs)\n if self.request.is_ajax():\n t = datetime.datetime.now()\n year_ago = t - relativedelta(months=12)\n indicator = get_object_or_404(cm.Indicator, id=kwargs[\"pk\"])\n # get all scores for this location/indicator from the last year\n scores = cm.Score.objects.filter(indicator__id=kwargs[\"pk\"],\n location__id=kwargs['location_pk'],\n datetime__gte=year_ago).order_by('datetime')\n # iterate over scores averaging them if there are more than one per month\n\n data = []\n for s in scores:\n # multiplied by 1000 because apparently js doesn't understand utc\n blob = [time.mktime(s.datetime.timetuple()) * 1000, s.score]\n data.append(blob)\n output = {\n \"name\": self.noun.title,\n \"id\": self.noun.id,\n \"data\": data\n }\n context = self.get_context_data(**kwargs)\n context[\"series\"] = [output]\n context[\"noun\"] = {\"title\": self.noun.title}\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return supes\n\n\nclass LocationVisualize(LocationView, TemplateView):\n template_name = \"location/visualize.html\"\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n # store these results in a new series\n # add the series to\n context = self.get_context_data(**kwargs)\n context[\"series\"] = self.noun.get_all_series()\n context[\"noun_title\"] = self.noun.title\n context[\"location_id\"] = self.noun.id\n context[\"noun\"] = {\"title\": self.noun.title}\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n return super(LocationVisualize, self).get(request, *args, 
**kwargs)\n\n\nclass LocationListVisualizeView(SiteRootView, TemplateView):\n template_name = \"overview/visualize.html\"\n\n def get_context_data(self, **kwargs):\n\n # Call the base implementation first to get a context\n context = super(LocationListVisualizeView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['indicators'] = cm.Indicator.objects.all().order_by('form_number')\n return context\n\n def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n all_series = []\n # for every location, get all_series\n for l in cm.Location.objects.filter(id=21).prefetch_related('indicators'):\n all_series.append(l.get_all_series())\n # store these results in a new series\n # add the series to\n context = self.get_context_data(**kwargs)\n context[\"series\"] = all_series\n context[\"noun_title\"] = \"Overview\"\n context[\"location_id\"] = \"-2\"\n context[\"noun\"] = {\"title\": \"Overview\"}\n data = json.dumps(context, default=decimal_default)\n out_kwargs = {'content_type': 'application/json'}\n return HttpResponse(data, **out_kwargs)\n\n dimension_qs = retrieve_leaf_dimensions().order_by('name')\n kwargs['dimensions'] = dimension_qs\n\n return super(LocationListVisualizeView, self).get(request, *args, **kwargs)\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":46507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"39822189","text":"def modify(arr, n): \n \n # Nothing to do when array size is 1 \n if n <= 1: \n return\n \n # store current value of arr[0] and update it \n prev = arr[0] \n arr[0] = arr[0] * arr[1] \n \n # Update rest of the array elements \n for i in range(1, n-1): \n \n # Store current value of next interation \n curr = arr[i]; \n \n # Update current value using previos value \n arr[i] = prev * arr[i+1] \n \n # Update previous value \n prev = curr \n \n \n # Update last array element \n arr[n-1] = prev * arr[n-1] \n \n \n# Driver program \narr = [2, 3, 4, 5, 6] \nn = len(arr) \nmodify(arr, n) \nfor i in range (0, n): \n print(arr[i],end=\" \") ","sub_path":"data/docker-generated-data/code_clones/p28.py","file_name":"p28.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"409446656","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport os.path\nimport urllib\nfrom requests.sessions import Session\n\nfrom bottle import redirect\nfrom cony.utils import force_str\nfrom xml.etree import ElementTree as ET\n\nxhtml = '{http://www.w3.org/1999/xhtml}'\n\n\ndef cmd_translate(term):\n \"\"\"Translates the text using Google Translate.\"\"\"\n if len(term) < len(term.encode('utf-8')):\n direction = 'ru|en'\n else:\n direction = 'en|ru'\n redirect('http://translate.google.com/#%s|%s' % (direction, term.encode('utf-8')))\n\ncmd_tr = cmd_translate\n\n\ndef cmd_save_word(term):\n \"\"\"Saves word and it's translation into the ~/.words.csv\n\n These files could be used to import words into the FlashCards ToGo.\n \"\"\"\n if ';' not in term:\n return cmd_search_word(term)\n\n filename = '~/.words.csv'\n\n template = \"\"\"\n
Translation \"{{ word }}\" was saved to %s
\n %%rebase layout title='Translation saved'\n \"\"\" % filename\n\n filename = os.path.expanduser(filename)\n dirname = os.path.dirname(filename)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n with open(filename, 'a+') as f:\n f.write(term.encode('utf-8'))\n f.write('\\n')\n return dict(template=template, word=term)\n\n\ndef cmd_search_word(term):\n \"\"\"Searches word translations at the http://slovari.yandex.ru.\n\n This command requires `simplejson` module to be installed.\n \"\"\"\n import simplejson\n\n template = \"\"\"\n
\n %rebase layout title='Word translation'\n \"\"\"\n\n variants = {}\n\n internet = Session()\n\n for i in reversed(range((len(term) + 1) / 2, len(term) + 1)):\n url = 'http://suggest-slovari.yandex.ru/suggest-lingvo?v=2&lang=en&' + \\\n urllib.urlencode(dict(part=term[:i].encode('utf-8')))\n response = internet.get(url)\n data = simplejson.loads(response.content)\n\n if data[0]:\n for trans, link in zip(*data[1:]):\n en, ru = trans.split(' - ', 1)\n variants[en] = dict(en=en, ru=ru, link=link)\n if len(variants) > 5:\n break\n\n\n def get_spelling(value):\n url = 'http://lingvo.yandex.ru/' + force_str(value['en']).replace(' ', '%20') + '/%D1%81%20%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%BE%D0%B3%D0%BE/'\n data = internet.get(url).content\n\n xml = ET.fromstring(force_str(data))\n transcript = xml.find('*//{x}span[@class=\"b-translate__tr\"]'.format(x=xhtml))\n\n if transcript is None:\n value['transcript'] = ''\n else:\n value['transcript'] = transcript.text\n\n has_audio = xml.find('*//{x}h1[@class=\"b-translate__word\"]//{x}span[@class=\"b-audio g-js\"]'.format(x=xhtml))\n value['has_audio'] = has_audio is not None\n return value\n\n variants = dict((key, get_spelling(value)) for key, value in variants.iteritems())\n\n return dict(template=template, variants=sorted(variants.values()))\n\ncmd_wo = cmd_search_word\n","sub_path":"cony/repo/lang.py","file_name":"lang.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"76560985","text":"import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nfrom WeatherDL.data_maker import dataset_maker\nfrom WeatherDL.model_maker import model_3\n\n# Extract data from data_maker\nX, y = dataset_maker(window=5, forecast_day=1)\n(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)\n\n# Open model from model_maker\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\n\n# Fit model, and extract training & validation metrics\nhistory = model.fit(X_train, y_train,\n validation_data=(X_test, y_test),\n batch_size=5,\n epochs=30,\n verbose=2,\n shuffle=False)\n\n# Prediction\ny_pred = model.predict(X_test)\n\n# Data Visualization\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n","sub_path":"WeatherDL/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"399683933","text":"# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n\"\"\"\n======================\nPlot cluster centroids\n======================\n\nThis example shows how to plot centroids of the clusters of rhythmic patterns.\n\"\"\"\n\n# Code source: Martín Rocamora\n# License: MIT\n\n##############################################\n# Imports\n# - matplotlib for visualization\n#\nimport matplotlib.pyplot as plt\nfrom carat import audio, util, annotations, features, clustering, display\n\n##############################################\n# We group rhythmic patterns into clusters and plot their centroids.\n#\n# First, we'll load one of the audio files included in `carat`.\naudio_path = util.example(\"ansina_audio\")\n\ny, sr = audio.load(audio_path)\n\n##############################################\n# Next, we'll load the annotations provided for the example audio file.\nannotations_path = util.example(\"ansina_beats\")\n\nbeats, beat_labs = annotations.load_beats(annotations_path)\ndownbeats, downbeat_labs = annotations.load_downbeats(annotations_path)\n\n##############################################\n# Then, we'll compute the accentuation feature.\n#\n# **Note:** This example is tailored towards the rhythmic patterns of the lowest\n# sounding of the three drum types taking part in the recording, so the analysis\n# focuses on the low frequencies (20 to 200 Hz).\nacce, times, _ = features.accentuation_feature(y, sr, minfreq=20, maxfreq=200)\n\n##############################################\n# Next, we'll compute the feature map.\nn_beats = int(round(beats.size/downbeats.size))\nn_tatums = 4\n\nmap_acce, _, _, _ = features.feature_map(acce, times, beats, downbeats, n_beats=n_beats,\n n_tatums=n_tatums)\n\n##############################################\n# Then, we'll group rhythmic patterns into clusters. This is done using the classical\n# K-means method with Euclidean distance (but other clustering methods and distance\n# measures can be used too).\n#\n# **Note:** The number of clusters n_clusters has to be specified as an input parameter.\nn_clusters = 4\n\ncluster_labs, centroids, _ = clustering.rhythmic_patterns(map_acce, n_clusters=n_clusters)\n\n##############################################\n# Finally we plot the centroids of the clusters of rhythmic patterns.\n\nfig = plt.figure(figsize=(8, 8))\ndisplay.centroids_plot(centroids, n_tatums=n_tatums)\n\nplt.tight_layout()\n\nplt.show()\n","sub_path":"docs/source/examples/plot_cluster_centroids.py","file_name":"plot_cluster_centroids.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"433486854","text":"\"\"\"\nReport generator\n\nThis module provides the following report generation features\n 1. Load and categorize transactions\n 2. Build latency distribution histograms for each category of transactions\n 3. Build html report with (stats, flots, transaction list) for each category, route combination\n 4. Generate environment reports\n\nAuthor: Manikandan Dhamodharan, Morgan Stanley\n\"\"\"\n\nimport time\nimport numpy\nimport logging\nfrom xpedite.report.reportbuilder import ReportBuilder\nfrom xpedite.report.env import EnvReportBuilder\nfrom xpedite.report.histogram import (\n formatLegend, formatBuckets, buildFlotHistograms,\n buildBuckets, buildDistribution, Flot\n )\nfrom xpedite.util import timeAction, formatHumanReadable\nfrom xpedite.containers import ProbeMap\nfrom xpedite.report.profile import Profiles, Profile\nfrom xpedite.analytics import Analytics, CURRENT_RUN\n\nLOGGER = logging.getLogger(__name__)\n\nclass ReportGenerator(object):\n \"\"\"Generates reports for the current profile session\"\"\"\n\n def __init__(self, reportName):\n \"\"\"\n Constructs an instance of report generator\n\n :param reporName: Name of the generated report\n\n \"\"\"\n self.reportName = reportName\n self.analytics = Analytics()\n\n def generateFlots(self, repo, classifier, runId):\n \"\"\"\n Generates latency distribuion histograms for each category/route combination\n\n :param repo: Repository of transaction collection\n :type repo: xpedite.transaction.TransactionRepo\n :param classifier: Classifier to categorize transactions into various types\n :param runId: Epoch time stamp to uniquely identify a profiling session\n\n \"\"\"\n flots = {}\n transactionCollections = [repo.getCurrent()] + repo.getBenchmarks().values()\n if not transactionCollections[0].isCurrent() or transactionCollections[0].name != CURRENT_RUN:\n from xpedite.types import InvariantViloation\n raise InvariantViloation(\n 'expecing transactions for current run at index 0 in the repository. 
'\n 'instead found {}'.format(transactionCollections[0].name)\n )\n\n elapsedTimeBundles = self.analytics.buildElapsedTimeBundles(transactionCollections, classifier)\n\n for category, elaspsedTimeBundle in elapsedTimeBundles.iteritems():\n buckets = buildBuckets(elaspsedTimeBundle[0], 35)\n if not buckets:\n LOGGER.debug('category %s has not enough data points to generate flot', category)\n continue\n\n LOGGER.debug('Buckets:\\n%s', buckets)\n\n yaxis = []\n conflatedCounts = []\n LOGGER.debug('Bucket values:')\n for i, elapsedTimeList in enumerate(elaspsedTimeBundle):\n bucketValues, conflatedCountersCount = timeAction('building counter distribution',\n lambda bkts=buckets, etl=elapsedTimeList: buildDistribution(bkts, etl)\n )\n conflatedCounts.append(conflatedCountersCount)\n LOGGER.debug('%s', bucketValues)\n title = transactionCollections[i].name\n legend = formatLegend(\n title, min(elapsedTimeList), max(elapsedTimeList), numpy.mean(elapsedTimeList), numpy.median(elapsedTimeList),\n numpy.percentile(elapsedTimeList, 95), numpy.percentile(elapsedTimeList, 99)\n )\n yaxis.append((legend, bucketValues))\n\n benchmarkConflatedCounts = sum(conflatedCounts, 1)\n if conflatedCounts[0] + benchmarkConflatedCounts > 0:\n LOGGER.debug(\n 'conflation - due to narrow bucket range [%s to %s] - (%d) in current run and (%d) in all '\n 'bencmark counter values are conflated',\n buckets[0], buckets[len(buckets)-1],\n conflatedCounts[0], benchmarkConflatedCounts\n )\n\n buckets = formatBuckets(buckets)\n options, data = buildFlotHistograms(buckets, yaxis, False)\n title = '{} - latency distribution benchmark'.format(category)\n description = 'Latency distribution (current run ID #{} vs chosen benchmarks)'.format(runId)\n flots.update({category: Flot(title, description, data, options)})\n return flots\n\n\n @staticmethod\n def getReportProbes(route, userProbes):\n \"\"\"\n Creates probes with human friendly name for reporting\n\n :param userProbes: List of probes enabled for a profiling session\n\n \"\"\"\n reportProbes = []\n userProbeMap = ProbeMap(userProbes)\n for probe in route.probes:\n if probe in userProbeMap:\n reportProbes.append(userProbeMap[probe])\n else:\n reportProbes.append(probe)\n return reportProbes\n\n @staticmethod\n def generateEnvironmentReport(app, result, repo, resultOrder, classifier, txnFilter, benchmarkPaths):\n \"\"\"\n Generates report with environment details\n\n :param app: an instance of xpedite app, to interact with target application\n :param result: Handle to gather and store profiling results\n :param repo: Repository of loaded transactions\n :param resultOrder: Sort order of transactions in latency constituent reports\n :param classifier: Predicate to classify transactions into different categories\n :param txnFilter: Lambda to filter transactions prior to report generation\n :param benchmarkPaths: List of stored reports from previous runs, for benchmarking\n\n \"\"\"\n envReport = EnvReportBuilder().buildEnvironmentReportFile(\n app, repo, resultOrder, classifier, txnFilter, benchmarkPaths\n )\n description = \"\"\"\n Test environment report (cpu clock frequency, kernel configuration etc.)\n \"\"\"\n envReportTitle = 'Test Environment Report'\n if envReport:\n result.attachXpediteReport(envReportTitle, envReportTitle, description, envReport)\n\n @staticmethod\n def addTestResult(reportName, result, timelineStats, benchmarkTlsMap):\n \"\"\"\n Adds report on perfromance regressions to profile results\n\n :param reportName: Name of the generated report\n :param result: 
Handle to gather and store profiling results\n :param timelineStats: Time line statistics for the current run\n :param benchmarkTlsMap: Time line statistics collection for benchmarks\n\n \"\"\"\n currentRunMedian = timelineStats.getTotalDurationSeries().getMedian()\n for benchmarkName, benchmarkTls in benchmarkTlsMap.iteritems():\n benchmarkMedian = benchmarkTls.getTotalDurationSeries().getMedian()\n threshold = max(benchmarkMedian * .05, .9)\n result.le(benchmarkMedian + threshold)(\n currentRunMedian, '{} Median latency threshold for current run vs benchmark {}'.format(\n reportName, benchmarkName\n )\n )\n\n def generateProfiles(self, transactionRepo, classifier):\n \"\"\"\n Generates profiles for the current profile session\n\n :param transactionRepo: Repository of loaded transactions\n :param classifier: Predicate to classify transactions into different categories\n\n \"\"\"\n transactionTree, benchmarkCompositeTree = self.analytics.buildTransactionTree(transactionRepo, classifier)\n profiles = Profiles(transactionRepo)\n\n for category, categoryNode in transactionTree.getChildren().iteritems():\n i = 1\n for route, transactionNode in categoryNode.children.iteritems():\n routeName = ' [route - {}]'.format(i) if len(categoryNode.children) > 1 else ''\n profileName = '{} - {}{}'.format(self.reportName, category, routeName)\n begin = time.time()\n LOGGER.info('generating profile %s (txns - %d) -> ', profileName, len(transactionNode.collection))\n\n benchmarkTransactionsMap = benchmarkCompositeTree.getCollectionMap([category, route])\n reportProbes = self.getReportProbes(route, transactionRepo.getCurrent().probes)\n timelineStats, benchmarkTimelineStats = self.analytics.computeStats(\n transactionRepo, category, route, reportProbes, transactionNode.collection, benchmarkTransactionsMap\n )\n profiles.addProfile(Profile(profileName, timelineStats, benchmarkTimelineStats))\n elapsed = time.time() - begin\n LOGGER.completed('completed in %0.2f sec.', elapsed)\n i += 1\n return profiles\n\n def generateLatencyReports(self, profiles, flots, result, resultOrder, reportThreshold):\n \"\"\"\n Generates latency breakup reports for a list of profiles\n\n :param profiles: Profile data for the current profile session\n :param flots: Latency distribuion histograms for each category/route combination\n :param result: Handle to gather and store profiling results\n :param resultOrder: Sort order of transactions in latency constituent reports\n :param reportThreshold: Threshold for number of transactions rendered in html reports.\n\n \"\"\"\n flotTracker = set()\n for profile in profiles:\n begin = time.time()\n reportTitle = '{} latency statistics [{} transactions]'.format(profile.name, len(profile.current))\n LOGGER.info('generating report %s -> ', reportTitle)\n\n category = profile.category\n if category not in flotTracker and category in flots:\n flots[category].attach(result)\n flotTracker.add(category)\n self.addTestResult(profile.name, result, profile.current, profile.benchmarks)\n report = ReportBuilder().buildReport(profile.current, profile.benchmarks, profile.reportProbes, profile.name,\n resultOrder, reportThreshold)\n reportSize = formatHumanReadable(len(report))\n reportTitle = '{} - ({})'.format(reportTitle, reportSize)\n description = '\\n\\t{}\\n\\t'.format(reportTitle)\n elapsed = time.time() - begin\n LOGGER.completed('completed %s in %0.2f sec.', reportSize, elapsed)\n result.attachXpediteReport(profile.name, reportTitle, description, report)\n\n def generateReport(self, app, repo, 
result, classifier, resultOrder, reportThreshold, txnFilter, benchmarkPaths):\n \"\"\"\n Generates statistics for the current profile session and attaches reports to the given result object\n\n :param app: An instance of xpedite app, to interact with target application\n :param repo: Repository of transaction collection\n :type repo: xpedite.transaction.TransactionRepo\n :param result: Handle to gather and store profiling results\n :param classifier: Predicate to classify transactions into different categories (Default value = DefaultClassifier()\n :param resultOrder: Sort order of transactions in latency constituent reports\n :param reportThreshold: Threshold for number of transactions rendered in html reports.\n :param txnFilter: Lambda to filter transactions prior to report generation\n :param benchmarkPaths: List of stored reports from previous runs, for benchmarking\n\n \"\"\"\n try:\n if txnFilter:\n self.analytics.filterTransactions(repo, txnFilter)\n flots = self.generateFlots(repo, classifier, app.runId)\n profiles = self.generateProfiles(repo, classifier)\n self.generateLatencyReports(profiles, flots, result, resultOrder, reportThreshold)\n self.generateEnvironmentReport(app, result, repo, resultOrder, classifier, txnFilter, benchmarkPaths)\n LOGGER.info('\\nTo recreate the report run - \"xpedite report -p profileInfo.py -r %s\"\\n', app.runId)\n result.commitXpediteReport(app, profiles, self.reportName)\n return profiles\n except Exception as ex:\n LOGGER.exception('failed to generate report')\n raise ex\n","sub_path":"scripts/lib/xpedite/reportgenerator.py","file_name":"reportgenerator.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"639603949","text":"\"\"\"\nP024 Lexicographic permutations\n\nA permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:\n\n012 021 102 120 201 210\n\nWhat is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n\"\"\"\n\nfrom itertools import permutations\n\n\ndef lexi_perm(str_int, nth):\n \"\"\"return the nth lexicographic permutation of a string of digits\"\"\"\n all = permutations(str_int)\n for i in range(nth):\n target = next(all)\n return int(\"\".join(target))\n\n\nif __name__ == \"__main__\":\n assert lexi_perm(\"012\", 4) == 120\n print(lexi_perm(\"0123456789\", 1000000))\n # >>> 2783915460\n # passed\n","sub_path":"AlgorithmTraining/ProjectEuler/p024_lexicographic_permutations.py","file_name":"p024_lexicographic_permutations.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"8243458","text":"#saving the following data into a set com{}\n#----------\n# 'Google'\n# 'Tmall'\n# 'Facebook'\n#---------\nS = {'Google','Tmall','Facebool'}\n#define a empty set com{}\n#s = set()\n#insert above data\n\n#1.insert a data ('Tencent')\n#2.remove a data ('Tmall')\n#3.clear the set\n#4.determine whether the str('Google') is in the set\n#s.add('Tecent')\n#s.remove('Tmall')\n#s.clear()\nif 'Google' in S:\n\tprint(\"1\")\nelse:\n\tprint('0')\n\n","sub_path":"practice/practice5/2set.py","file_name":"2set.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"340941071","text":"MODE = \"TRAIN\"\n#MODE = \"SUMBIT\"\n#MODE = \"TEST\"\n\nfrom keras.layers import Input\nfrom keras.layers.core import Activation, Flatten, Reshape\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.utils import np_utils\n\nimport os\nimport sys\nif(MODE == \"TRAIN\" or MODE == \"TEST\"):\n\t#\thttps://github.com/qubvel/segmentation_models\n\t#\thttps://github.com/aleju/imgaug\n\t#\thttps://www.github.com/keras-team/keras-contrib\n\tos.system('pip install --upgrade imgaug')\n\tos.system('pip install -U segmentation-models==0.2.1')\n\tos.system('pip install git+https://www.github.com/keras-team/keras-contrib.git')\n\tos.system('pip install -U scikit-learn')\n\timport imgaug as ia\n\tfrom imgaug import augmenters as iaa\n\tfrom imgaug.augmentables.segmaps import SegmentationMapOnImage\n\timport imgaug.imgaug\n\tfrom sklearn.model_selection import train_test_split\n\tfrom sklearn import preprocessing\nelse:\n\tPATH = \"/kaggle/input/efficientnet/\"\n\tsys.path.insert(0, PATH)\n\tPATH = \"/kaggle/input/segmentation-models/\"\n\tsys.path.insert(0, PATH)\n\tPATH = \"/kaggle/input/classification-models/\"\n\tsys.path.insert(0, PATH)\n\t\nfrom keras import metrics\nimport segmentation_models as sm\nfrom glob import glob\nfrom PIL import Image\nimport numpy as np \nimport pandas as pd \nimport random\nimport time\nimport gc\nfrom keras.models import *\nimport keras\n\n#\thttps://www.kaggle.com/aleksandradeis/steel-defect-detection-eda\ndef rle_2_mask_resize(rle, defined=False):\n\t# CONVERT RLE TO MASK \n\tif (pd.isnull(rle))|(rle=='')|(rle=='-1'): \n\t\tif not defined:\n\t\t\treturn np.zeros((256,1600) ,dtype=np.uint8)\n\t\treturn False\n\tif(defined):\n\t\treturn True\n\n\theight = 256\n\twidth = 1600\n\tmask = np.zeros( width*height ,dtype=np.uint8)\n\n\tarray = np.asarray([int(x) for x in rle.split()])\n\tstarts = array[0::2]-1\n\tlengths = array[1::2] \n\tfor index, start in enumerate(starts):\n\t\tmask[int(start):int(start+lengths[index])] = 1\n\treturn mask.reshape( (height,width), order='F' )\n\ndef smallest_length(rle):\n\tif (pd.isnull(rle))|(rle=='')|(rle=='-1'): \n\t\treturn None, None\n\n\tarray = np.asarray([int(x) for x in rle.split()])\n\tlengths = array[1::2]\n\tmin_val = np.amin(lengths) \n\treturn min_val, np.count_nonzero(lengths == min_val)\n#\treturn lengths.sum()/len(lengths)\t\n\n#\thttps://github.com/catalyst-team/mlcomp/blob/85a8849c87040d19a5aed61e72cfd7ad518d8c9b/mlcomp/contrib/transform/rle.py\ndef mask_2_rle(img):\n\t\"\"\"\n\timg: numpy array, 1 - mask, 0 - background\n\tReturns run length as string formatted\n\t\"\"\"\n\tpixels = img.T.flatten()\n\tpixels = np.concatenate([[0], pixels, [0]])\n\truns = np.where(pixels[1:] != pixels[:-1])[0] + 1\n\truns[1::2] -= runs[::2]\n\treturn ' '.join(str(x) for x in runs)\n\n\n\nclass DataGenerator(keras.utils.Sequence):\n\tdef __init__(self, batch_size=6, model=None, single_input=False):\n\t\tself.TRAIN_PATH = '/kaggle/input/severstal-steel-defect-detection/train_images/'\n\t\tself.TEST_PATH = '/kaggle/input/severstal-steel-defect-detection/test_images/'\n\n\t\tself.train_df = pd.read_csv('/kaggle/input/severstal-steel-defect-detection/train.csv')\n\t\tself.train_fns = sorted(glob(self.TRAIN_PATH + '*.jpg'))\n\t\tself.test_fns = sorted(glob(self.TEST_PATH + '*.jpg'))\n\n\t\tsplit_df = self.train_df[\"ImageId_ClassId\"].str.split(\"_\", n = 1, expand = 
True)\n\t\tself.train_df['Image'] = split_df[0]\n\t\tself.train_df['Label'] = split_df[1]\n\n\t\tself.samples = 6 * 30\n\t\tself.samples_index = 0\n\t\tself.roll_over = False\n\t\tself.last_shared_index = 0\n\n\t\tself.batch_size = batch_size\n\n\t\tself.single_input = single_input\n\n\t\tself.on_epoch_end()\n\n\n\t\tself.model = model\n\n\t\tself.shared = [\n\t\t\t[],\n\t\t\t[],\n\t\t\t[],\n\t\t\t[],\n\t\t\t[],\n\t\t\t[]\n\t\t]\t\n\n\t\tself.round = 0\n\t\tself.most_elements = 0\n\t\tfor index in range(0, len(self.train_df), 4):\n\t\t\tmask_1_def = rle_2_mask_resize(self.train_df.iloc[index]['EncodedPixels'], defined=True)\n\t\t\tmask_2_def = rle_2_mask_resize(self.train_df.iloc[index + 1]['EncodedPixels'], defined=True)\n\t\t\tmask_3_def = rle_2_mask_resize(self.train_df.iloc[index + 2]['EncodedPixels'], defined=True)\n\t\t\tmask_4_def = rle_2_mask_resize(self.train_df.iloc[index + 3]['EncodedPixels'], defined=True)\n\n\t\t\tmoved = []\n\t\t\tif mask_1_def:\n\t\t\t\tmoved.append(0)\n\t\t\tif mask_2_def:\n\t\t\t\tmoved.append(1)\n\t\t\tif mask_3_def:\n\t\t\t\tmoved.append(2)\n\t\t\tif mask_4_def:\n\t\t\t\tmoved.append(3)\n\t\t\n\t\t\tif(len(moved) == 0):\n\t\t\t\tself.shared[4].append(index)\n\t\t\t\tself.most_elements = max(self.most_elements, len(self.shared[4]))\n\t\t\telif(1 < len(moved)):\n\t\t\t\tself.shared[5].append(index)\n\t\t\t\tself.most_elements = max(self.most_elements, len(self.shared[5]))\n\t\t\telse:\n\t\t\t\tself.shared[moved[0]].append(index)\n\t\t\t\tself.most_elements = max(self.most_elements, len(self.shared[moved[0]]))\n\n\tdef __len__(self):\n\t\treturn int(self.most_elements // self.batch_size)\n\t\t#return np.floot(self.train_df.shape[0]//len(shared))\n\t\t#return int(np.floor(self.train_df.shape[0] / self.batch_size))\n\n\n\tdef __getitem__(self, index):\n\t\tif(self.model == None):\n\t\t\tX, y = self.__data_generation()\n\n\t\t\t#X = (X - np.min(X))/np.ptp(X)\n\t\t\t#X = (X - np.mean(X)) / np.std(X)\n\t\t\tif not self.single_input:\n\t\t\t\treturn [X, X], y\n\t\t\treturn X, y\n\t\telse:\n\t\t\tX, y_2 = self.__data_generation()\n\t\t\tY = self.model.predict(X)\n\t\t\tY = Y.reshape(Y.shape[:-1])\n\t\t\ty = np.zeros(Y.shape + (3, ))\n#\t\t\tprint(y.shape)\n#\t\t\tprint(Y.shape)\n\t\t\ty[:, :, :, 0] = Y \n\t\t\ty[:, :, :, 1] = Y\n\t\t\ty[:, :, :, 2] = Y\n\t\t#\ty = resolve_model_one_input(y)\n\t\t\treturn y, y_2\n\n\t\t\t#Y, y#y, Y\n\n\tdef on_epoch_end(self):\n\t\tpass\n\n\tdef __data_generation(self):\n\t\tself.round += 1\n\n\t\tX = np.zeros((self.batch_size, 256, 1600, 3), dtype=np.float64)\n\t\ty = np.zeros((self.batch_size, 256, 1600, 4), dtype=np.float64)\n\n\t\telements_per_class = max((self.batch_size//len(self.shared)), 1)\n\t\tcurrent_index = 0\n\t\tself.samples_index += elements_per_class\n\t\tif(self.train_df.shape[0] < (self.samples_index * len(self.shared))):\n\t\t\tself.samples_index = 0\n\t\t\tself.roll_over = True\n\n\t\twhile current_index < self.batch_size:\n\t\t\tfor i in range(self.last_shared_index, len(self.shared)):\n\t\t\t\tfor j in range(elements_per_class):\n\t\t\t\t\tif not current_index < self.batch_size:\n\t\t\t\t\t\tbreak\n\t\t\t\t\ttry:\n\t\t\t\t\t\tindex = (self.samples_index + j) % len(self.shared[i])\n\t\t\t\t\t\t#\tbased off https://www.kaggle.com/aleksandradeis/steel-defect-detection-eda\n\t\t\t\t\t\tseq = iaa.Sequential([\n\t\t\t\t\t\t\tiaa.Sometimes(0.5,\n\t\t\t\t\t\t\t\tiaa.GaussianBlur(sigma=(0, 0.5))\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t#iaa.ContrastNormalization((0.75, 1.5)),\n\t\t\t\t\t\t\tiaa.AdditiveGaussianNoise(loc=0, 
scale=(0.0, 0.05*255), per_channel=0.5),\n\t\t\t\t\t\t\tiaa.Multiply((0.8, 1.2), per_channel=0.2),\n\t\t\t\t\t\t], random_order=True) \n\n\t\t\t\t\t\timage = np.asarray(Image.open(self.TRAIN_PATH + self.train_df.iloc[index]['Image']))\n\n\t\t\t\t\t\tmask_1 = rle_2_mask_resize(self.train_df.iloc[index]['EncodedPixels'])\n\t\t\t\t\t\tmask_2 = rle_2_mask_resize(self.train_df.iloc[index + 1]['EncodedPixels'])\n\t\t\t\t\t\tmask_3 = rle_2_mask_resize(self.train_df.iloc[index + 2]['EncodedPixels'])\n\t\t\t\t\t\tmask_4 = rle_2_mask_resize(self.train_df.iloc[index + 3]['EncodedPixels'])\n\n\t\t\t\t\t\tsegmap = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.int32)\t\t\t\n\t\t\t\t\t\tsegmap[1 < mask_1] = 1\n\t\t\t\t\t\tsegmap[2 < mask_2] = 2\n\t\t\t\t\t\tsegmap[3 < mask_3] = 3\n\t\t\t\t\t\tsegmap[4 < mask_4] = 4\n\n\n\t#\t\t\t\t\tsegmap = np.zeros((4, image.shape[0], image.shape[1], 1), dtype=np.int32)\t\t\t\n\t#\t\t\t\t\tsegmap[0, 1 < mask_1] = 1\n\t#\t\t\t\t\tsegmap[1, 2 < mask_2] = 1\n\t#\t\t\t\t\tsegmap[2, 3 < mask_3] = 1\n\t#\t\t\t\t\tsegmap[3, 4 < mask_4] = 1\n\n\t\t\t\t#\t\tprint(segmap.shape)\n\t\t\t\t#\t\tprint(segmap.shape)\n\t#\t\t\t\t\tsegmap = np.swapaxes(segmap, 0, -1) \n\t#\t\t\t\t\tsegmap = segmap.reshape(segmap.shape[1:])\n\t\t\t\t#\t\tprint(segmap.shape)\n\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 0]) == self.train_df.iloc[index]['EncodedPixels'])\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 1]) == self.train_df.iloc[index + 1]['EncodedPixels'])\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 2]) == self.train_df.iloc[index + 2]['EncodedPixels'])\n\t\t\t\t#\t\tassert(mask_2_rle(segmap[:, :, 3]) == self.train_df.iloc[index + 3]['EncodedPixels'])\n\n#\t\t\t\t\t\tdouble_check_map = np.zeros((image.shape[0], image.shape[1], 1))\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 0] == 1] = 1\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 1] == 2] = 2\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 2] == 3] = 3\n#\t\t\t\t\t\tdouble_check_map[segmap[:, :, 3] == 4] = 4\n\n#\t\t\t\t\t\tmask_2_rle(segmap[:, :, 1])\n#\t\t\t\t\t\tmask_2_rle(segmap[:, :, 2])\n#\t\t\t\t\t\tmask_2_rle(segmap[:, :, 3])\n\n\t\t\t\t\t\tsegmap_on_image = SegmentationMapOnImage(segmap, shape=image.shape)\n\t\t\t\t\t\timage_aug = image\n\t\t\t\t\t\tif(random.randint(0, 3) == 2):\n\t\t\t\t\t\t\timage_aug, _ = seq(image=image, segmentation_maps=segmap_on_image)\n\n\t\t\t\t\t\tX[current_index, :] = image_aug\n\t\t\t\t\t\ty[current_index, :] = segmap\n\n\t\t\t\t\t\tcurrent_index += 1\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\t\tcurrent_index += 1\n\t\t\t\tself.last_shared_index += 1\n\t\t\tself.last_shared_index = 0\n\t\treturn X, y\n\ndef save_model_own(model, name=\"model\"):\n\tmodel_json = model.to_json()\n\twith open(\"{}.json\".format(name), \"w\") as json_file:\n\t\tjson_file.write(model_json)\n\tmodel.save_weights(\"{}.h5\".format(name))\n\n\n'''\n\thttps://www.tensorflow.org/guide/keras/custom_callback\n'''\nclass EarlyStoppingAtMinLoss(keras.callbacks.Callback):\n\tdef __init__(self, patience=10, timeout=5):\n\t\tsuper(EarlyStoppingAtMinLoss, self).__init__()\n\n\t\tself.start = time.time()\n\t\tself.traning_time = 60 * timeout #60 * 1#0\n\t\tself.round = 0\n\n\t#\tself.prediction = tf.Variable(0., validate_shape=False)\n\n\t\tself.patience = patience\n\t\tself.best = np.Inf\n\t\tself.best_weights = None\n\t\tself.wait = 0\n\n\tdef on_batch_end(self, batch, logs={}):\n#\t\tprint(self.model.outputs[0].eval(session=K.get_session()))\n\t\tif(self.traning_time < (time.time() - 
self.start)):\n\t\t\tprint(\"tiden er ute...\")\n\t\t\tself.model.stop_training = True\n\n\t\tif(self.round % 5 == 0):\n\t\t\tprint(logs)\n\t\t\tprint(\"accuracy : {} mae:{}\".format(logs.get(\"accuracy\", \"None?\"), logs.get(\"mean_absolute_error\", \"None?\")))\n\n\t\tcurrent = logs.get('loss')\n\t\tif np.less(current, self.best):\n\t\t\tself.best = current\n\t\t\tself.wait = 0\n\t\t\tself.best_weights = self.model.get_weights()\n\t\telif self.patience <= self.wait and self.best_weights != None:\n\t\t\tself.model.set_weights(self.best_weights)\n\t\tself.wait += 1\n\t\tself.round += 1\n\nclass LearningRateScheduler(keras.callbacks.Callback):\n\tdef __init__(self):\n\t\tsuper(LearningRateScheduler, self).__init__()\n\t\t#self.schedule = schedule\n\t\t\n\t\tself.og_lr = 0.01 #1e-4\n\t\tself.decay_factor = 0.75\n\t\tself.step_size = 2\n\n\t\tself.lr = self.og_lr\n\t\tself.iteration = 0\n\t\tself.decay = 0.01\n\n\t\tself.best = np.Inf\n\n\t#\thttps://www.jeremyjordan.me/nn-learning-rate/\n\tdef calc_new_lr(self):\n#\t\treturn self.og_lr * (self.decay_factor ** np.floor(self.iteration/self.step_size))\n\t\t return self.og_lr * (1 / (1 + self.decay * self.iteration))\n\n\tdef on_batch_start(self, batch, logs={}):\n\t\tkeras.backend.set_value(self.model.optimizer.lr, self.calc_new_lr())\t\n\n\tdef on_batch_end(self, batch, logs={}):\n\t\tif(0 < self.iteration and self.iteration % 10 == 0):\n\t\t\tkeras.backend.set_value(self.model.optimizer.lr, self.calc_new_lr())\t\n\t\tself.iteration += 1\n\nif __name__ == \"__main__\":\n\tnp.random.seed(0)\n\trandom.seed( 0 )\n\n\t#data = dataloader()\n\tgc.collect() \n\n\tmodel = None\n\tif(MODE == \"TEST\"):\n\t#\tX, y = data.get_traning()\n\t#\tprint(data.tested_all_data())\n\t\tfor i in range(3):\n\t\t\tmodel = None\n\t\t\thistory = None\n\t\t\tif(i == 0):\n\t\t\t\tmodel = sm.Unet(encoder_weights='imagenet')\n\t\t\t\tmodel.compile(\n\t\t\t\t\t'Adam',\n\t\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t\t\t)\n\t\t\t\thistory = model.fit(X, y, epochs=1, batch_size=5, \n\t\t\t\t\tvalidation_split=0.1,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=0)\n\t\t\t\tsave_model_own(model, name=\"model\" + str(i))\n\t\t\telif(i == 1):\n\t\t\t\tmodel = sm.FPN(encoder_weights='imagenet')\n\t\t\t\tmodel.compile(\n\t\t\t\t\t'Adam',\n\t\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t\t\t)\n\t\t\t\thistory = model.fit(X, y, epochs=1, batch_size=5, \n\t\t\t\t\tvalidation_split=0.1,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=0)\n\t\t\t\tsave_model_own(model, name=\"model\" + str(i))\n\t\t\telif(i == 2):\n\t\t\t\tmodel = sm.Linknet(encoder_weights='imagenet')\n\t\t\t\tmodel.compile(\n\t\t\t\t\t'Adam',\n\t\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t\t\t)\n\t\t\t\thistory = model.fit(X, y, epochs=1, batch_size=5, \n\t\t\t\t\tvalidation_split=0.1,\n\t\t\t\t\tshuffle=True,\n\t\t\t\t\tverbose=0)\n\t\t\t\tsave_model_own(model, name=\"model\" + str(i))\n\telif(MODE == \"TRAIN\"):\n\t\tgc.collect() \n\n\t\n\t\tmodel = sm.Unet(encoder_weights='imagenet', classes=1, activation=\"relu\")\n\t\tmodel.compile(\n\t\t\t\t'Adam',\n\t\t\t\tloss=sm.losses.bce_dice_loss,\n\t\t\t\tmetrics=['accuracy',sm.metrics.iou_score],\n\t\t)\n\t\t\n\t\tearly = EarlyStoppingAtMinLoss(timeout=15)\n\t\ttraining_generator = DataGenerator(batch_size=3, 
single_input=True)\n\t\tmodel.fit_generator(generator=training_generator,\n\t\t\t\t\t\t\t\t\tuse_multiprocessing=True,\n\t\t\t\t\t\t\t\t\tworkers=1,\n\t\t\t\t\t\t\t\t\tverbose=0,\n\t\t\t\t\t\t\t\t\tmax_queue_size=2,\n\t\t\t\t\t\t\t\t\tcallbacks=[early, \n\t\t\t\t\t\t\t\t\t\t\t\tLearningRateScheduler()])\n\t\t\n\telif(MODE == \"SUMBIT\"):\n\t\tmodel = model_from_json(open('/kaggle/input/steel/model.json', 'r').read())\n\t\tmodel.load_weights(\"/kaggle/input/steel/model.h5\")\n\n\t#\textra_model = model_from_json(open('/kaggle/input/steel/extra_model.json', 'r').read())\n\t#\textra_model.load_weights(\"/kaggle/input/steel/extra_model.h5\")\n\t\t\n\n\t\tinput_size = 32 \n\t\tdata_frame_data = {\n\t\t\t'ImageId_ClassId':[],\n\t\t\t'EncodedPixels':[]\n\t\t}\n\t\tprint(\"Model loaded\")\n\t\troundd = 0\n\t\tfor size_jump in range(0, len(data.test_fns), input_size):\n\t\t\tmovment_step = (len(data.test_fns)-size_jump) if ((len(data.test_fns)-size_jump) < input_size) else input_size\n\n\t\t\tindex = 0\n\t\t\ttest_input = np.zeros((movment_step, 256, 1600, 3))\n\t\t\tfor i in range(size_jump, size_jump + movment_step): \n\t\t\t\timage = np.asarray(Image.open(data.test_fns[i]))\n\t\t\t\ttest_input[index, :] = image\n\t\t\t\tindex += 1\n\n\n\t\t\t'''\n\t\t\tY = model.predict(test_input)\n\t\t\tY = Y.reshape(Y.shape[:-1])\n\t\t\ty = np.zeros(Y.shape + (3, ))\n#\t\t\tprint(y.shape)\n#\t\t\tprint(Y.shape)\n\t\t\ty[:, :, :, 0] = Y \n\t\t\ty[:, :, :, 1] = Y\n\t\t\ty[:, :, :, 2] = Y\n\n\t\t\toutput = extra_model.predict(y)\n\t\t\t'''\n\t\t\toutput = model.predict(test_input)\n#\t\t\tnew_output = extra_model.predict(output)\n#\t\t\tnew_output = connect_back_input(new_output)\n\n\t\t\tprint(\"Done new prediction\")\n\t\t\tmask_1 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\t\t\n\t\t\tmask_2 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\n\t\t\tmask_3 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\n\t\t\tmask_4 = np.zeros((movment_step, output.shape[1], output.shape[2], 1), dtype=np.int32)\n\t\t\tmask_1[output == 1] = 1\n\t\t\tmask_2[output == 2] = 1\n\t\t\tmask_3[output == 3] = 1\n\t\t\tmask_4[output == 4] = 1\n\n\t\t\tfor i in range(0, output.shape[0]):\n\t\t\t\tname = data.test_fns[size_jump + i].split(\"/\")[-1]\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_1\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_1[i, :]))\n\t\t\t\t\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_2\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_2[i, :]))\n\t\t\t\t\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_3\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_3[i, :]))\n\t\t\t\t\n\t\t\t\tdata_frame_data[\"ImageId_ClassId\"].append(name + \"_4\")\n\t\t\t\tdata_frame_data[\"EncodedPixels\"].append(mask_2_rle(mask_4[i, :]))\n\n\t\tpd.DataFrame(data_frame_data).to_csv(\"submission.csv\", encoding='utf-8', index=False)\n\t","sub_path":"kaggle/severstal-steel-defect-detection/steel.py","file_name":"steel.py","file_ext":"py","file_size_in_byte":15283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"88376821","text":"#!/usr/bin/python3\n\"\"\"\nContains the class DBStorage\n\"\"\"\n\nimport models\nfrom models.base_model import BaseModel, Base\nfrom models.product import Product\nfrom models.customer import Customer\nfrom models.customer_product_mapping import CustomerProductMapping\nfrom os import getenv\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nclasses = {\"Customer\": Customer, \"Product\": Product,\n \"CustomerProductMapping\": CustomerProductMapping}\n\n\nclass DBStorage:\n \"\"\"interaacts with the MySQL database\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n \"\"\"Instantiate a DBStorage object\"\"\"\n GRAPHIT_MYSQL_USER = getenv('GRAPHIT_MYSQL_USER')\n GRAPHIT_MYSQL_PWD = getenv('GRAPHIT_MYSQL_PWD')\n GRAPHIT_MYSQL_HOST = getenv('GRAPHIT_MYSQL_HOST')\n GRAPHIT_MYSQL_DB = getenv('GRAPHIT_MYSQL_DB')\n GRAPHIT_ENV = getenv('GRAPHIT_ENV')\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.\n format(GRAPHIT_MYSQL_USER,\n GRAPHIT_MYSQL_PWD,\n GRAPHIT_MYSQL_HOST,\n GRAPHIT_MYSQL_DB))\n\n def all(self, cls=None):\n \"\"\"query on the current database session\"\"\"\n new_dict = {}\n for clss in classes:\n if cls is None or cls is classes[clss] or cls is clss:\n objs = self.__session.query(classes[clss]).all()\n for obj in objs:\n key = obj.__class__.__name__ + '.' + obj.id\n new_dict[key] = obj\n return (new_dict)\n\n def new(self, obj):\n \"\"\"add the object to the current database session\"\"\"\n self.__session.add(obj)\n\n def save(self):\n \"\"\"commit all changes of the current database session\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete from the current database session obj if not None\"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\"reloads data from the database\"\"\"\n Base.metadata.create_all(self.__engine)\n sess_factory = sessionmaker(bind=self.__engine, expire_on_commit=False)\n Session = scoped_session(sess_factory)\n self.__session = Session\n\n def close(self):\n \"\"\"call remove() method on the private session attribute\"\"\"\n self.__session.remove()\n\n def get(self, cls, id):\n \"\"\"get object based on class and id\"\"\"\n objs = self.__session.query(classes[cls]).all()\n for obj in objs:\n if obj.__class__.__name__ == cls and obj.id == id:\n return obj\n return None\n\n def count(self, cls=None):\n \"\"\"get count of all objects or objects of a specific class\"\"\"\n object_count = 0\n object_list = []\n if cls is None:\n for value in classes.values():\n object_count += self.__session.query(value).count()\n else:\n if cls in classes:\n object_count += self.__session.query(classes[cls]).count()\n return object_count\n\n def get_session(self):\n \"\"\"returns a session to query\"\"\"\n return self.__session\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"338460095","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nwith open('requirements.txt', 'r') as req:\n requirements_install = [l.strip() for l in req.readlines() if l.strip() != '']\n\nwith open('requirements_test.txt', 'r') as req:\n requirements_test = [l.strip() for l in req.readlines() if l.strip() != '']\n\nwith open('requirements_links.txt', 'r') as req:\n dependency_links = [l.strip() for l in req.readlines() if l.strip() != '']\n\nextras = {\n 'test': requirements_test\n}\n\n\nentry_points = '[console_scripts]\\ndatafs=datafs.datafs:cli'\n\nsetup(\n name='datafs',\n version='0.6.5',\n description=\"DataFS is an abstraction layer for data storage systems. It manages file versions and metadata using a json-like storage system like AWS's DynamoDB and relies on PyFilesystem to abstract file storage, allowing you to store files locally and on the cloud in a seamless interface.\",\n long_description=readme + '\\n\\n' + history,\n author=\"Climate Impact Lab\",\n url='https://github.com/ClimateImpactLab/datafs',\n packages=find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests', 'docs', 'examples']),\n package_dir={'datafs':\n 'datafs'},\n include_package_data=True,\n install_requires=requirements_install,\n entry_points=entry_points,\n license=\"MIT license\",\n zip_safe=False,\n keywords='datafs',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7'\n ],\n test_suite='tests',\n tests_require=requirements_test,\n extras_require=extras,\n dependency_links = dependency_links\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"19141907","text":"from idlelib import statusbar\nfrom tkinter import *\nimport tkinter.messagebox\nfrom pygame import mixer\n\n# Window\nroot = Tk()\nmixer.init()\nroot.geometry('500x550')\nroot.title(\"Settings\")\nroot.iconbitmap(r'Art/settings.png')\n\n\n# About Credits\ndef about_game():\n tkinter.messagebox.showinfo('Credits', 'Sound from:\\n~Zapsplat.com, '\n '\\n~PlayOnLoop.com, '\n '\\n~http://www.freesfx.co.uk')\n\n\n# How to play\ndef rules():\n tkinter.messagebox.showinfo('Rules', 'Two players are trying to score a goal in the opponents net with horizontal, '\n 'vertical and diagonal moves.\\nAlready used points can be re-used for a double'\n 'move')\n\n\n# menubar\nmenubar = Menu(root)\nroot.config(menu=menubar)\n\n# submenu\nsubMenu = Menu(menubar, tearoff=0)\nmenubar.add_cascade(label=\"About\", menu=subMenu)\nsubMenu.add_command(label=\"Credits\", command=about_game)\nsubMenu.add_command(label=\"How to play\", command=rules)\n\n# text\ntext = Label(root, text='Game Settings')\ntext.pack(pady=10)\n\n\n# button functions\nmuted = FALSE\n\n\ndef mute_music():\n global muted\n if muted: # unmute music\n mixer.music.set_volume(0.5)\n volume1Btn.configure(image=volume1Photo)\n scale.set(50)\n muted = FALSE\n else: # mute\n mixer.music.set_volume(0)\n volume1Btn.configure(image=mutePhoto)\n scale.set(0)\n muted = TRUE\n\n\ndef play_btn():\n mixer.music.load('background.wav')\n mixer.music.play()\n\n\ndef pause_btn():\n mixer.music.load('../Sound/background.wav')\n mixer.music.stop()\n statusbar['text'] = \"Paper football: music paused\"\n\n\ndef set_vol(val):\n volume = int(val) / 100\n mixer.music.set_volume(volume)\n\n\ndef exit_btn():\n root.destroy()\n\n\n# frame\nmiddleframe = Frame(root, relief=RAISED, borderwidth=0)\nmiddleframe.pack()\n\n# Volume 1\nvolume1Photo = PhotoImage(file='../Art/sound.png')\nvolume1Btn = Button(image=volume1Photo, command=mute_music)\nvolume1Btn.pack()\nmutePhoto = PhotoImage(file='../Art/sound_off.png')\n\n# Volume button\nvolumePhoto = PhotoImage(file='../Art/sound.png')\nplay_btn = Button(middleframe, image=volumePhoto, command=play_btn)\nplay_btn.pack(pady=5, padx=10)\n\n\n\n# Mixer\nscale = Scale(root, from_=0, to=100, orient=HORIZONTAL, command=set_vol)\nscale.set(50) # default value\nmixer.music.set_volume(50)\nscale.pack()\n\n# Exit button\nexitPhoto = PhotoImage(file='../Art/exit.png')\nexit_btn = Button(middleframe, image=exitPhoto, command=exit_btn)\nexit_btn.pack(pady=5, padx=10)\n\n\n# status bar\nstatusbar = Label(root, text=\"Paper Football\", relief=SUNKEN, anchor=W)\nstatusbar.pack(side=BOTTOM, fill=X)\n\n# loop\nroot.mainloop()\n","sub_path":"View/SettingsUI.py","file_name":"SettingsUI.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"313325779","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport calendar\nfrom dateutil.relativedelta import relativedelta\n# from ChryslerMTD.sql.current_month_data import current_month_sql\n\n\ndef get_pre_day():\n date = datetime.date.today()\n dates = str(date).split('-')\n print(dates)\n month_range = calendar.monthrange(int(dates[0]), int(dates[1]))\n if int(dates[1]) == 1:\n year = int(dates[0]) - 1\n fact_date = str(year) + '-' + '12' + '-' + str(month_range[1])\n else:\n if int(dates[1])-1 < 10:\n fact_date = str(dates[0]) + '-0' + str(int(dates[1])-1) + '-' + str(dates[2])\n else:\n fact_date = str(dates[0]) + '-' + str(int(dates[1]) - 1) + '-' + str(dates[2])\n return fact_date\n\n\ndef get_days(n_day):\n date = datetime.date.today()\n tar_day = date.today() + relativedelta(days=-n_day)\n return str(tar_day)\n# a = get_pre_day()\n# print(a)\n\n\ndef get_pre_month(mon):\n # 输入int,获取之前n月\n date = datetime.date.today()\n month = date.today() + relativedelta(months=-mon)\n return str(month)[0:7]\n\n\ndef wrap_month_condition(mon):\n cond_temp = 'CalendarMonth=\\'' + get_pre_month(mon) + '\\''\n return cond_temp\n\n\ndef wrap_date_condition(day):\n cond_temp = 'Date=\\'' + get_days(day) + '\\''\n return cond_temp\n\n# sql = current_month_sql.format(date_condition=wrap_month_condition(0)+' and '+wrap_date_condition(0))\n# print(wrap_date_condition(1))\n\n\n","sub_path":"ChryslerMTD/get_date.py","file_name":"get_date.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"438700006","text":"#!/usr/bin/env python\n\nimport json # Used when TRACE=jsonp\nimport os # Used to get the TRACE environment variable\nimport re # Used when TRACE=jsonp\nimport sys # Used to smooth over the range / xrange issue.\n\n# Python 3 doesn't have xrange, and range behaves like xrange.\nif sys.version_info >= (3,):\n xrange = range\n\n# Circuit verification library.\n\nclass Wire(object):\n \"\"\"A wire in an on-chip circuit.\n \n Wires are immutable, and are either horizontal or vertical.\n \"\"\"\n \n def __init__(self, name, x1, y1, x2, y2):\n \"\"\"Creates a wire.\n \n Raises an ValueError if the coordinates don't make up a horizontal wire\n or a vertical wire.\n \n Args:\n name: the wire's user-visible name\n x1: the X coordinate of the wire's first endpoint\n y1: the Y coordinate of the wire's first endpoint\n x2: the X coordinate of the wire's last endpoint\n y2: the Y coordinate of the wire's last endpoint\n \"\"\"\n # Normalize the coordinates.\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n \n self.name = name\n self.x1, self.y1 = x1, y1\n self.x2, self.y2 = x2, y2\n self.object_id = Wire.next_object_id()\n \n if not (self.is_horizontal() or self.is_vertical()):\n raise ValueError(str(self) + ' is neither horizontal nor vertical')\n \n def is_horizontal(self):\n \"\"\"True if the wire's endpoints have the same Y coordinates.\"\"\"\n return self.y1 == self.y2\n \n def is_vertical(self):\n \"\"\"True if the wire's endpoints have the same X coordinates.\"\"\"\n return self.x1 == self.x2\n \n def intersects(self, other_wire):\n \"\"\"True if this wire intersects another wire.\"\"\"\n # NOTE: we assume that wires can only cross, but not overlap.\n if self.is_horizontal() == other_wire.is_horizontal():\n return False \n \n if self.is_horizontal():\n h = self\n v = other_wire\n else:\n h = other_wire\n v = self\n return v.y1 <= h.y1 and h.y1 <= v.y2 and h.x1 <= v.x1 and v.x1 <= h.x2\n \n def __repr__(self):\n # :nodoc: nicer formatting to help with debugging\n return('')\n \n def as_json(self):\n \"\"\"Dict that obeys the JSON format restrictions, representing the wire.\"\"\"\n return {'id': self.name, 'x': [self.x1, self.x2], 'y': [self.y1, self.y2]}\n\n # Next number handed out by Wire.next_object_id()\n _next_id = 0\n \n @staticmethod\n def next_object_id():\n \"\"\"Returns a unique numerical ID to be used as a Wire's object_id.\"\"\"\n id = Wire._next_id\n Wire._next_id += 1\n return id\n\nclass WireLayer(object):\n \"\"\"The layout of one layer of wires in a chip.\"\"\"\n \n def __init__(self):\n \"\"\"Creates a layer layout with no wires.\"\"\"\n self.wires = {}\n \n def wires(self):\n \"\"\"The wires in the layout.\"\"\"\n self.wires.values()\n \n def add_wire(self, name, x1, y1, x2, y2):\n \"\"\"Adds a wire to a layer layout.\n \n Args:\n name: the wire's unique name\n x1: the X coordinate of the wire's first endpoint\n y1: the Y coordinate of the wire's first endpoint\n x2: the X coordinate of the wire's last endpoint\n y2: the Y coordinate of the wire's last endpoint\n \n Raises an exception if the wire isn't perfectly horizontal (y1 = y2) or\n perfectly vertical (x1 = x2).\"\"\"\n if name in self.wires:\n raise ValueError('Wire name ' + name + ' not unique')\n self.wires[name] = Wire(name, x1, y1, x2, y2)\n \n def as_json(self):\n \"\"\"Dict that obeys the JSON format restrictions, representing the layout.\"\"\"\n return { 'wires': [wire.as_json() for wire in self.wires.values()] }\n \n @staticmethod\n def from_file(file):\n 
\"\"\"Builds a wire layer layout by reading a textual description from a file.\n \n Args:\n file: a File object supplying the input\n \n Returns a new Simulation instance.\"\"\"\n\n layer = WireLayer()\n \n while True:\n command = file.readline().split()\n if command[0] == 'wire':\n coordinates = [float(token) for token in command[2:6]]\n layer.add_wire(command[1], *coordinates)\n elif command[0] == 'done':\n break\n \n return layer\n\ndef nodeNum(node):\n if(node is None):\n return 0\n return node.nodeNum\n\ndef height(node):\n if(node is None):\n return -1\n else:\n return node.height\n\nclass Node(object):\n def __init__(self,key,patient):\n self.left=None\n self.right=None\n self.key=key\n self.height=0\n self.patient=patient\n self.nodeNum=1\n def updateHeight(self):\n self.height=1+max(height(self.right),height(self.left))\n def updateNodeNum(self):\n self.nodeNum=nodeNum(self.right)+nodeNum(self.left)+1\n\n def rRotate(self):\n lNode=self.left\n patient=self.patient\n self.left=lNode.right\n if(self.left!=None):\n self.left.patient=self\n lNode.right=self\n self.patient=lNode\n if(self == patient.right):\n patient.right=lNode\n else: \n patient.left=lNode\n lNode.patient=patient\n self.updateHeight()\n self.updateNodeNum()\n lNode.updateHeight()\n lNode.updateNodeNum()\n \n def lRotate(self):\n rNode=self.right\n patient=self.patient\n self.right=rNode.left\n if(self.right != None):\n self.right.patient=self\n rNode.left=self\n self.patient=rNode\n if(self==patient.right):\n patient.right=rNode\n else:\n patient.left=rNode\n rNode.patient=patient \n self.updateHeight()\n self.updateNodeNum()\n rNode.updateHeight()\n rNode.updateNodeNum()\n\n def rlRotate(self):\n self.right.rRotate()\n self.lRotate()\n def lrRotate(self):\n self.left.lRotate()\n self.rRotate()\n def checkAndFixRightInsert(self):\n if(height(self.right)-height(self.left)<=1):\n self.updateHeight()\n self.updateNodeNum()\n return\n if(height(self.right.left)-height(self.right.right)>=1):\n self.rlRotate()\n else:\n self.lRotate()\n\n def checkAndFixLeftInsert(self):\n if(height(self.left)-height(self.right)<=1):\n self.updateHeight()\n self.updateNodeNum()\n return\n if(height(self.left.right)-height(self.left.left)>=1):\n self.lrRotate()\n else:\n self.rRotate()\n def checkAndFixRightDelete(self):\n self.checkAndFixLeftInsert()\n def checkAndFixLeftDelete(self):\n self.checkAndFixRightInsert()\n \n \n#klevel=1\n\nclass RangeIndex(object):\n \"\"\"\n Post: Array-based range index implementation.\n Now: AVL-tree-based range index implementation\n \"\"\"\n \n def __init__(self):\n \"\"\"Initially empty range index.\"\"\"\n self.head=Node(0,None)\n \n def add(self, key):\n \"\"\"Inserts a key in the range index.\"\"\"\n \n if key is None:\n raise ValueError('Cannot insert nil in the index')\n if(self.head.right is None):\n self.head.right=Node(key,self.head)\n else:\n self.__add__(self.head.right,key)\n \n def __add__(self,toBeInserted,key):\n if(toBeInserted is None):\n if(self.head.right is None):\n raise Exception(\"Fucking! 
We algothrim won't happened that we add to a None!\")\n else:\n raise Exception(\"We algothrim won't happened that we add to a None!\")\n if(key>toBeInserted.key):\n if(toBeInserted.right is None):\n toBeInserted.right=Node(key,toBeInserted)\n toBeInserted.updateHeight()\n toBeInserted.updateNodeNum()\n else:\n self.__add__(toBeInserted.right,key)\n toBeInserted.updateHeight()\n toBeInserted.updateNodeNum()\n toBeInserted.checkAndFixRightInsert()\n \n elif(keycurrentNode.key):\n self.__remove__(currentNode.right,key)\n currentNode.checkAndFixRightDelete()\n elif(key=tree.key)):\n break\n if(l=l):\n# klevel+=1\n self.__nodeList__(node.left,l,h,result)\n# klevel-=1\n if(node.key<=h):\n# klevel+=1\n self.__nodeList__(node.right,l,h,result)\n# klevel-=1\n def __rank__(self,currentNode,key):\n if(currentNode is None):\n return False,0\n if(key>currentNode.key):\n hit,count=self.__rank__(currentNode.right,key)\n return hit,count+1+nodeNum(currentNode.left)\n if(key other.key or\n (self.key == other.key and self.wire_id > other.wire_id))\n \n def __ge__(self, other):\n # :nodoc: Delegate comparison to keys.\n return (self.key > other.key or\n (self.key == other.key and self.wire_id >= other.wire_id))\n\n def __eq__(self, other):\n # :nodoc: Delegate comparison to keys.\n return self.key == other.key and self.wire_id == other.wire_id\n \n def __ne__(self, other):\n # :nodoc: Delegate comparison to keys.\n return self.key == other.key and self.wire_id == other.wire_id\n\n def __hash__(self):\n # :nodoc: Delegate comparison to keys.\n return hash([self.key, self.wire_id])\n\n def __repr__(self):\n # :nodoc: nicer formatting to help with debugging\n return ''\n\nclass KeyWirePairL(KeyWirePair):\n \"\"\"A KeyWirePair that is used as the low end of a range query.\n \n This KeyWirePair is smaller than all other KeyWirePairs with the same key.\"\"\"\n def __init__(self, key):\n self.key = key\n self.wire = None\n self.wire_id = -1000000000\n\nclass KeyWirePairH(KeyWirePair):\n \"\"\"A KeyWirePair that is used as the high end of a range query.\n \n This KeyWirePair is larger than all other KeyWirePairs with the same key.\"\"\"\n def __init__(self, key):\n self.key = key\n self.wire = None\n # HACK(pwnall): assuming 1 billion objects won't fit into RAM.\n self.wire_id = 1000000000\n\nclass CrossVerifier(object):\n \"\"\"Checks whether a wire network has any crossing wires.\"\"\"\n \n def __init__(self, layer):\n \"\"\"Verifier for a layer of wires.\n \n Once created, the verifier can list the crossings between wires (the \n wire_crossings method) or count the crossings (count_crossings).\"\"\"\n\n self.events = []\n self._events_from_layer(layer)\n self.events.sort()\n \n self.index = RangeIndex()\n self.result_set = ResultSet()\n self.performed = False\n \n def count_crossings(self):\n \"\"\"Returns the number of pairs of wires that cross each other.\"\"\"\n if self.performed:\n raise \n self.performed = True\n return self._compute_crossings(True)\n\n def wire_crossings(self):\n \"\"\"An array of pairs of wires that cross each other.\"\"\"\n if self.performed:\n raise \n self.performed = True\n return self._compute_crossings(False)\n\n def _events_from_layer(self, layer):\n \"\"\"Populates the sweep line events from the wire layer.\"\"\"\n for wire in layer.wires.values():\n if wire.is_horizontal():\n self.events.append([wire.x1, 0, wire.object_id, 'add', wire])\n self.events.append([wire.x2,3,wire.object_id, 'delete', wire])\n else: \n self.events.append([wire.x1, 1, wire.object_id, 'query', wire])\n\n 
def _compute_crossings(self, count_only):\n \"\"\"Implements count_crossings and wire_crossings.\"\"\"\n if count_only:\n result = 0\n else:\n result = self.result_set\n for event in self.events:\n event_x, event_type, wire = event[0], event[3], event[4]\n \n if event_type == 'add':\n self.trace_sweep_line(event_x)\n self.index.add(KeyWirePair(wire.y1, wire))\n\n elif event_type == 'query':\n self.trace_sweep_line(event_x)\n if count_only:\n result += self.index.count(KeyWirePairL(wire.y1),\n KeyWirePairH(wire.y2))\n else:\n cross_wires=self.index.list(KeyWirePairL(wire.y1),\n KeyWirePairH(wire.y2))\n for cross_wire in cross_wires:\n result.add_crossing(wire, cross_wire.wire)\n elif event_type=='delete':\n self.trace_sweep_line(event_x)\n self.index.remove(KeyWirePair(wire.y1, wire))\n \n return result\n \n def trace_sweep_line(self, x):\n \"\"\"When tracing is enabled, adds info about where the sweep line is.\n \n Args:\n x: the coordinate of the vertical sweep line\n \"\"\"\n # NOTE: this is overridden in TracedCrossVerifier\n pass\n\nclass TracedCrossVerifier(CrossVerifier):\n \"\"\"Augments CrossVerifier to build a trace for the visualizer.\"\"\"\n \n def __init__(self, layer):\n CrossVerifier.__init__(self, layer)\n self.trace = []\n self.index = TracedRangeIndex(self.trace)\n self.result_set = TracedResultSet(self.trace)\n \n def trace_sweep_line(self, x):\n self.trace.append({'type': 'sweep', 'x': x})\n \n def trace_as_json(self):\n \"\"\"List that obeys the JSON format restrictions with the verifier trace.\"\"\"\n return self.trace\n\n# Command-line controller.\nif __name__ == '__main__':\n import sys\n layer = WireLayer.from_file(sys.stdin)\n verifier = CrossVerifier(layer)\n \n if os.environ.get('TRACE') == 'jsonp':\n verifier = TracedCrossVerifier(layer)\n result = verifier.wire_crossings()\n json_obj = {'layer': layer.as_json(), 'trace': verifier.trace_as_json()}\n sys.stdout.write('onJsonp(')\n json.dump(json_obj, sys.stdout)\n sys.stdout.write(');\\n')\n elif os.environ.get('TRACE') == 'list':\n verifier.wire_crossings().write_to_file(sys.stdout)\n else:\n sys.stdout.write(str(verifier.count_crossings()) + \"\\n\")\n","sub_path":"6006/A3/circuit2/circuit2.py","file_name":"circuit2.py","file_ext":"py","file_size_in_byte":20778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
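The circuit2.py record above pairs a sweep-line cross-verifier with an AVL-tree range index. A minimal usage sketch of the intended API, with an invented three-wire layout (it assumes WireLayer and CrossVerifier are importable from that module):

```python
# Hypothetical layout: h1 is horizontal, v1 crosses it, v2 crosses nothing.
layer = WireLayer()
layer.add_wire('h1', 0, 5, 10, 5)    # horizontal wire at y = 5, spanning x = 0..10
layer.add_wire('v1', 3, 0, 3, 10)    # vertical wire at x = 3; it intersects h1
layer.add_wire('v2', 20, 0, 20, 10)  # vertical wire far to the right; no crossings

verifier = CrossVerifier(layer)
print(verifier.count_crossings())    # h1 and v1 intersect, so one crossing is expected
```

In the record's own `__main__` block the same layout would instead be read from stdin via `WireLayer.from_file(sys.stdin)`.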
+{"seq_id":"638839702","text":"from flask import Flask\nimport sqlite3\n\n\nNO_RESULT = 'No result from local server'\nconn = sqlite3.connect('resultsDB.sqlite')\ncur = conn.cursor()\n\nhost = '0.0.0.0'\nportNumber = 12223\napp = Flask(__name__)\n\n\ndef getResult(usn, retry=False):\n usn = usn[0:5] + usn[5:7].upper() + usn[7:]\n print(usn)\n cur.execute('SELECT result FROM Results WHERE usn = ?', (usn,))\n htmlRes = cur.fetchone()\n if(htmlRes != None):\n htmlRes, = htmlRes\n return htmlRes\n else:\n return None\n\n@app.route('/', methods=['GET'])\ndef keyGet(usn):\n res = getResult(usn)\n if res is None:\n return NO_RESULT\n else:\n return res\n\n\nif __name__ == '__main__':\n app.run(host=host, port=portNumber, threaded=False)\n","sub_path":"local_result_server.py","file_name":"local_result_server.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"383419355","text":"import os\n\nfrom classify import show_trained_data\n\n\ndef test_file_codec():\n rootdir = os.getcwd() + '/DataSet'\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n path = os.path.join(subdir, file)\n\n try:\n f = open(path)\n sentence = f.read()\n except:\n print(path.split('/')[-1])\n\n\nshow_trained_data()\n\n# test_file_codec()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"177957609","text":"import io\nimport sys\n\nclass GeneralizedSuffixTree(object):\n \"\"\"\n Represents a generalized suffix tree for string matching.\n\n Uses a list of words.\n \"\"\"\n\n WORD_DELIMITER_CHAR = '$'\n\n class Node():\n \"\"\"\n Represents a node in the generalized suffix tree.\n \"\"\"\n\n def __init__(self, parent, words=set()):\n \"\"\"\n Initializes.\n \"\"\"\n\n self._parent = parent\n self._children = []\n self._words = words\n\n @property\n def parent(self):\n \"\"\"\n Returns the parent node ID.\n \"\"\"\n\n return self._parent\n\n @property\n def children(self):\n \"\"\"\n Returns the IDs of the child nodes.\n \"\"\"\n\n return self._children\n\n @property\n def words(self):\n \"\"\"\n Returns the set of the indices of the words that pass through\n this node.\n \"\"\"\n\n return self._words\n\n def __repr__(self):\n \"\"\"\n Returns the string representation.\n \"\"\"\n\n return 'parent={}, children={}, words={}'.format(self._parent,\n self._children,\n self._words)\n\n class Edge():\n \"\"\"\n Represents an edge in the generalized suffix tree.\n \"\"\"\n\n def __init__(self, word_index, start_index, stop_index):\n \"\"\"\n Initializes.\n \"\"\"\n\n self._word_index = word_index\n self._start_index = start_index\n self._stop_index = stop_index\n\n def __repr__(self):\n \"\"\"\n Returns the string representation.\n \"\"\"\n\n return 'word={}, start_index={}, stop_index={}'.format(\n self._word_index,\n self._start_index,\n self._stop_index)\n\n def __init__(self, words):\n \"\"\"\n Initializes.\n \"\"\"\n\n self._words = []\n root = GeneralizedSuffixTree.Node(-1)\n self._nodes = [ root ]\n self._edges = {}\n\n for word in words:\n self._add_word(word, len(words) > 1)\n\n def _add_word(self, word, multiple=True):\n \"\"\"\n Adds the word to the tree.\n\n Algorithm:\n\n - Append $ to the word (so that all the words are delimited\n by $0, ..., and $(N - 1).\n\n - Add each suffix of the word to the tree.\n\n - Find the insertion point and corresponding suffix.\n\n - Create a new node and add it as a child to its parent node.\n\n - Create an edge from the insertion point to the new node.\n \"\"\"\n\n word += GeneralizedSuffixTree.WORD_DELIMITER_CHAR\n if multiple:\n word += str(len(self._words))\n self._words.append(word)\n\n end_index = word.index(GeneralizedSuffixTree.WORD_DELIMITER_CHAR)\n for i in range(end_index + 1):\n suffix = word[i:]\n insertion_suffix, insertion_parent_id = self._insert_node(suffix)\n\n new_word_index = len(self._words) - 1\n node = GeneralizedSuffixTree.Node(insertion_parent_id,\n { new_word_index })\n self._nodes.append(node)\n new_child_id = len(self._nodes) - 1\n self._nodes[insertion_parent_id]._children.append(new_child_id)\n\n end_index = len(word)\n start_index = end_index - len(insertion_suffix)\n edge = GeneralizedSuffixTree.Edge(new_word_index,\n start_index,\n end_index)\n self._edges[insertion_parent_id, new_child_id] = edge\n\n def _insert_node(self, suffix, current_node=0):\n \"\"\"\n Traverses the tree to determine the insertion point of the given suffix.\n\n Algorithm:\n\n - Update the current node's word indices to include the last word's\n index.\n\n - If the first character of the given suffix is the delimiter,\n then return (suffix, current node).\n\n - Consider each child edge leading from the current node.\n\n - If the entire edge is a prefix of the suffix, make a\n recursive call to move to the child node and traverse\n further down the tree.\n\n - Otherwise, if the edge partially overlaps in the prefix of\n 
the current suffix, split the edge and insert a new node\n at the split point (which is at the end of the overlap).\n Return (offset suffix, new node ID).\n \"\"\"\n\n new_word_index = len(self._words) - 1\n self._nodes[current_node]._words.add(new_word_index)\n\n if suffix[0] == GeneralizedSuffixTree.WORD_DELIMITER_CHAR:\n return suffix, current_node\n\n for child_id in self._nodes[current_node]._children:\n edge = self._edges[current_node, child_id]\n edge_word = self.edge_substring(edge)\n\n if suffix[:len(edge_word)] == edge_word:\n suffix = suffix[len(edge_word):]\n return self._insert_node(suffix, child_id)\n elif suffix[0] == edge_word[0]:\n offset = 0\n while (suffix[offset] == edge_word[offset] !=\n GeneralizedSuffixTree.WORD_DELIMITER_CHAR):\n offset += 1\n\n new_node_id = self._split_edge(current_node, child_id, offset)\n\n return suffix[offset:], new_node_id\n\n return suffix, current_node\n\n def _split_edge(self, parent_id, child_id, split_pos):\n \"\"\"\n Splits the edge between the given parent and child nodes at the given\n split position.\n\n Inserts a new node at the split position and returns the index of the\n new node.\n\n Algorithm:\n\n - Create a new node, copying the child node's word indices and\n adding the last word's index. The node's parent is the old edge's\n parent node. The node's children is the old edge's child node.\n\n - The old edge's parent node's children are updated to include the\n new node and exclude the old edge's child node.\n\n - The old edge's child node's parent is updated as the new node.\n\n - The tree's edges are updated to remove the old edge and to add\n 2 new edges from the parent node to the new node and from the new\n node to the child node.\n \"\"\"\n\n new_node_id = len(self._nodes)\n new_word_index = len(self._words) - 1\n word_indices = self._nodes[child_id]._words | { new_word_index }\n new_node = GeneralizedSuffixTree.Node(parent_id,\n words=word_indices)\n self._nodes.append(new_node)\n self._nodes[new_node_id]._children.append(child_id)\n\n self._nodes[parent_id]._children.append(new_node_id)\n self._nodes[parent_id]._children.remove(child_id)\n\n self._nodes[child_id]._parent = new_node_id\n\n old_edge = self._edges[parent_id, child_id]\n parent_to_new_node_edge = GeneralizedSuffixTree.Edge(\n old_edge._word_index,\n old_edge._start_index,\n old_edge._start_index + split_pos)\n self._edges[parent_id, new_node_id] = parent_to_new_node_edge\n new_node_to_child_edge = GeneralizedSuffixTree.Edge(\n old_edge._word_index,\n old_edge._start_index + split_pos,\n old_edge._stop_index)\n self._edges[new_node_id, child_id] = new_node_to_child_edge\n\n del self._edges[parent_id, child_id]\n\n return new_node_id\n\n def edge_substring(self, edge):\n \"\"\"\n Returns the substring that corresponds to the given edge.\n \"\"\"\n\n word = self._words[edge._word_index]\n\n return word[edge._start_index:edge._stop_index]\n\n def node_substring(self, node_id):\n \"\"\"\n Returns the substring that corresponds to a traversal from\n the root to the given node.\n\n Algorithm:\n\n - Traverse the tree from the given node to the root.\n Accumulate characters over the visited edges.\n \"\"\"\n\n word = ''\n while self._nodes[node_id]._parent != -1:\n edge = self._edges[self._nodes[node_id]._parent, node_id]\n word = self.edge_substring(edge) + word\n\n node_id = self._nodes[node_id]._parent\n\n return word\n\n def node_depth(self, node_id):\n \"\"\"\n Returns the node's depth in the tree, which means the length of the\n substring that leads to the 
given node.\n\n Note: The substring does not include the out-of-alphabet character.\n\n Algorithm:\n\n - If the node ID is that of the root node, return 0.\n\n - Check the first edge for whether it includes the delimited\n character. The depth is initialized to the first edge's word's\n length.\n\n - Continue traversing the tree from the given node to the root.\n Increment the depth by each edge's word length.\n \"\"\"\n\n if node_id == 0:\n return 0\n\n edge = self._edges[self._nodes[node_id]._parent, node_id]\n edge_word = self.edge_substring(edge)\n depth = None\n if GeneralizedSuffixTree.WORD_DELIMITER_CHAR not in edge_word:\n depth = len(edge_word)\n else:\n marker_index = edge_word.index(\n GeneralizedSuffixTree.WORD_DELIMITER_CHAR)\n depth = len(edge_word[:marker_index])\n node_id = self._nodes[node_id]._parent\n\n while self._nodes[node_id]._parent != -1:\n edge = self._edges[self._nodes[node_id]._parent, node_id]\n edge_word = self.edge_substring(edge)\n depth += len(edge_word)\n\n node_id = self._nodes[node_id]._parent\n\n return depth\n\n @property\n def nodes(self):\n \"\"\"\n Returns the nodes.\n \"\"\"\n\n return self._nodes\n\n @property\n def edges(self):\n \"\"\"\n Returns the edges.\n \"\"\"\n\n return self._edges\n","sub_path":"course4-strings/practice/suffix_tree/generalized_suffix_tree.py","file_name":"generalized_suffix_tree.py","file_ext":"py","file_size_in_byte":10726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
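The generalized suffix tree record above stores, on every node, the set of word indices whose suffixes pass through it, which is the standard route to common-substring queries. A small usage sketch (the two words are arbitrary, and the class is assumed importable from that module):

```python
# Usage sketch for the GeneralizedSuffixTree defined above.
tree = GeneralizedSuffixTree(['banana', 'bandana'])

# Non-root nodes whose `words` set contains both word indices lie on paths that
# spell substrings common to both words; the deepest such path is a longest
# common substring.
shared = [tree.node_substring(i) for i, node in enumerate(tree.nodes)
          if i != 0 and {0, 1} <= node.words]
print(max(shared, key=len))  # e.g. 'ban' or 'ana' for the two words above
```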
+{"seq_id":"408234447","text":"#!/bin/python\n\n#importing the porgrams\nimport os\nimport sys\n\npdb_file = None\nglobal file_name\nlines = None\n\n\n### THE MENU FUNCTIONS\ndef print_menu():\n print(\n\"\"\"\n********************************************************************************\n* PDB FILE ANALYZER *\n********************************************************************************\n* Select an option from below: *\n* *\n* 1) Open a PDB File (O) *\n* 2) Information (I) *\n* 3) Show histogram of amino acids (H) *\n* 4) Display Secondary Structure (S) *\n* 5) Export PDB File (X) *\n* 6) Exit (Q) *\n* *\n* Current PDB: %s *\n********************************************************************************\n\"\"\"%pdb_file)\n \n option = input(\": \")\n #return option \n\n # \"\"\"Takes user's option following main menu display, assesses it and calls the respective funtions\"\"\"\n if option.lower() in ('o','i','h','s','x','q'):\n if option.lower() == 'o':\n open_file()\n print_menu()\n \n if option.lower() == 'i':\n print(\"Information\")\n main_info_function(file_name)\n print_menu()\n \n if option.lower() == 'h':\n print(\"Histogram of Amino Acids\")\n main_histogram(file_name)\n print_menu()\n \n if option.lower() == 's':\n print(\"Display Secondary Structure\")\n main_sec_structure(file_name)\n print_menu()\n \n if option.lower() == 'x':\n export_func(file_name)\n print_menu()\n \n if option.lower() == 'q':\n print(\"Exit\")\n main_quit()\n \n else:\n print(\"\\n\" + \"unsupported option: %s, please enter valid option\" %(option))\n print_menu()\n\n\n#------------------------------------------------------------------------------------------------------------------------\n# OPEN SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef path_check(file):\n \"\"\" Checks if file exists. if it does, then call function that checks whether file is open \"\"\"\n if os.path.exists(file): \n check_open(file)\n else:\n print(\"file not found\")\n print_menu()\n\ndef check_open(file):\n \"\"\"checks if file is open or not. 
If file is already open, it calls function for replacement enquiry\"\"\"\n if pdb_file:\n file_name = file\n else:\n replacement_check(file)\n\ndef replacement_check(file):\n \"\"\"checks if user wants to replace file or not, incase file was already loaded\"\"\"\n if True:\n x = input(\"Would you like to replace the current file, Y/N: \")\n if x.lower() == 'y':\n file_name = file\n print(\"The File 3AYU.pdb has been successfully loaded\")\n else:\n print(\"proceed to select options to work with\", pdb_file)\n print_menu()\n\ndef read_pdb(pdb_file):\n with open(file_name,\"r\") as myfile:\n lines = myfile.readlines()\n return lines \n#------------------------------------------------------------------------------------------------------------------------\n# INFORMATION SECTION #\n#------------------------------------------------------------------------------------------------------------------------\ndef title_print(lines):\n \"\"\"print title of the protein\"\"\"\n title_string = \"\"\n for line in lines:\n if line.startswith(\"TITLE\"):\n title_string = title_string + line[9:].strip()\n print(\"Title : \" , title_string)\n \ndef extract_chain_sequences(lines):\n \"\"\"extract all the sequence residue lines from the file\"\"\"\n seq = []\n for line in lines:\n if line.startswith('SEQRES'):\n seq.append(line[0:])\n return seq\n\ndef collect_chain_ids(all_sequences):\n \"\"\"identify chains in protein\"\"\"\n chains = []\n for line in all_sequences:\n if line[11] not in chains:\n chains.append(line[11])\n return chains\n\ndef print_chains(chains_in_prot):\n \"\"\"print chains in protein\"\"\"\n x = '' .join(chains_in_prot)\n print(\"- Chains:\", x[0], \"and\", x[1])\n \ndef pdb_info(all_sequences, chains_in_prot, lines):\n title_print(lines)\n for chain in chains_in_prot:\n print(\"- Chain \", chain)\n \n residues = []\n for line in all_sequences:\n if line[11] == chain:\n one_letter_code = {'GLY':'G', 'ALA':'A', 'VAL':'V', 'CYS':'C', 'PRO':'P', 'LEU':'L',\\\n 'ILE':'I', 'MET':'M', 'TRP':'W', 'PHE':'F', 'SER':'S', 'THR':'T',\\\n 'TYR':'Y', 'ASN':'N', 'GLN':'Q', 'LYS':'K', 'ARG':'R', 'HIS':'H',\\\n 'ASP':'D', 'GLU':'E'}\n residues.extend(line[18:].split()) #splits the string into a list of residues after appending to the list of residues\n chain_seq = '' .join([one_letter_code[i] for i in residues])#converts the 3 code residues to their corresponding 1 letter denotation\n \n helix = []\n for line in lines:\n if line.startswith('HELIX') and line[19] == chain:\n helix.append(line[0:])\n numb = len(helix)\n \n sheet = []\n for line in lines:\n if line.startswith('SHEET') and line[21] == chain:\n sheet.append(line[0:])\n num = len(sheet)\n \n print(\"Number of amino acids:\", len(chain_seq))\n print(\"Number of helix: \", numb)\n print(\"Number of sheet: \", num)\n print(\"Sequence:\", '\\n'.join(''.join(chain_seq[i:i+50]) for i in range(0, len(chain_seq), 50)))\n\n \n\n#------------------------------------------------------------------------------------------------------------------------\n# HISTOGRAM SECTION #\n#------------------------------------------------------------------------------------------------------------------------\ndef chain_sequence(all_sequences):\n \"\"\"general print sequences in all chains\"\"\"\n residues = []\n for line in all_sequences:\n one_letter_code = {'GLY':'G', 'ALA':'A', 'VAL':'V', 'CYS':'C', 'PRO':'P', 'LEU':'L', 'ILE':'I',\\\n 'MET':'M', 'TRP':'W', 'PHE':'F', 'SER':'S', 'THR':'T', 'TYR':'Y', 'ASN':'N', \\\n 'GLN':'Q', 'LYS':'K', 'ARG':'R', 'HIS':'H', 'ASP':'D', 
'GLU':'E'}\n residues.extend(line[18:].split()) #splits the string into a list of residues after appending to the list of residues\n chain_seq = '' .join([one_letter_code[i] for i in residues]) #converts the 3 code residues to their corresponding 1 letter denotation\n return chain_seq\n\ndef ordering_option():\n \"\"\"prints the ordering options and gives the user an input cell. returns the input\"\"\"\n print(\"\"\"\n Choose an option to order by:\n number of amino acids - ascending (an)\n number of amino acids - descending (dn)\n alphabetically - ascending (aa)\n alphabetically - descending (da)\n \"\"\")\n choice = input(\"order by: \")\n return choice\n\ndef an_order(chain_seq):\n \"\"\" generates a list of the 20 amino acids ordered in ascending order of their abundance in the sequence\"\"\"\n # availing list of all the possible (20) amino acids\n aa_residues = ['G', 'A', 'V', 'C', 'P', 'L', 'I', 'M', 'W', 'F', 'S', 'T', 'Y', 'N', 'Q', 'K', 'R', 'H', 'D', 'E']\n aa_number = [] # initializing an empty list of count for every amino acid in the sequence\n \n # Create a dictionary of each amino acid paired with its count in the sequence\n for residue in aa_residues:\n aa_number.append(chain_seq.count(residue))\n aa_count_dict = dict((residue,aa) for residue,aa in zip(aa_residues, aa_number))\n \n # create a new list of the 20 amino acids ordered according to user option(an)\n residues_list= []\n for k,v in sorted(aa_count_dict.items(), key=lambda p:p[1]):\n residues_list.append(k)\n return residues_list\n\ndef dn_order(chain_seq):\n \"\"\"generates a list of the 20 amino acids ordered in descending order of their abundance in the sequence\"\"\"\n # availing list of all the possible (20) amino acids\n aa_residues = ['G', 'A', 'V', 'C', 'P', 'L', 'I', 'M', 'W', 'F', 'S', 'T', 'Y', 'N', 'Q', 'K', 'R', 'H', 'D', 'E']\n aa_number = [] # initializing an empty list of count for every amino acid in the sequence\n \n # Create a dictionary of each amino acid paired with its count in the sequence\n for residue in aa_residues:\n aa_number.append(chain_seq.count(residue))\n aa_count_dict = dict((residue,aa) for residue,aa in zip(aa_residues, aa_number))\n \n # create a new list of the 20 amino acids ordered according to user option(dn)\n residues_list = []\n for k,v in sorted(aa_count_dict.items(), key=lambda p:p[1], reverse=True):\n residues_list.append(k)\n \n return residues_list\n\ndef aa_order(chain_seq):\n \"\"\"generates a list of the 20 amino acids ordered in ascending alphabetical order\"\"\"\n residue_list = []\n one_letter_code = {'G':'Gly', 'A':'Ala', 'V':'Val', 'C':'Cys', 'P':'Pro', 'L':'Leu', 'I':'Ile', 'M':'Met', 'W':'Trp', 'F':'Phe', 'S':'Ser', 'T':'Thr', 'Y':'Tyr', 'N':'Asn', 'Q':'Gln', 'K':'Lys', 'R':'Arg', 'H':'His', 'D':'Asp', 'E':'Glu'}\n for k,v in sorted(one_letter_code.items(), key=lambda p:p[1]):\n residue_list.append(k)\n return residue_list\n\n\ndef da_order(chain_seq):\n \"\"\"generates a list of the 20 amino acids ordered in ascending alphabetical order\"\"\"\n residue_list = []\n one_letter_code = {'G':'Gly', 'A':'Ala', 'V':'Val', 'C':'Cys', 'P':'Pro', 'L':'Leu', 'I':'Ile', 'M':'Met', 'W':'Trp', 'F':'Phe', 'S':'Ser', 'T':'Thr', 'Y':'Tyr', 'N':'Asn', 'Q':'Gln', 'K':'Lys', 'R':'Arg', 'H':'His', 'D':'Asp', 'E':'Glu'}\n for k,v in sorted(one_letter_code.items(), key=lambda p:p[1], reverse=True):\n residue_list.append(k)\n return residue_list\n\ndef draw_hist(chain_seq, residues):\n \"\"\"generates a histogram of amino acids in the sequence\"\"\"\n one_letter_code = 
{'G':'Gly', 'A':'Ala', 'V':'Val', 'C':'Cys', 'P':'Pro', 'L':'Leu', 'I':'Ile', 'M':'Met', 'W':'Trp', 'F':'Phe', 'S':'Ser', 'T':'Thr', 'Y':'Tyr', 'N':'Asn', 'Q':'Gln', 'K':'Lys', 'R':'Arg', 'H':'His', 'D':'Asp', 'E':'Glu'}\n #residues = ['G', 'A', 'V', 'C', 'P', 'L', 'I', 'M', 'W', 'F', 'S', 'T', 'Y', 'N', 'Q', 'K', 'R', 'H', 'D', 'E']\n for residue in residues:\n freq = []\n for i in chain_seq:\n if residue == i:\n l = one_letter_code[i] \n freq.append(l)\n amino_acid = \"\".join(set(freq))\n if residue in chain_seq:\n print(amino_acid, \"(\", len(freq),\"):\", \"*\" * len(freq))\n else:\n pass\n\ndef summary_caller(order, chain_seq):\n \"\"\"takes theorder option given by the user and calls appropriate function to excecute task\"\"\"\n \n if order.lower() in ('an','dn','aa','da'):\n if order.lower() == 'an': \n residues = an_order(chain_seq)\n draw_hist(chain_seq, residues)\n \n elif order.lower() == 'dn':\n residues = dn_order(chain_seq)\n draw_hist(chain_seq, residues)\n \n elif order.lower() == 'aa':\n residues = aa_order(chain_seq)\n draw_hist(chain_seq, residues)\n \n else:\n residues = da_order(chain_seq)\n draw_hist(chain_seq, residues)\n else:\n print(\"invalid input\")\n ordering_option() \n \n\n#------------------------------------------------------------------------------------------------------------------------\n# SECONDARY STRUCTURE SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef sec_structure_generate(chains_in_prot, all_sequences,lines):\n for chain in chains_in_prot: # Adress a chain at a time from my unique chains list\n print('\\n' + \"Chain\",chain,\":\")\n \n residues = [] # Initiate empty list for my residues per chain\n count = 0\n structure = [] # Initiate an empty list to create the secondary structure symbols as we move over our residue list once generated\n tag =[]\n for line in all_sequences: # Generate primary sequence of the chain\n if line[11] == chain:\n count = count+1\n one_letter_code = {'GLY':'G', 'ALA':'A', 'VAL':'V', 'CYS':'C', 'PRO':'P', 'LEU':'L', 'ILE':'I',\\\n 'MET':'M', 'TRP':'W', 'PHE':'F', 'SER':'S', 'THR':'T', 'TYR':'Y', 'ASN':'N', \\\n 'GLN':'Q', 'LYS':'K', 'ARG':'R', 'HIS':'H', 'ASP':'D', 'GLU':'E'}\n x = line[18:].split()\n residues.extend(x) #splits the string into a list of residues after appending to the list of residues\n chain_seq = '' .join([one_letter_code[i] for i in residues]) #converts the 3 code residues to their corresponding 1 letter denotation\n for i in residues: #Fill the structure list with dashes ('-') as place holders per residue\n structure.append(\"-\")\n tag.append(\" \")\n for line in lines: # Identify where each structure starts and ends using the secondary structure info in the pdb file\n \n if line.startswith('SHEET'): #Process for sheet part of the chain\n new_line = line.split()\n if new_line[5] == chain:\n start = int(new_line[6])\n stop = int(new_line[9])\n num = (stop - start) +1\n update_structure = num * \"|\" \n \n update_tag = (new_line[1] + new_line[2])\n tag[start-1:start+1] = update_tag\n structure[start - 1 : stop] = update_structure \n \n if line.startswith('HELIX'): #process for helix part of chain\n new_line = line.split()\n if new_line[4] == chain:\n start = int(new_line[5])\n stop = int(new_line[8])\n num = (stop - start) +1\n update_structure = num * \"/\" \n \n update_tag = (new_line[1])\n tag[start-1:start+1] = update_tag\n structure[start - 1 : stop] = update_structure\n \n for i in range(0, 
len(chain_seq),80):\n \n print('\\n' + ''.join(chain_seq[i:i+80]) + \\\n '\\n' + ''.join(structure[i:i+80]) +\\\n '\\n' + ''.join(tag[i:i+80]))\n print(\"(%d)\" %(len(chain_seq)))\n \n#------------------------------------------------------------------------------------------------------------------------\n# EXPORT SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef export_func(file_name):\n out_file_name = file + 'export'\n with open(out_file_name,\"w\") as myfile:\n lines_to_transfer = open(file_name,\"r\")\n for line in lines_to_transfer:\n myfile.write(line)\n file.close()\n\n#------------------------------------------------------------------------------------------------------------------------\n# EXIT SECTION #\n#------------------------------------------------------------------------------------------------------------------------\n\ndef quit():\n \"\"\"Takes users input of whether to exit program or go back to the main menu\"\"\"\n option = input(\"Do you want to exit (E) or do you want go back to the menu (M)\")\n return option\n\ndef quit_options(option): \n \"\"\"Executes user option\"\"\"\n if option == \"E\" or option == \"e\":\n sys.exit()\n elif option == \"M\" or option == \"m\":\n print_menu()\n else:\n quit()\n \n \n#------------------------------------------------------------------------------------------------------------------------\n# THE MASTER FUNCTIONS FOR EACH OPTION #\n#------------------------------------------------------------------------------------------------------------------------\ndef open_file():\n \"\"\" Asks the user to put in file name and once received, it calls the function that checks whether file path exists\"\"\"\n file = input(\"Enter a Valid PATH for a PDB File: \")\n path_check(file)\n return file\n\ndef main_info_function(file_name):\n \"\"\"The information summary boss function\"\"\"\n lines = read_pdb(file)\n all_sequences = extract_chain_sequences(lines) #extracts the sequence residues lines\n sequence = chain_sequence(all_sequences) # extracts the sequence of amino acids in the protein\n chains_in_prot = collect_chain_ids(all_sequences)\n pdb_info(all_sequences, chains_in_prot, lines)\n\ndef main_histogram(file_name):\n \"\"\"Main function. Calls the rest of the functions within the histogram option\"\"\"\n lines = read_pdb(file) \n all_sequences = extract_chain_sequences(lines) #extracts the sequence residues lines\n sequence = chain_sequence(all_sequences) # extracts the sequence of amino acids in the protein\n order = ordering_option() # displays order options for the user and records the choice input\n summary_caller(order,sequence) # sieves through the order options and calls the appropriate functions based on order selected by user\n\ndef main_sec_structure(file_name):\n \"\"\"This is master secondary structure function\"\"\"\n lines = read_pdb(file)\n all_sequences = extract_chain_sequences(lines)\n chains_in_prot = collect_chain_ids(all_sequences)\n sec_structure_generate(chains_in_prot, all_sequences,lines)\n\ndef main_quit():\n \"\"\"The master quit function\"\"\"\n option = quit()\n quit_options(option)\n \ndef software_funct():\n print_menu()\nsoftware_funct()\n\n","sub_path":".ipynb_checkpoints/mini-project-frame-checkpoint.py","file_name":"mini-project-frame-checkpoint.py","file_ext":"py","file_size_in_byte":19133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"272898876","text":"import pandas as pd\nfrom textblob import TextBlob\nfrom textblob.sentiments import NaiveBayesAnalyzer\nimport seaborn as sns; sns.set(color_codes=True)\nfrom seaborn import kdeplot\nimport matplotlib.pyplot as plt\n\n# carga DataFrame\ndf = pd.read_csv(\"CSV-JoeBin.csv\")\n\n# Nuevo data frame solo con dos columnas\nDataAnalis = df[[\"usertweet\", \"TweetMsg\"]]\nprint (DataAnalis.head(10))\nprint ('')\n\n# Remover index si contine el Texto\n# utilizamos or (|)\ndf3 = DataAnalis.drop(\n DataAnalis[DataAnalis['TweetMsg'].str.contains('@ONU_es @free_equal @ONU_derechos')].index | \n DataAnalis[DataAnalis['TweetMsg'].str.contains('@BarcelonaSC')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('LigaPro')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('Alineación confirmada')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('¡Nuestra armadura de hoy!')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('¡Hoy juega el Ídolo!')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('PRÓXIMO PARTIDO')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('LDU_Oficial')].index |\n DataAnalis[DataAnalis['TweetMsg'].str.contains('Escoge el diseño que más te guste')].index\n \n )\nprint(df3.head(10))\n\nprint('Total de elemetos Data-Frame(df)', len(df))\nprint('Total de elemetos Data-Frame(df)', len(df3))\n\n#Eliminar caracteres especiales y urls\ndf4 = df3.TweetMsg.str.replace('http\\S+',\"\").str.replace('@',\"\").str.replace('?',\"\").str.replace('!',\"\").str.replace('(',\"\").str.replace(')',\"\").str.replace('#',\"\").str.replace(':',\"\").str.replace('¡',\"\").str.replace('.',\"\").str.replace(',',\"\").str.replace('/',\"\").str.replace('-',\"\").str.replace('_',\"\").str.replace('+',\"\").str.replace('“',\"\").str.replace('\"',\"\").str.replace(\"'\",\"\").str.replace(\"|\",\"\")\nprint (df4.head(10))\nprint ('')\n\n# Variables Globales \nmsg = 1\nitem = []\ntweet = []\nNPLpolarity = []\n\n# variables Sentimiento\nNeutro=0\nPositivo=0\nMalo=0\n\n# recorrer cada elemento del data frame\nfor ind in df4.index: \n print (msg)\n try:\n print(df4[ind]) \n t=TextBlob(df4[ind])\n #ten=t.translate(to=\"en\")\n #print (ten)\n polarity= t.polarity\n print (polarity)\n #input()\n except:\n print(\"Error\")\n polarity= \"Error\"\n \n # agregar resultados a Lista de objetos\n tweet.append(df4[ind])\n NPLpolarity.append(t.polarity)\n item.append(msg)\n\n # Agrupar Polaridad del mensaje analizado\n if (t.polarity == 0):\n Neutro += 1\n\n if (t.polarity > 0 ):\n Positivo += 1\n\n if (t.polarity < 0 and t.polarity < 1):\n Malo += 1\n \n msg += 1\n \n# Nuevo Data-Frame con la Polaridad Incluida\ndfresultado = pd.DataFrame({'tweet': item, 'msg': tweet, 'NPLpolarity': NPLpolarity})\nprint(dfresultado.head(10))\n\n\n# Graficar lmplot\ng=sns.lmplot(x='tweet', y='NPLpolarity', data=dfresultado.head(150), line_kws={'color': 'red'})\nplt.savefig('data-lmplot.png', dpi=300)\nplt.show()\n\n# Graficar Pie chart\nsizes = [Neutro,Malo, Positivo]\nlabels = ['Neutro','Malo', 'Positivo']\ncols = ['c','b','r']\nexplode=(0,0.1,0)\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\nplt.title('Analisis de Sentimientos')\nplt.savefig('data-Pie.png', dpi=300)\nplt.show()\n\n# Save new data 
Frame\ndfresultado.to_csv('CSV-Resultado.csv')\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"246222116","text":"import os\nimport numpy as np\ntry:\n import serial\nexcept:\n os.system(\"pip3 install pyserial\")\ntry:\n import serial.tools.list_ports\nexcept:\n os.system(\"pip3 install pyserial\")\n\nimport serial.tools.list_ports\nimport warnings\nimport time\nfrom deploy import predict\nfor p in serial.tools.list_ports.comports():\n print(\"Dev \",p.description)\n pass\n\ngsm_ports = [\n p.device\n for p in serial.tools.list_ports.comports()\n if 'Generic' in p.description or 'Arduino' in p.description or 'tty' in p.description\n]\n\nif not gsm_ports:\n print(\"No Arduino Device Found\")\nif len(gsm_ports) > 1:\n warnings.warn('Multiple Arduino found - using the first')\n\ntry:\n port = gsm_ports[0]\n device = serial.Serial(port, 9600, timeout=0.5) # /dev/ttyUSB0\n print(\"Communication Established with Device.\")\n time.sleep(3)\n\nexcept:\n\n print(\"Unable to Initialize \")\npath = \"Data/9/\"\ni = 0\nwhile True:\n a = device.readall()\n if len(a) > 5:\n a = a.decode()\n print(a)\n break\n time.sleep(0.1)\n\nwhile True:\n a = device.readall()\n if len(a) > 5:\n a = a.decode()\n # if (input(\"0-discard/1-save: \") == '0'):\n # continue\n data = a.split(\"\\n\\r\\n\")[1]\n parsed = []\n for i in data.split(\"\\n\"):\n i = i.split(\",\")\n try:\n int(i[0])\n i = list(map(int, i))\n parsed.append(i)\n except:\n pass\n parsed = np.array(parsed).flatten()\n print(\"Predicted Shape: \")\n predict(parsed)\n\n time.sleep(0.1)\n","sub_path":"Python_MPU6050.py","file_name":"Python_MPU6050.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"341238593","text":"from django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.generic import (CreateView, DetailView, ListView,\n TemplateView, UpdateView)\nimport datetime as dt\nimport json\n\nfrom . import models, forms\nfrom .nvd3 import (duration_chart_data, duration_weekday_chart_data,\n duration_monthly_chart_data, duration_yearly_chart_data)\nfrom common.views import LoginMixin, SiteMixin\n\n\nclass MainView(LoginMixin, SiteMixin, TemplateView):\n template_name = 'tracker/index.html'\n section = 'tracker'\n\n def get_context_data(self):\n context = super().get_context_data()\n events = models.Event.objects.filter(progress=3).annotate_duration()\n context['ongoing_events'] = events.order_by('-duration')\n events = models.Event.objects.filter(progress=4).annotate_duration()\n context['waiting_events'] = events.order_by('-duration')\n entries = models.Entry.objects.filter(duration=None)\n context['pending_entries'] = entries.order_by('-datetime')\n return context\n\n\nclass StatsView(LoginMixin, SiteMixin, TemplateView):\n template_name = 'tracker/stats.html'\n section = 'stats'\n\n def get_context_data(self):\n context = super().get_context_data()\n duration = models.Entry.objects.all().duration_over_time()\n duration_data = duration_weekday_chart_data(duration)\n context['weekday_data'] = json.dumps(duration_data)\n duration_data = duration_monthly_chart_data(duration)\n context['monthly_data'] = json.dumps(duration_data)\n duration_data = duration_yearly_chart_data(duration)\n context['yearly_data'] = json.dumps(duration_data)\n # This time n years ago\n context['n_years_ago'] = []\n today = dt.date.today()\n delta = dt.timedelta(3)\n for year in range(today.year - 1, 2007, -1):\n start = dt.datetime(year, today.month, today.day, 0, 0) - delta\n start = start.replace(tzinfo=timezone.utc)\n end = dt.datetime(year, today.month, today.day, 23, 59) + delta\n end = end.replace(tzinfo=timezone.utc)\n entries = models.Entry.objects.filter(datetime__range=(start, end))\n if entries:\n context['n_years_ago'].append((year, entries))\n return context\n\n\nclass TopView(SiteMixin, ListView):\n template_name = 'tracker/top.html'\n section = 'top'\n queryset = models.Event.objects.all()\\\n .annotate_duration()\\\n .order_by('-rating', '-duration')\n\n\n# Entries\n\nclass EntryMixin(LoginMixin, SiteMixin):\n model = models.Entry\n section = 'entries'\n\n\nclass EntryView(EntryMixin, DetailView):\n pass\n\n\nclass EntryCreateView(EntryMixin, CreateView):\n fields = 'datetime', 'event', 'duration', 'annotation'\n\n def get_initial(self):\n try:\n event_pk = self.request.GET.get('event')\n event = models.Event.objects.get(id=event_pk)\n initial = {'event': event}\n except models.Event.DoesNotExist:\n initial = {}\n finally:\n initial['datetime'] = timezone.now()\n return initial\n\n\nclass EntryUpdateView(EntryMixin, UpdateView):\n fields = 'datetime', 'event', 'duration', 'annotation'\n\n\nclass EntryListView(EntryMixin, ListView):\n queryset = models.Entry.objects.all()\\\n .select_related('event')\\\n .order_by('-datetime')\n paginate_by = 100\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n object_list = context['object_list']\n if object_list is not None:\n duration = object_list.duration_over_time()\n duration_data = duration_chart_data(duration)\n context['duration_data'] = json.dumps(duration_data)\n return context\n\n\nclass EntryCloseView(EntryMixin, 
UpdateView):\n\n def dispatch(self, request, *args, **kwargs):\n instance = self.get_object()\n delta = timezone.now() - instance.datetime\n instance.duration = int(delta.days * 24 * 60 + delta.seconds / 60)\n instance.save()\n url = reverse('tracker:events:detail', args=[instance.event.id])\n return HttpResponseRedirect(url)\n\n\n# Events\n\nclass EventMixin(LoginMixin, SiteMixin):\n model = models.Event\n section = 'events'\n\n\nclass EventView(EventMixin, DetailView):\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n entries = self.object.entries.all()\n context['entry_list'] = entries.order_by('-datetime')\n context['tag_list'] = self.object.tags.all().order_by('value')\n duration = entries.duration_over_time()\n context['duration_data'] = json.dumps(duration_chart_data(duration))\n return context\n\n\nclass EventCreateView(EventMixin, CreateView):\n form_class = forms.EventForm\n\n\nclass EventUpdateView(EventMixin, UpdateView):\n form_class = forms.EventForm\n\n\nclass EventListView(EventMixin, ListView):\n paginate_by = 100\n queryset = models.Event.objects.all()\\\n .annotate_duration()\\\n .order_by('-date')\n\n\n# Tags\n\nclass TagMixin(LoginMixin, SiteMixin):\n model = models.Tag\n section = 'tags'\n\n\nclass TagView(TagMixin, DetailView):\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n events = self.object.events.exclude(rating=None).annotate_duration()\n context['event_list'] = events.order_by('-rating', '-duration')\n entries = models.Entry.objects.filter(event__in=events)\n entries = entries.select_related('event')\n context['entry_list'] = entries.order_by('-datetime')\n duration = entries.duration_over_time()\n duration_data = duration_monthly_chart_data(duration)\n context['duration_data'] = json.dumps(duration_data)\n return context\n\n\nclass TagListView(TagMixin, ListView):\n queryset = models.Tag.objects.all()\\\n .annotate_duration()\\\n .annotate_rating()\\\n .extra(select={'upper': 'upper(value)'})\\\n .order_by('upper')\n paginate_by = 100\n","sub_path":"web/tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"361630249","text":"# U03_EX11_SumofNaturalNumbers.py\n#\n# Author: Will Baschab\n# Course: Coding for OOP\n# Section: A2\n# Date: 29 Sep 2018\n# IDE: PyCharm\n#\n# Assignment Info\n# Exercise: 11\n# Source: Python Programming\n# Chapter: 03\n#\n# Program Description\n# This program determines the sum of the first n natural numbers\n# with n being inputted by the user.\n#\n#\n# Algorithm (pseudocode)\n# 1. print introduction\n# 2. get amount of terms from user input\n# 3. initialize total at 0\n# 4. begin for loop in range of terms\n# 5. total = total + (i + 1)\n# 6. print total in complete sentence\n#\n\n\ndef main():\n\n print(\"\\nThis program determines the sum of the first n natural numbers\",\n \"\\nwith n being inputted by the user.\")\n\n terms = int(input(\"\\nEnter the number of first natural numbers to sum: \"))\n\n total = 0\n\n for i in range(terms):\n total = total + (i + 1)\n\n print(\"\\nThe total of the first\", terms, \"natural numbers is\", str(total) + \".\")\n\n\nmain()\n","sub_path":"Chapter03/U03_EX11_SumofNaturalNumbers.py","file_name":"U03_EX11_SumofNaturalNumbers.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"632488924","text":"\"\"\"Search API.\n\nIt's assumed that the API is used with an object stored in a\nRelStorage with a Postgres back end.\n\"\"\"\n\nimport re\nfrom ZODB.utils import p64\n\ndef _try_to_close_cursor(cursor):\n try:\n cursor.close()\n except Exception:\n pass\n\ndef search(conn, query, *args, **kw):\n \"\"\"Search for newt objects using an SQL query.\n\n Query parameters may be provided as either positional\n arguments or keyword arguments. They are inserted into the\n query where there are placeholders of the form ``%s`` for\n positional arguments or ``%(NAME)s`` for keyword arguments.\n\n The query results must contain the columns ``zoid`` and\n ``ghost_pickle``. It's simplest and costs nothing to simply\n select all columns (using ``*``) from the ``newt`` table.\n\n A sequence of newt objects is returned.\n \"\"\"\n if kw:\n if args:\n raise TypeError(\"Only positional or keyword arguments can be used,\"\n \" not both\")\n args = kw\n get = conn.ex_get\n cursor = conn._storage.ex_cursor()\n try:\n cursor.execute(\"select zoid, ghost_pickle from (%s)_\" % query,\n args or kw)\n return [get(p64(zoid), ghost_pickle) for (zoid, ghost_pickle) in cursor]\n finally:\n _try_to_close_cursor(cursor)\n\ndef search_batch(conn, query, args, batch_start, batch_size):\n \"\"\"Query for a batch of newt objects.\n\n Query parameters are provided using the ``args``\n argument, which may be a tuple or a dictionary. They are\n inserted into the query where there are placeholders of the\n form ``%s`` for an arguments tuple or ``%(NAME)s`` for an\n arguments dict.\n\n The ``batch_size`` and ``batch_size`` arguments are used to\n specify the result batch. An ``ORDER BY`` clause should be\n used to order results.\n\n The total result count and sequence of batch result objects\n are returned.\n \"\"\"\n query = \"\"\"\n select zoid, ghost_pickle, count(*) over()\n from (%s) _\n offset %s limit %s\n \"\"\" % (query, batch_start, batch_size)\n get = conn.ex_get\n cursor = conn._storage.ex_cursor()\n try:\n cursor.execute(query, args)\n count = 0\n result = []\n for zoid, ghost_pickle, count in cursor:\n result.append(get(p64(zoid), ghost_pickle))\n return count, result\n finally:\n _try_to_close_cursor(cursor)\n\n\ntext_extraction_template = \"\"\"\\\ncreate or replace function %s(state jsonb) returns tsvector as $$\ndeclare\n text text;\n result tsvector;\nbegin\n if state is null then return null; end if;\n\"\"\", \"\"\"\\\n return result;\nend\n$$ language plpgsql immutable;\n\"\"\"\n\ndef _texts(texts, exprs, weight=None):\n if not exprs:\n return\n\n if isinstance(exprs, str):\n exprs = (exprs, )\n\n first_block = not texts\n\n first = True\n for expr in exprs:\n if identifier(expr):\n expr = \"state ->> '%s'\" % expr\n\n text = \"coalesce(%s, '')\" % expr\n if first:\n first = False\n else:\n text = \"text || \" + text\n texts.append(\" text = %s;\" % text)\n\n tsvector = 'to_tsvector(text)'\n if weight:\n tsvector = \"setweight(%s, '%s')\" % (tsvector, weight)\n\n if not first_block:\n tsvector = \"result || \" + tsvector\n\n texts.append(\" result := %s;\\n\" % tsvector)\n\n\nidentifier = re.compile(r'\\w+$').match\ndef create_text_index_sql(fname, D=None, C=None, B=None, A=None):\n \"\"\"Compute and return SQL to set up a newt text index.\n\n The resulting SQL contains a statement to create a\n `PL/pgSQL `_\n function and an index-creation function that uses it.\n\n The first argument is the name of the function to be generated. 
The\n second argument is a single expression or property name or a\n sequence of expressions or property names. If expressions are\n given, they will be evaluated against the newt JSON ``state``\n column. Values consisting of alphanumeric characters (including\n underscores) are threaded as names, and other values are treated\n as expressions.\n\n Additional arguments, ``C``, ``B``, and ``A`` can be used to\n supply expressions and/or names for text to be extracted with\n different weights for ranking. See:\n https://www.postgresql.org/docs/current/static/textsearch-controls.html#TEXTSEARCH-RANKING\n \"\"\"\n texts = []\n _texts(texts, D)\n _texts(texts, C, 'C')\n _texts(texts, B, 'B')\n _texts(texts, A, 'A')\n\n if not texts:\n raise TypeError(\"No text expressions were specified\")\n\n texts.insert(0, text_extraction_template[0] % fname)\n texts.append(text_extraction_template[1])\n texts.append(\"create index newt_%s_idx on newt using gin (%s(state));\\n\"\n % (fname, fname))\n return '\\n'.join(texts)\n\ndef create_text_index(conn, fname, D, C=None, B=None, A=None):\n \"\"\"Set up a newt full-text index.\n\n The ``create_text_index_sql`` method is used to compute SQL, which\n is then executed to set up the index. (This can take a long time\n on an existing database with many records.)\n\n The SQL is executed against the database associated with the given\n connection, but a separate connection is used, so it's execution\n is independent of the current transaction.\n \"\"\"\n conn, cursor = conn._storage.ex_connect()\n sql = create_text_index_sql(fname, D, C, B, A)\n try:\n cursor.execute(sql)\n conn.commit()\n finally:\n try:\n cursor.close()\n except Exception:\n pass\n try:\n conn.close()\n except Exception:\n pass\n","sub_path":"src/newt/db/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
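The newt.db search helpers above take plain SQL against the newt table, with positional or named placeholders, and `create_text_index_sql` only generates the index-creation SQL. A short sketch of calling them (here `conn` is assumed to be an already-open newt.db connection, and the property names 'title' and 'body' are illustrative):

```python
from newt.db.search import search, search_batch, create_text_index_sql

# Named-placeholder query; selecting * keeps zoid and ghost_pickle in the result.
docs = search(conn, "select * from newt where state ->> 'title' = %(title)s",
              title='Hello world')

# Batched variant: returns the total count plus one page of newt objects.
total, page = search_batch(conn, "select * from newt order by zoid",
                           (), batch_start=0, batch_size=20)

# Build (but do not execute) the SQL for a weighted full-text index.
print(create_text_index_sql('doc_text', D='body', A='title'))
```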
+{"seq_id":"474345234","text":"import ctypes\r\nimport string\r\nimport sys\r\n\r\ndef get_wavelength_information(pSpecCore):\r\n \"\"\"Calculate Spectrum\r\n\r\n This function returns wavelength range information for spectrum data.\r\n\r\n * \\brief Get wavelength information data.\r\n *\r\n * This function returns wavelength range information for spectrum data.\r\n *\r\n * \\code\r\n * double start_wavelength, end_wavelength, wavelength_interval;\r\n * int ret_val = csGetWavelengthInfo(&start_wavelength, &end_wavelength, &wavelength_interval);\r\n * \\endcode\r\n *\r\n * \\param start_wavelength - double pointer to start wavelength [OUT]\r\n * \\param end_wavelength - double pointer to end wavelength [OUT]\r\n * \\param wavelength_interval - double pointer to wavelength interval[OUT]\r\n *\r\n * \\return\r\n * Returns one numeric value of NSP_RETURN_VALUE_SUCCESS.\r\n * - NSP_RETURN_VALUE_SUCCESS (1)\r\n * - NSP_RETURN_VALUE_FAILURE (-1)\r\n \"\"\"\r\n\r\n Start_Wavelength= ctypes.c_double()\r\n End_Wavelength= ctypes.c_double()\r\n Interval_Wavelength= ctypes.c_double()\r\n\r\n ret = pSpecCore.csGetWavelengthInfo(ctypes.byref(Start_Wavelength),ctypes.byref(End_Wavelength),ctypes.byref(Interval_Wavelength))\r\n\r\n if ret <=0:\r\n print (\"[PythonPrismError] Getting Wavelength Information Failed!\")\r\n return (-1,-1,-1)\r\n else:\r\n print (\"[PythonPrism] (StartWL, EndWL , IntervalWL) : (\",Start_Wavelength.value, \" , \", End_Wavelength.value, \" , \", Interval_Wavelength.value, \" )\")\r\n return (Start_Wavelength, End_Wavelength, Interval_Wavelength)","sub_path":"NSP32_SDK/wrappers/python/wrapper_python3/core/get_wavelength_information.py","file_name":"get_wavelength_information.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"248840827","text":"import netaddr\nfrom io import StringIO\nfrom netaddr import *\nfrom os import listdir\nfrom os.path import isfile, join\n\n\nclass Subnets(object):\n def __init__(self, subnets):\n self.subnets = subnets\n\n @classmethod\n def build_from_directory(cls, directory_path):\n \"\"\"\n\n :param directory_path:\n :return: A Subnets object initialised with a list of all the subnets contained in the files in a directory.\n\n >>> s = Subnets.build_from_directory('./data/test')\n >>> s.subnets\n [IPNetwork('192.136.55.0/24'), IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/24')]\n \"\"\"\n all_subnets = []\n for file_path in Subnets.get_files_in_directory(directory_path):\n subnets = Subnets.build_subnet_list(file_path)\n all_subnets += subnets\n return cls(all_subnets)\n\n @staticmethod\n def get_files_in_directory(directory_path):\n \"\"\"\n\n :param directory_path: The path of a directory\n :return: A list of paths of files in the derectory\n\n >>> Subnets.get_files_in_directory('.')\n ['./subnet.py']\n\n \"\"\"\n file_list = []\n for filename in listdir(directory_path):\n file_path = join(directory_path, filename)\n if isfile(file_path):\n file_list.append(file_path)\n return file_list\n\n @staticmethod\n def build_subnet_list(file_path):\n with open(file_path, 'r') as file:\n return Subnets.build_subnet_list_from_file(file)\n\n @staticmethod\n def build_subnet_list_from_file(file):\n \"\"\"\n Build a list of subnets from a file.\n\n :param file: A file of text lines\n :return: A list of subnets\n\n >>> input = StringIO(\"103.214.228.0/24\\\\n192.136.54.0/24\\\\n\")\n >>> Subnets.build_subnet_list_from_file(input)\n [IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/24')]\n\n \"\"\"\n subnets = []\n for line in file:\n subnets.append(IPNetwork(line.strip()))\n return subnets\n\n def merge(self):\n \"\"\"\n Merge a list of subnets combining adjacent subnets.\n\n :param subnets: A list of subnets\n :return: A list of merged subnets\n\n >>> s = Subnets([IPNetwork('192.136.55.0/24'), IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/24')])\n >>> s.merge()\n [IPNetwork('103.214.228.0/24'), IPNetwork('192.136.54.0/23')]\n \"\"\"\n return netaddr.cidr_merge(self.subnets)\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","sub_path":"network/subnet.py","file_name":"subnet.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"612573956","text":"from django.db import IntegrityError\nfrom django.db.models import F, Q, Max\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.core.urlresolvers import reverse\nfrom django.core.files import File\nfrom django.core.files.temp import NamedTemporaryFile\nfrom django.forms import ValidationError\nfrom django.forms.models import inlineformset_factory\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import RequestContext, loader\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\n\n\nfrom general.decorators import login_required_403\nfrom models import *\nfrom forms import AlbumForm, AlbumGroupForm, EditAlbumFormAjax, PickAlbumForm, OrderAlbumForm, UploadFromURLForm\nfrom utils import resize, admin_mode, get_default_img_size\n\n\nimport itertools\nimport json\nimport urllib2\nfrom urlparse import urlparse\n\nGroupFormset = inlineformset_factory(Album, AlbumGroup, form=AlbumGroupForm, extra=1, can_delete=False)\n\n@login_required_403\ndef new_album(request):\n \"\"\" Deprecated \"\"\"\n error = None\n if request.method == \"POST\": #submission of new album\n form = AlbumForm(request.POST, user=request.user)\n if form.is_valid():\n album = form.save(commit=False)\n album.user = request.user\n try:\n album.validate_unique()\n album.set_order()\n album.save()\n return HttpResponse(\"\" % (album.title, album.id))\n except ValidationError:\n error = _(\"You have used this album title before. Make sure to pick an unique title.\")\n else: #request for rendered form\n album = Album(user=request.user)\n form = AlbumForm(instance=album, user=request.user, initial={'user': request.user})\n return render(request, 'albums/ajax/new_album.html', {'form': form, 'error': error})\n\n@login_required_403\ndef uploadify(request):\n # Processing of each uploaded image\n albumform = PickAlbumForm(request.user, request.POST)\n # import pdb; pdb.set_trace()\n\n import logging\n logging.info(request.FILES)\n\n if albumform.is_valid():\n album = albumform.cleaned_data['album']\n max_order = Photo.objects.filter(album=album).aggregate(Max('order'))['order__max'] or 0\n img = request.FILES['Filedata']\n path = 'albums/%s/%s/' % (request.user.id, album.id) #/media/albums///.jpg\n # get the resizing dimensions from the preferences #TODO this might move to utils in the future\n preferences = Preferences.get_or_create(request.user)\n resize_dimensions = get_default_img_size(preferences)\n img_data = resize(img, upload_to=path, sizes_data=[resize_dimensions])\n\n # if img_data is not None:\n for data in img_data:\n photo = Photo(user=request.user, album=album, width=data[1], height=data[2])\n photo.image = data[0]\n photo.order = max_order + 1\n photo.save()\n p_id = photo.id\n return HttpResponse('%s' % p_id, mimetype=\"text/plain\") #return the photo id\n # else:\n # return HttpResponse(_('File extension not in valid extensions: \"%(extensions)s\".' 
% {\n # 'extensions': \",\".join(settings.VALID_IMG_EXTENSIONS)\n # })\n # )\n else:\n return HttpResponse()\n\n### uploading images from urls\n@login_required_403\ndef upload_url(request):\n albumform = PickAlbumForm(request.user, request.POST)\n urlform = UploadFromURLForm(request.POST)\n if albumform.is_valid() and urlform.is_valid():\n url = urlform.cleaned_data['url']\n album = albumform.cleaned_data['album']\n name = urlparse(url).path.split('/')[-1]\n\n tmp_img = NamedTemporaryFile(delete=True)\n tmp_img.write(urllib2.urlopen(url).read())\n tmp_img.flush()\n\n max_order = Photo.objects.filter(album=album).aggregate(Max('order'))['order__max'] or 0\n path = 'albums/%s/%s/' % (request.user.id, album.id)\n\n photo = Photo(user=request.user, album=album)\n photo.image.save(name, File(tmp_img))\n photo.image.open()\n\n # get the resizing dimensions from the preferences #TODO this might move to utils in the future\n preferences = Preferences.get_or_create(request.user)\n resize_dimensions = get_default_img_size(preferences)\n img_data = resize(photo.image, upload_to=path, sizes_data=[resize_dimensions], overwrite=True)\n\n for data in img_data:\n photo.width=data[1]\n photo.height=data[2]\n photo.order = max_order + 1\n photo.save()\n p_id = photo.id\n return HttpResponse(p_id, mimetype=\"text/plain\")\n return render(request, 'albums/uploadify_url.html', {'urlform': urlform})\n\n### search function\ndef search(request):\n inputresults = request.GET.__getitem__('term').split(' ')\n query = []\n for value in inputresults:\n q = Q(title__icontains=value) | Q(description__icontains=value) | Q(user__username__icontains=value)\n query.append(q)\n if len(query) > 0 and len(query) < 10:\n if not request.user.is_authenticated() or not admin_mode(request.user):\n query.append(Q(public=True))\n albums = Album.objects.filter(trash=False, *query).order_by('title')\n else:\n return HttpResponse()\n output = []\n for album in albums:\n label = mark_safe(u\"%s \\u2022 %s\" % (album.__unicode__(), album.user.get_profile().forum_nickname))\n output.append({\n \"id\": album.id,\n \"label\": label,\n \"value\": album.__unicode__(),\n \"url\": album.get_absolute_url()\n })\n return HttpResponse(json.dumps(output))\n\n### set album_cover\n@login_required_403\ndef set_cover(request):\n if request.method == \"POST\":\n p_id = request.POST['photo']\n try:\n p_id = int(p_id)\n if admin_mode(request.user):\n photo = get_object_or_404(Photo, pk=p_id)\n else:\n photo = get_object_or_404(Photo, pk=p_id, user=request.user)\n photo.album.cover = photo\n photo.album.save()\n return HttpResponse(1)\n except ValueError: #not an integer\n pass\n return HttpResponse()\n\n@login_required_403\ndef delete_photo(request):\n if request.method == \"POST\":\n p_id = request.POST['photo']\n try:\n p_id = int(p_id)\n if admin_mode(request.user):\n photo = get_object_or_404(Photo, pk=p_id)\n else:\n photo = get_object_or_404(Photo, pk=p_id, user=request.user)\n photo.trash = True\n photo.save()\n return HttpResponse(1)\n except ValueError: #not an integer\n pass\n return HttpResponse()\n\n@login_required_403\ndef reorder(request):\n form = OrderAlbumForm(request.user, request.POST)\n if form.is_valid():\n album = form.cleaned_data['album']\n album_before = form.cleaned_data['album_before']\n album_after = form.cleaned_data['album_after']\n\n if album.writable_to in [\"g\", \"o\"]:\n q = Q(writable_to=\"g\") | Q(writable_to=\"o\")\n else:\n q = Q(writable_to=\"u\")\n if album_after and album.order > album_after.order: # moved 
forward\n lower = album_after.order\n upper = album.order\n album.order = lower\n\n albums_to_reorder = Album.objects.filter(q, order__gte=lower, order__lt=upper)\n albums_to_reorder.update(order=(F('order') + 1))\n album.save()\n\n elif album_before and album_before.order > album.order: # moved backwards\n lower = album.order\n upper = album_before.order\n album.order = upper\n\n albums_to_reorder = Album.objects.filter(q, order__gt=lower, order__lte=upper)\n albums_to_reorder.update(order=(F('order') - 1))\n album.save()\n elif ((album_before and album_before.order == album.order) \\\n or (album_after and album_after.order == album.order)):\n order = album.order\n if album_before:\n album.order = (F('order') + 1)\n q1 = Q(order__exact=order, title__gt=album.title)\n q2 = Q(order__gt=order)\n albums_to_reorder = Album.objects.filter(q, Q(q1 | q2)).exclude(pk=album_before.id)\n albums_to_reorder.update(order=(F('order') + 2))\n album.save()\n elif album_after:\n q1 = Q(order__exact=order, title__gt=album.title)\n q2 = Q(order__gt=order)\n album_after.order = (F('order') + 1)\n albums_to_reorder = Album.objects.filter(q, Q(q1 | q2)).exclude(pk=album_after.id)\n albums_to_reorder.update(order=(F('order') + 2))\n album_after.save()\n return HttpResponse()\n\n@login_required_403\ndef get_all_own_albums(request):\n own_albums = Album.objects.filter(user=request.user, writable_to='u', trash=False)\n return render(request, 'albums/albums_list/albums_li.html', {'albums': own_albums})\n\n@login_required_403\ndef edit_album(request):\n admin = admin_mode(request.user)\n editform, formset, photos = None, None, None\n if request.method == \"POST\":\n form = PickAlbumForm(request.user, request.POST, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n formset = GroupFormset(request.POST, instance=album)\n editform = EditAlbumFormAjax(request.POST, instance=album, user=request.user)\n photos = editform.fields[\"cover\"].queryset.select_related('album', 'album__cover')\n if editform.is_valid() and (album.user == request.user or admin):\n editform.save()\n \"\"\"\n try:\n album.validate_unique()\n album.save()\n except ValidationError:\n error = _(\"You have used this album title before. 
Make sure to pick an unique title.\")\n print editform._errors\n \"\"\"\n if formset.is_valid() and album.writable_to == 'g':\n formset.save()\n album = get_object_or_404(Album, pk=album.id);\n # TODO: convert to JSON - see new_album_jquery-ui\n return render(request, 'albums/ajax/album_li.html', {'album': album, 'custom_id': 'temp'})\n else:\n form = PickAlbumForm(request.user, request.GET, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n if request.user.id == album.user_id or admin:\n editform = EditAlbumFormAjax(instance=album, user=request.user)\n #formset = GroupFormset(instance=album)\n photos = editform.fields[\"cover\"].queryset.select_related('user', 'album', 'album__cover')\n else:\n return HttpResponse(_('This event has been logged'))\n else:\n return HttpResponse(form.as_p())\n return render(request, 'albums/ajax/edit_album.html', {'form': editform, 'formset': formset, 'photos': photos})\n\n@login_required_403\ndef edit_albumgroup(request):\n GroupFormset = inlineformset_factory(\n Album, AlbumGroup,\n form=AlbumGroupForm, extra=1,\n can_delete=False\n )\n admin = admin_mode(request.user)\n form = PickAlbumForm(request.user, request.GET, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n formset = GroupFormset(instance=album)\n return render(request, 'albums/ajax/group_rights.html', {'formset': formset})\n return HttpResponse(0)\n\n@login_required_403\ndef remove_album(request):\n status = 'fail'\n form = PickAlbumForm(request.user, request.POST)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n album.trash = True\n album.title = \"trash_%s_%s\" % (datetime.now().strftime('%d%m%Y_%H.%M.%s'), album.title)\n album.save()\n status = 'ok'\n return HttpResponse(status)\n\n@login_required_403\ndef new_album_jquery_ui(request):\n if request.method == \"POST\":\n try:\n from_page = request.POST['from-page']\n except KeyError:\n from_page = None\n\n new_album = Album(user=request.user)\n form = AlbumForm(request.POST, user=request.user, instance=new_album)\n t = loader.get_template(u'albums/ajax/new_album_jquery-ui.html')\n\n context = {'from_page': from_page}\n if form.is_valid():\n album = form.save()\n context['form'] = AlbumForm(\n instance = Album(user=request.user),\n user=request.user\n )\n rendered_form = t.render(RequestContext(request, context))\n\n output = {\n 'status': 1,\n 'form': rendered_form,\n }\n\n if from_page == u\"upload\": # return option to append to select\n option = '' % (\n album.id,\n album.__unicode__()\n )\n output['option'] = mark_safe(option)\n elif from_page == u\"my-albums-list\": # return li element to place in list\n t2 = loader.get_template(u'albums/album_li.html')\n album_li = t2.render(RequestContext(request, {'album': album}))\n output['album_li'] = album_li\n output['album_write_mode'] = album.writable_to\n else:\n context['form'] = form\n rendered_form = t.render(RequestContext(request, context))\n output = {'form': rendered_form, 'status': 0}\n return HttpResponse(json.dumps(output), mimetype=\"application/json\")\n\n@login_required_403\ndef get_title(request):\n title = ''\n form = PickAlbumForm(request.user, request.GET)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n title = album.title\n return HttpResponse(title)\n\n@login_required_403\ndef get_covers(request):\n admin = admin_mode(request.user)\n form = PickAlbumForm(request.user, request.GET, admin_mode=admin)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n photos = 
Photo.objects.select_related('user', 'album').filter(album=album, trash=False).order_by('order')\n return render(request, 'albums/ajax/album_covers.html', {'album': album, 'photos':photos})\n return HttpResponse(0)\n\n@login_required_403\ndef restore_album(request):\n form = PickAlbumForm(request.user, request.POST, trash=True)\n if form.is_valid():\n album = form.cleaned_data[\"album\"]\n album.trash = False\n album.title = album.clean_title\n\n i = itertools.count(2)\n saved = False\n while(not saved):\n try:\n album.save()\n saved = True\n return HttpResponse('ok')\n except IntegrityError:\n album.title = \"%s_%s\" % (album.clean_title, i.next())\n\n return HttpResponse('
'+form.as_table()+'
')\n\n\n### CLASS BASED VIEWS ###\nfrom django.views.generic import View\nfrom django.views.generic.detail import SingleObjectMixin\n\nclass RotateView(SingleObjectMixin, View):\n \"\"\" View taking a photo, rotating it, and returning the success status when done \"\"\"\n #TODO: unittest!\n model = Photo\n\n def get_queryset(self):\n qs = super(RotateView, self).get_queryset()\n if not admin_mode(self.request.user):\n qs = qs.filter(user=self.request.user)\n return qs\n\n def post(self, request, *args, **kwargs):\n photo = self.get_object()\n direction = self.request.POST['direction']\n if direction == 'cw':\n photo.rotate_right()\n elif direction == 'ccw':\n photo.rotate_left()\n else:\n response = {'result': 'Invalid direction'}\n\n response = {'result': 'success', 'ok': True}\n return HttpResponse(json.dumps(response), mimetype='application/json')\n","sub_path":"albums/ajax_views.py","file_name":"ajax_views.py","file_ext":"py","file_size_in_byte":16355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"124169367","text":"__author__ = 'altvod'\n\nfrom ..format import FormatHandler\n\n\nclass HTMLFormatHandler(FormatHandler):\n def __init__(self, html_indent=None, **kwargs):\n super(HTMLFormatHandler, self).__init__(**kwargs)\n self.indent = html_indent\n self.tab = 0\n\n def accept_item(self, t, item):\n return t == 'html_tag'\n\n def _format_block(self, block, formatter):\n text = ''\n for child in block:\n if isinstance(child, str):\n if self.indent is not None:\n child = ' '*self.tab + child + '\\n'\n text += child\n\n elif isinstance(child, list):\n text += self._format_block(child, formatter)\n\n else:\n text += formatter.format_item(child)\n\n return text\n\n def format_item(self, formatter, t, item):\n text = ''\n if item[\"tag\"] == 'html':\n # Add the DOCTYPE delcaration before the tag\n text += ''.format(\n doctype=item.get('doctype', 'html')\n )\n if self.indent is not None:\n text += '\\n'\n\n if self.indent is not None:\n text += ' '*self.tab\n\n text += '<{0}'.format(item[\"tag\"])\n for attr_name, attr_value in item.get('@', {}).items():\n text += ' {0}=\"{1}\"'.format(attr_name, attr_value)\n\n if 'content' in item and item[\"content\"]:\n text += '>'\n if self.indent is not None:\n text += '\\n'\n self.tab += self.indent\n\n text += self._format_block(item[\"content\"], formatter)\n\n if self.indent is not None:\n self.tab -= self.indent\n text += ' '*self.tab\n\n text += '{0}>'.format(item[\"tag\"])\n\n else:\n text += \" />\"\n\n if self.indent is not None:\n text += '\\n'\n\n return text\n","sub_path":"docscheme/html/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"637068899","text":"class BinTreeNode(object):\r\n def __init__(self, value):\r\n self.value=value\r\n self.left=None\r\n self.right=None\r\n\r\ndef tree_insert(tree, item):\r\n if tree==None:\r\n tree=BinTreeNode(item)\r\n else:\r\n if(item < tree.value):\r\n if(tree.left==None):\r\n tree.left=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.left,item)\r\n else:\r\n if(tree.right==None):\r\n tree.right=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.right,item)\r\n return tree\r\n\r\ndef post_order(tree):\r\n if(tree.left!=None):\r\n post_order(tree.left)\r\n if(tree.right!=None):\r\n post_order(tree.right)\r\n print (tree.value)\r\n\r\ndef in_order(tree):\r\n ''' \r\n An iterative function to sort nodes in a tree.\r\n'''\r\n currentNode = tree #Set currentNode to root of binary tree\r\n emptyStack = [] #Initializing stack\r\n completed = 0\r\n \r\n while (completed !=1):\r\n #Keeps looking through left nodes first.\r\n if currentNode != None:\r\n emptyStack.append(currentNode) #Appends empty stack with current node.\r\n currentNode = currentNode.left #Keeps looking through left-hand nodes\r\n #Goes back from empty subtree to visit node at the top of stack.\r\n #If node is empty, complete.\r\n else:\r\n if (len(emptyStack)>0):\r\n currentNode = emptyStack.pop()\r\n print (currentNode.value)\r\n currentNode = currentNode.right\r\n else:\r\n completed = 1\r\n\r\nif __name__ == '__main__':\r\n t=tree_insert(None,6)\r\n tree_insert(t,10)\r\n tree_insert(t,-12)\r\n tree_insert(t,5)\r\n tree_insert(t,2)\r\n tree_insert(t,3)\r\n tree_insert(t,4)\r\n tree_insert(t,11)\r\n tree_insert(t,110)\r\n in_order(t)\r\n","sub_path":"Task 12 - Week 6.py","file_name":"Task 12 - Week 6.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"80328573","text":"\"\"\" This function creates a thumbnail from an image and registers the thumbnail with the DB\n\nFunction will take the following steps:\n Create a thumbnail directory\n Utilize FFMPEG to create a thumbnail\n With a video, we want to take ~25% through a video just so it's not the beginning\n NOTE: Can we take full storyboard at some point?\n Update the database entry with a Thumbnail section\n Return success or failure\n\nauthor: Michael Schuler [mischuler@deloitte.com]\n\n\"\"\"\n\nimport boto3\nfrom botocore.client import Config\nimport botocore\nimport sys\nimport os\nimport subprocess\nimport string\nimport simplejson as json\nimport math\nimport fnmatch\n\nimport logging\nimport logging.config\n\nsys.path.insert(0, '/Assets/sharedLibraries')\nimport parseHelper\nimport databaseHelper\n\n\n\ndef main(args):\n\n DOMAIN = 'ITD'\n TASKLIST = 'default'\n VERSION = '1'\n\n taskName = os.path.basename(__file__)[:-3]\n\n logging.config.fileConfig('/Assets/sharedLibraries/logging_config.ini')\n logging.debug(\"Creating SWF boto client\")\n botoConfig = Config(connect_timeout=50, read_timeout=70) # suggestion is the read is higher than connect\n swf = boto3.client('swf', config=botoConfig)\n logging.debug(\"Created SWF boto client: %s\", swf)\n \n while True:\n\n task = swf.poll_for_activity_task(\n domain=DOMAIN,\n taskList={'name': taskName},\n identity='%s-01' %(taskName)\n )\n\n if 'taskToken' not in task:\n logging.info(\"%s - Poll timed out, no new task. Repoll\", taskName)\n \n # Run the operation\n else:\n taskToken = task['taskToken']\n workID = task['workflowExecution']['workflowId']\n logging.info(\"[%s] New request for %s\", workID, taskName)\n\n INPUT = json.loads(task['input'])\n asset = INPUT['asset']\n dbPrimaryKey = INPUT['dbPrimaryKey']\n # Take the thumbnail 25% through the video\n \n #scale = \"640x360\"\n # Use the multipliers so that we don't distort vertical videos. This makes it generic. 
\n scale = \"iw/3:ih/3\" # 1/3 gives 1920 (HD) down to 640\n fps = 1 # Set the number of frames to be once per second\n newDir = \"thumbnails\"\n (filePath, fileName, fileExt) = parseHelper.splitFilename(asset)\n subDir = parseHelper.createDir(filePath, newDir)\n \n # We require the %d to keep the file names incremented\n # Note that we need to escape the percentage sign by using another %, hence the double %\n outfile = '%s_thumbnail_%%d.jpg' % (fileName)\n vtt = '%s.vtt' % (fileName)\n \n # Parameters are\n # -y for\n # -i for Input\n # -vf, fps=1,scale= for the video filter stating we want to take every one second\n cmd = ['ffmpeg'\n ,'-y'\n ,'-i', asset\n ,'-vf', 'fps=%s,scale=%s' %(fps, scale)\n ,'-loglevel', 'fatal'\n ,'%s/%s' %(subDir, outfile)\n ]\n\n logging.debug(\"[%s] Execute video thumbnail creation: %s\", workID, cmd)\n try:\n output = subprocess.check_output(cmd)\n \n # Start setting the parameters needed to update the thumbnail\n \n # Comment block is staying for reference sake\n '''# Call the update function\n # The \"thumbnails\" map will need to be created if it doesn't exist (Note: It shouldn't at this point)\n # A validation exception will be thrown, and when this is thrown, we will create an empty map and try it again\n try:\n response = databaseHelper.updateEntry(key, updateExpression, expressionValues) \n \n except botocore.exceptions.ClientError as err:\n if err.response['Error']['Code'] == 'ValidationException':\n \n \n response = databaseHelper.updateEntry(key, 'set thumbnails = :t', {':t' : {}})\n response = databaseHelper.updateEntry(key, updateExpression, expressionValues)\n '''\n \n # After the thumbnails are created, we need to do two things:\n # OLD # 1. Create the storyboard object which is [http://docs.brightcove.com/en/perform/brightcove-player/guides/thumbnails-plugin.html#collectimages]\n # 1. Create the storyboard VTT file (https://support.jwplayer.com/customer/portal/articles/1407439-adding-preview-thumbnails)\n # 2. 
We also need to identify the thumbnail for the video which we will take a percentage of the way through the video\n\n \n #STORYBOARD = {}\n thumbnailTime = .25 # Pick the thumbnail that's 25% of the way through the video\n counter = 0\n \n for thumb in os.listdir(subDir):\n if fnmatch.fnmatch(thumb, '*_thumbnail_*.jpg'): # Match files in the directory that are the thumbnails\n #sequenceNum = thumb[thumb.rfind('_')+1:-4] # filename_thumbnail_$frame.jpg\n #STORYBOARD[sequenceNum] = {'src' : '/%s/%s' %(newDir, thumb) }\n counter = counter + 1\n\n # Open the VTT file and write\n logging.debug(\"[%s] Writing VTT file: %s\", workID, vtt)\n vttFile = open('%s/%s' %(subDir, vtt), 'w')\n vttFile.write(\"WEBVTT\")\n # The counter represents how many files of FPS we have -- range is COUNTER*FPS --> (COUNTER+1)* fps\n # FPS references the frames per second so if we put (1/60), that means a frame EVERY MINUTE\n # Therefore, we need to invest the FPS\n # Use %02d to PAD the numbers \n \n baseURL = \"https://dnt4vq51jg2tj.cloudfront.net\" # There needs to be a better way then the full URL\n for i in range(0,counter):\n startSecond = i * (1/fps)\n endSecond = (i + 1) * (1/fps)\n startSpan = '%02d:%02d:%02d.000' % ( startSecond / 3600, startSecond / 60 % 60, startSecond % 60) \n endSpan = '%02d:%02d:%02d.000' % ( endSecond / 3600, endSecond / 60 % 60, endSecond % 60)\n \n \n thumbSpan = '%s/%s/%s/%s_thumbnail_%d.jpg' % (baseURL, fileName, newDir, fileName,i + 1)\n \n vttFile.write(\"\\n\\n%s --> %s\\n%s\" % (startSpan, endSpan, thumbSpan))\n \n vttFile.close()\n logging.debug(\"[%s] Wrote VTT file: %s\", workID, vtt)\n \n index = str(math.trunc(counter * thumbnailTime))\n logging.debug(\"[%s] Key frame identified in index: %s\", workID, index)\n \n updateExpression = 'set thumbnail = :t, storyboard = :s'\n thumbnail = '/%s/%s_thumbnail_%s.jpg' % (newDir, fileName,index)\n \n # THERE MUST BE A DYNAMIC WAY TO DO THIS BUT I DONT KNOW YET\n storyboard = '/%s/%s' %(newDir, vtt)\n \n '''expressionValues = {\n ':t' : STORYBOARD[index]['src'],\n ':s' : STORYBOARD\n }'''\n \n expressionValues = {\n ':t' : thumbnail,\n ':s' : storyboard,\n }\n \n logging.debug(\"[%s] Update thumbnail value\", workID)\n response = databaseHelper.updateEntry(dbPrimaryKey, updateExpression, expressionValues)\n\n OUTPUT = {\n 'tool' : output,\n 'dbPrimaryKey' : dbPrimaryKey,\n 'assetClass' : INPUT['assetClass'], \n 'asset' : asset,\n }\n \n swf.respond_activity_task_completed(\n taskToken = taskToken,\n result = json.dumps(OUTPUT)\n )\n # We should catch other errors here\n except subprocess.CalledProcessError as err:\n \n result = { \n 'reason' : 'THB-0002_Error in video thumbnail creation',\n 'detail' : str(err)\n }\n \n logging.error(\"%s\", result)\n \n swf.respond_activity_task_failed(\n taskToken=taskToken,\n reason=json.dumps(result['reason']),\n details=json.dumps(result['detail'])\n )\n \n logging.info(\"[%s] %s Complete\", workID, taskName)\n\nif __name__ == '__main__':\n \n main(sys.argv)\n","sub_path":"EC2-Backup/Deprecated Code/workflowcode/createThumbnailFromVideo.py","file_name":"createThumbnailFromVideo.py","file_ext":"py","file_size_in_byte":8976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"2661653","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('request', views.send_request),\n path('approve-reject', views.approve_reject),\n path('list', views.chat_list),\n path('group/create', views.create_group),\n path('group/edit', views.edit_group),\n path('group/member/add-remove', views.add_remove_member),\n path('group/detail', views.group_details),\n path('group/update/message', views.update_last_message),\n path('group/leave', views.leave_group)\n]\n","sub_path":"chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"315091159","text":"# encoding: utf-8\n#\n# Copyright (c) 2015 Safari Books Online. All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the 3-clause BSD license. See the LICENSE file for details.\n\nfrom __future__ import unicode_literals, with_statement\n\nimport csv\nfrom datetime import datetime, timedelta\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\nfrom itertools import chain\nimport logging\nimport os\nimport time\nfrom xml.etree import ElementTree\n\nimport requests\nfrom simple_salesforce import Salesforce\n\nlogger = logging.getLogger('salesforce-bulk-api')\n\nNAMESPACE = 'http://www.force.com/2009/06/asyncapi/dataload'\n\n\ndef salesforce_session():\n \"\"\"Returns an authenticated simple_salesforce.Salesforce instance.\"\"\"\n return Salesforce(username=os.environ['SALESFORCE_USERNAME'],\n password=os.environ['SALESFORCE_PASSWORD'],\n security_token=os.environ['SALESFORCE_SECURITY_TOKEN'],\n instance=os.environ['SALESFORCE_INSTANCE'],\n sandbox=os.environ.get('SALESFORCE_SANDBOX') == 'True',\n version='34.0')\n\n\nclass SalesforceBulkJob:\n \"\"\"A Python interface to the Salesforce Bulk API.\"\"\"\n\n PUBLISHING_BATCH_SIZE = 9999\n SUPPORTED_OPERATIONS = {'insert', 'update', 'delete', 'upsert'}\n\n def __init__(self, operation, object_name, external_id_field=None, salesforce=None):\n \"\"\"Creates a new API interface to Salesforce's bulk API, from which any\n number of jobs may be created. The operation should be one of ('insert',\n 'update', 'upsert', or 'delete'), and the object_name should be the\n proper-case name of a Salesforce object (like Lead or Contact).\"\"\"\n if not salesforce:\n salesforce = salesforce_session()\n self.session_id = salesforce.session_id\n self.async_url = (salesforce.base_url\n .replace('/data/', '/async/')\n .replace('v' + salesforce.sf_version,\n salesforce.sf_version))\n\n assert operation in self.SUPPORTED_OPERATIONS, '{} is not a valid bulk operation.'.format(operation)\n self.operation = operation\n\n supported_objects = {o['name'] for o in salesforce.describe()['sobjects']}\n assert object_name in supported_objects, '{} is not a known Salesforce object.'.format(object_name)\n self.object_name = object_name\n self.external_id_field = external_id_field\n\n self.reset()\n\n def upload(self, fields, data):\n \"\"\"Given a list of fields and a (potentially very long) iterable of\n tuples matching those fields, perform a complete upload to Salesforce\"\"\"\n self.create()\n for chunk in chunked(data, self.PUBLISHING_BATCH_SIZE):\n if chunk:\n self.add_batch(fields, chunk)\n if not self.pending_batches:\n logger.info('No batches added to job.')\n self.abort()\n return\n self.close()\n self.wait()\n\n def create(self):\n \"\"\"Creates a new Salesforce bulk Job and prepares for adding batches.\"\"\"\n assert not self.job, 'The current job is still open.'\n\n logger.info('Creating new job to %s %s', self.operation, self.object_name)\n job_request = '''\n \n {operation}\n \n '''\n\n if self.operation == 'upsert':\n job_request += '{external_id_field}'\n\n job_request += '''\n CSV\n \n '''\n\n job_request = job_request.format(\n NAMESPACE=NAMESPACE,\n object_name=self.object_name,\n operation=self.operation,\n external_id_field=self.external_id_field\n )\n response = self.request('post', self.async_url + 'job',\n data=job_request)\n\n self.job = bulk_response_attribute(response, 'id')\n 
self.job_url = self.async_url + 'job/' + self.job\n self.pending_batches = []\n self.is_open = True\n\n def add_batch(self, fields, data):\n \"\"\"Given a list of fields and an iterable of tuples matching those\n fields, adds a batch of data to the current job. The data must be\n shorter than PUBLISHING_BATCH_SIZE rows\"\"\"\n assert self.job, 'There is no current job.'\n assert self.is_open, 'The current job is not open.'\n\n logger.info('Adding batch to job %s', self.job_url)\n response = self.request('post', self.job_url + '/batch',\n data=itercsv(fields, data),\n content_type='text/csv; charset=UTF-8')\n batch = bulk_response_attribute(response, 'id')\n self.pending_batches.append(batch)\n\n def close(self):\n \"\"\"Closes the current job, which signals to Salesforce that no further\n batches will be added to it.\"\"\"\n logger.info('Closing job %s', self.job_url)\n self.set_job_state('Closed')\n self.is_open = False\n\n def abort(self):\n \"\"\"Aborts the current job, and resets the instance\"\"\"\n logger.info('Aborting job %s', self.job_url)\n self.set_job_state('Aborted')\n self.reset()\n\n def set_job_state(self, state):\n \"\"\"Sets the current job to the given state (\"Closed\" or \"Aborted\")\"\"\"\n assert self.job, 'There is no current job.'\n assert self.is_open, 'The current job is not open.'\n\n state_request = '''\n \n {state}\n \n '''.format(NAMESPACE=NAMESPACE, state=state)\n self.request('post', self.job_url, data=state_request, expected_response=200)\n\n def wait(self):\n \"\"\"Waits for all batches of the current job to finish\"\"\"\n assert self.job, 'There is no current job.'\n assert not self.is_open, 'The current job must be closed before waiting.'\n\n self.finished_batches = []\n total = len(self.pending_batches)\n while self.pending_batches:\n finished = []\n for i, batch in enumerate(self.pending_batches):\n batch_url = self.job_url + '/batch/' + batch\n response = self.request('get', batch_url, expected_response=200)\n state = bulk_response_attribute(response, 'state')\n if state not in {'Queued', 'InProgress'}:\n finished.append(i)\n log_method = (logger.warn\n if state in {'Failed', 'Not Processed'}\n else logger.info)\n log_method('Batch %s (%s/%s) finished with state %s',\n batch_url, total - len(self.pending_batches) + len(finished), total, state)\n\n for i in sorted(finished, reverse=True):\n self.finished_batches.append(self.pending_batches.pop(i))\n\n if self.pending_batches:\n logger.info('Waiting for %s more batches to complete...', len(self.pending_batches))\n time.sleep(10)\n\n def results(self):\n assert self.job, 'There is no current job.'\n assert not self.is_open, 'The current job must be closed before getting results.'\n assert self.finished_batches is not None, 'SalesforceBulkJob.wait() should be called before getting results.'\n\n for batch in self.finished_batches:\n result_url = self.job_url + '/batch/' + batch + '/result'\n response = self.request('get', result_url, expected_response=200)\n reader = csv.reader(StringIO(response.decode('utf-8')))\n next(reader) # consume the header row\n for id, success, created, error in reader:\n yield id, success == 'true', created == 'true', error\n\n def reset(self):\n \"\"\"Resets the state of this job to that of a new instance. This *does\n not* change anything that has happened so far at Salesforce. 
See\n `.abort()` to cancel the currently open job.\"\"\"\n self.is_open = False\n self.job = self.job_url = self.pending_batches = self.finished_batches = None\n\n def request(self, method, url,\n data=None,\n content_type='application/xml; charset=UTF-8',\n expected_response=201):\n \"\"\"Performs an HTTP request against Salesforce's bulk API, and validates\n the expected response. Returns the content of the response\"\"\"\n\n headers = {'X-SFDC-Session': self.session_id}\n kwargs = {'headers': headers}\n if data is not None:\n headers['Content-Type'] = content_type\n kwargs['data'] = data\n\n RETRIES, LAST, WAIT = 3, 2, timedelta(seconds=5)\n for retry in range(RETRIES):\n try:\n response = getattr(requests, method)(url, **kwargs)\n except requests.exceptions.ConnectionError:\n if retry == LAST:\n raise\n logger.info('ConnectionError from %r %r. Retrying in %r...',\n method, url, WAIT, exc_info=True)\n else:\n if retry < LAST and response.status_code in (502, 503):\n logger.info('%r response from %r %r. Retrying in %r...',\n response.status_code, method, url, WAIT)\n else:\n break\n time.sleep(WAIT.total_seconds())\n\n if response.status_code != expected_response:\n raise Exception(('Unexpected status {} from '\n 'Salesforce async API. Details: {}'\n ).format(response.status_code, response.content))\n return response.content\n\n\ndef bulk_response_attribute(response, attribute):\n \"\"\"Given a Salesforce bulk API response bytes, and the name of an attribute,\n find it in the given document, or raise if it isn't present\"\"\"\n tree = ElementTree.fromstring(response)\n value = tree.findtext('{{{}}}{}'.format(NAMESPACE, attribute))\n if not value:\n raise Exception(('<{}> not found in Salesforce '\n 'async API response. Response: {}'\n ).format(attribute, response))\n return value\n\n\ndef chunked(iterable, size):\n \"\"\"Yields chunks of the requested size from the iterable. The final chunk\n may be smaller than size\"\"\"\n if not size:\n for item in iterable:\n yield item\n return\n\n chunk = []\n for i, item in enumerate(iterable):\n chunk.append(item)\n if len(chunk) == size:\n yield chunk\n chunk = []\n if chunk:\n yield chunk\n\ndef itercsv(headers, data):\n \"\"\"Given a list of headers name and a (potentially large) iterable of\n tuples, yield the lines of a CSV file representing that data\"\"\"\n buffer = StringIO()\n writer = csv.writer(buffer)\n\n for row in chain([headers], data):\n writer.writerow(row)\n buffer.seek(0)\n yield buffer.read().encode('utf-8')\n buffer.truncate(0)\n buffer.seek(0)\n","sub_path":"Lib/site-packages/salesforce_bulk_api.py","file_name":"salesforce_bulk_api.py","file_ext":"py","file_size_in_byte":11492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"111911272","text":"import numpy as np\nimport pandas as pd\nimport altair as alt\nimport streamlit as st\nfrom sklearn import metrics\n\nfrom app_utils import load_model, load_data, predict\nfrom preprocess.constants import FEATURES, TARGET, CONFIG_FAI\nfrom .static_fai import (\n get_aif_metric,\n compute_fairness_measures,\n get_confusion_matrix_chart,\n plot_fmeasures_bar,\n color_red,\n)\nfrom .toolkit import get_perf_measure_by_group\n\nMETRICS_TO_USE = [\"Equal opportunity\", \"Predictive parity\", \"Statistical parity\"]\n\n\ndef print_model_perf(y_val, y_pred):\n text = \"\"\n text += \"Model accuracy = {:.4f}\\n\".format(metrics.accuracy_score(y_val, y_pred))\n text += \"Weighted Average Precision = {:.4f}\\n\".format(\n metrics.precision_score(y_val, y_pred, average=\"weighted\"))\n text += \"Weighted Average Recall = {:.4f}\\n\\n\".format(\n metrics.recall_score(y_val, y_pred, average=\"weighted\"))\n text += metrics.classification_report(y_val, y_pred, digits=4)\n return text\n\n\n@st.cache\ndef prepare_pred(x_valid, y_valid, debias=False):\n # Load model\n clf = load_model(\"output/lgb_model.pkl\")\n\n # Predict on val data\n y_prob = predict(clf, x_valid)\n\n # st.header(\"Prediction Distributions\")\n cutoff = 0.5\n y_pred = (y_prob > cutoff).astype(int)\n\n if debias:\n raise NotImplementedError\n\n # Model performance\n text_model_perf = print_model_perf(y_valid, y_pred)\n\n return y_pred, text_model_perf\n\n\ndef fai(debias=False):\n protected_attribute = st.selectbox(\"Select protected column.\", list(CONFIG_FAI.keys()))\n\n # Load data\n valid = load_data(\"output/test.gz.parquet\").fillna(0)\n x_valid = valid[FEATURES]\n y_valid = valid[TARGET].values\n valid_fai = valid[list(CONFIG_FAI.keys())]\n\n # Get predictions\n y_pred, text_model_perf = prepare_pred(x_valid, y_valid, debias=debias)\n\n st.header(\"Model Performance\")\n st.text(text_model_perf)\n\n st.header(\"Algorithmic Fairness Metrics\")\n fthresh = st.slider(\"Set fairness deviation threshold\", 0., 1., 0.2, 0.05)\n st.write(\"Absolute fairness is 1. 
The model is considered fair \"\n f\"if **ratio is between {1-fthresh:.2f} and {1+fthresh:.2f}**.\")\n\n # Compute fairness measures\n privi_info = CONFIG_FAI[protected_attribute]\n aif_metric = get_aif_metric(\n valid_fai,\n y_valid,\n y_pred,\n protected_attribute,\n privi_info[\"privileged_attribute_values\"],\n privi_info[\"unprivileged_attribute_values\"],\n )\n fmeasures = compute_fairness_measures(aif_metric)\n fmeasures = fmeasures[fmeasures[\"Metric\"].isin(METRICS_TO_USE)]\n fmeasures[\"Fair?\"] = fmeasures[\"Ratio\"].apply(\n lambda x: \"Yes\" if np.abs(x - 1) < fthresh else \"No\")\n\n st.altair_chart(plot_fmeasures_bar(fmeasures, fthresh), use_container_width=True)\n \n st.dataframe(\n fmeasures[[\"Metric\", \"Unprivileged\", \"Privileged\", \"Ratio\", \"Fair?\"]]\n .style.applymap(color_red, subset=[\"Fair?\"])\n .format({\"Unprivileged\": \"{:.3f}\", \"Privileged\": \"{:.3f}\", \"Ratio\": \"{:.3f}\"})\n )\n\n st.subheader(\"Confusion Matrices\")\n cm1 = aif_metric.binary_confusion_matrix(privileged=None)\n c1 = get_confusion_matrix_chart(cm1, \"All\")\n st.altair_chart(alt.concat(c1, columns=2), use_container_width=False)\n cm2 = aif_metric.binary_confusion_matrix(privileged=True)\n c2 = get_confusion_matrix_chart(cm2, \"Privileged\")\n cm3 = aif_metric.binary_confusion_matrix(privileged=False)\n c3 = get_confusion_matrix_chart(cm3, \"Unprivileged\")\n st.altair_chart(c2 | c3, use_container_width=False)\n\n st.header(\"Annex\")\n st.subheader(\"Performance Metrics\")\n all_perfs = []\n for metric_name in [\n 'TPR', 'TNR', 'FPR', 'FNR', 'PPV', 'NPV', 'FDR', 'FOR', 'ACC',\n 'selection_rate', 'precision', 'recall', 'sensitivity',\n 'specificity', 'power', 'error_rate']:\n df = get_perf_measure_by_group(aif_metric, metric_name)\n c = alt.Chart(df).mark_bar().encode(\n x=f\"{metric_name}:Q\",\n y=\"Group:O\",\n tooltip=[\"Group\", metric_name],\n )\n all_perfs.append(c)\n \n all_charts = alt.concat(*all_perfs, columns=1)\n st.altair_chart(all_charts, use_container_width=False)\n\n st.subheader(\"Notes\")\n st.write(\"**Equal opportunity**:\")\n st.latex(r\"\\frac{\\text{FNR}(D=\\text{unprivileged})}{\\text{FNR}(D=\\text{privileged})}\")\n st.write(\"**Predictive parity**:\")\n st.latex(r\"\\frac{\\text{PPV}(D=\\text{unprivileged})}{\\text{PPV}(D=\\text{privileged})}\")\n st.write(\"**Statistical parity**:\")\n st.latex(r\"\\frac{\\text{Selection Rate}(D=\\text{unprivileged})}{\\text{Selection Rate}(D=\\text{privileged})}\")\n\n\ndef chart_cm_comparison(orig_clf_metric, clf_metric, privileged, title):\n cm1 = orig_clf_metric.binary_confusion_matrix(privileged=privileged)\n cm2 = clf_metric.binary_confusion_matrix(privileged=privileged)\n c1 = get_confusion_matrix_chart(cm1, f\"{title}: Before Mitigation\")\n c2 = get_confusion_matrix_chart(cm2, f\"{title}: After Mitigation\")\n return c1 | c2\n\n\ndef compare():\n protected_attribute = st.selectbox(\"Select protected column.\", list(CONFIG_FAI.keys()))\n\n # Load data\n valid = load_data(\"output/valid.csv\")\n x_valid = valid[FEATURES]\n y_valid = valid[TARGET].values\n\n # Get predictions\n orig_y_pred, orig_text_model_perf = prepare_pred(x_valid, y_valid, debias=False)\n y_pred, text_model_perf = prepare_pred(x_valid, y_valid, debias=True)\n\n st.header(\"Model Performance\")\n st.subheader(\"Before Mitigation\")\n st.text(orig_text_model_perf)\n st.subheader(\"After Mitigation\")\n st.text(text_model_perf)\n\n st.header(\"Algorithmic Fairness Metrics\")\n fthresh = st.slider(\"Set fairness deviation threshold\", 0., 1., 0.2, 
0.05)\n st.write(\"Absolute fairness is 1. The model is considered fair \"\n f\"if **ratio is between {1 - fthresh:.2f} and {1 + fthresh:.2f}**.\")\n\n # Compute fairness measures\n privi_info = CONFIG_FAI[protected_attribute]\n orig_aif_metric = get_aif_metric(\n valid,\n y_valid,\n orig_y_pred,\n protected_attribute,\n privi_info[\"privileged_attribute_values\"],\n privi_info[\"unprivileged_attribute_values\"],\n )\n orig_fmeasures = compute_fairness_measures(orig_aif_metric)\n orig_fmeasures[\"Fair?\"] = orig_fmeasures[\"Ratio\"].apply(\n lambda x: \"Yes\" if np.abs(x - 1) < fthresh else \"No\")\n\n aif_metric = get_aif_metric(\n valid,\n y_valid,\n y_pred,\n protected_attribute,\n privi_info[\"privileged_attribute_values\"],\n privi_info[\"unprivileged_attribute_values\"],\n )\n fmeasures = compute_fairness_measures(aif_metric)\n fmeasures[\"Fair?\"] = fmeasures[\"Ratio\"].apply(\n lambda x: \"Yes\" if np.abs(x - 1) < fthresh else \"No\")\n\n for m in METRICS_TO_USE:\n source = pd.concat([orig_fmeasures.query(f\"Metric == '{m}'\"),\n fmeasures.query(f\"Metric == '{m}'\")])\n source[\"Metric\"] = [\"1-Before Mitigation\", \"2-After Mitigation\"]\n\n st.write(m)\n st.altair_chart(plot_fmeasures_bar(source, fthresh), use_container_width=True)\n\n \nif __name__ == \"__main__\":\n fai()\n","sub_path":"xai_fairness/app_fai.py","file_name":"app_fai.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"597872791","text":"#Programmer: Chris Tralie\n#Purpose: To wrap around Rann's pipeline to compute persistence diagrams and\n#Dionysus for computing bottleneck distance\nimport subprocess\nimport os\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom SparseEdgeList import *\n\ndef plotDGM(dgm, color = 'b', sz = 20, label = 'dgm'):\n if dgm.size == 0:\n return\n # Create Lists\n # set axis values\n axMin = np.min(dgm)\n axMax = np.max(dgm)\n axRange = axMax-axMin;\n # plot points\n plt.scatter(dgm[:, 0], dgm[:, 1], sz, color,label=label)\n plt.hold(True)\n # plot line\n plt.plot([axMin-axRange/5,axMax+axRange/5], [axMin-axRange/5, axMax+axRange/5],'k');\n # adjust axis\n #plt.axis([axMin-axRange/5,axMax+axRange/5, axMin-axRange/5, axMax+axRange/5])\n # add labels\n plt.xlabel('Time of Birth')\n plt.ylabel('Time of Death')\n\ndef plotDGMAx(ax, dgm, color = 'b', sz = 20, label = 'dgm'):\n if dgm.size == 0:\n return\n axMin = np.min(dgm)\n axMax = np.max(dgm)\n axRange = axMax-axMin;\n ax.scatter(dgm[:, 0], dgm[:, 1], sz, color,label=label)\n ax.hold(True)\n ax.plot([axMin-axRange/5,axMax+axRange/5], [axMin-axRange/5, axMax+axRange/5],'k');\n ax.set_xlabel('Time of Birth')\n ax.set_ylabel('Time of Death')\n\ndef plot2DGMs(P1, P2, l1 = 'Diagram 1', l2 = 'Diagram 2'):\n plotDGM(P1, 'r', 10, label = l1)\n plt.hold(True)\n plt.plot(P2[:, 0], P2[:, 1], 'bx', label = l2)\n plt.legend()\n plt.xlabel(\"Birth Time\")\n plt.ylabel(\"Death Time\")\n\ndef savePD(filename, I):\n if os.path.exists(filename):\n os.remove(filename)\n fout = open(filename, \"w\")\n for i in range(I.shape[0]):\n fout.write(\"%g %g\"%(I[i, 0], I[i, 1]))\n if i < I.shape[0]-1:\n fout.write(\"\\n\")\n fout.close()\n\n#Wrap around Dionysus's bottleneck distance after taking the log\ndef getInterleavingDist(PD1, PD2):\n savePD(\"PD1.txt\", np.log(PD1))\n savePD(\"PD2.txt\", np.log(PD2))\n proc = subprocess.Popen([\"./bottleneck\", \"PD1.txt\", \"PD2.txt\"], stdout=subprocess.PIPE)\n lnd = float(proc.stdout.readline())\n return np.exp(lnd) - 1.0 #Interleaving dist is 1 + eps\n\ndef getBottleneckDist(PD1, PD2):\n savePD(\"PD1.txt\", PD1)\n savePD(\"PD2.txt\", PD2)\n proc = subprocess.Popen([\"./bottleneck\", \"PD1.txt\", \"PD2.txt\"], stdout=subprocess.PIPE)\n return float(proc.stdout.readline())\n\ndef parsePDs(filename):\n PDs = {}\n fin = open(filename)\n for l in fin.readlines():\n fs = [float(s.rstrip()) for s in l.split()]\n dim = int(fs[0])\n if not dim in PDs:\n PDs[dim] = []\n if fs[-2] == fs[-1]:\n continue #Don't display classes which die instantly\n PDs[dim].append(fs[-2:])\n fin.close()\n ret = []\n count = 0\n for i in range(len(PDs)):\n ret.append(np.array(PDs[i]))\n return ret\n\ndef getPDs(I, J, D, N, m):\n if os.path.exists(\"temp.dimacs\"):\n os.remove(\"temp.dimacs\")\n writeResults(I, J, D, N, \"temp.dimacs\")\n if os.path.exists(\"temp.results\"):\n os.remove(\"temp.results\")\n proc = subprocess.Popen([\"./phatclique\", \"-i\", \"temp.dimacs\", \"-m\", \"%i\"%m, \"-o\", \"temp.results\"], stdout=subprocess.PIPE)\n #stdout = proc.communicate()[0]\n while True:\n output=proc.stdout.readline()\n if (output == b'' or output == '') and proc.poll() is not None:\n break\n if output:\n print(output.strip())\n rc = proc.poll()\n return parsePDs(\"temp.results\")\n\ndef doRipsFiltration(X, maxHomDim, eps = 0):\n (I, J, D) = makeComplex(X, 0)\n PDs = getPDs(I, J, D, X.shape[0], maxHomDim+2)\n return PDs\n \nif __name__ == '__main__':\n X = np.random.randn(200, 2)\n X = X/np.sqrt(np.sum(X**2, 
1)[:, None])\n #plt.plot(X[:, 0], X[:, 1], '.')\n #plt.show()\n PDs = doRipsFiltration(X, 1)\n plotDGM(PDs[1])\n plt.show()\n","sub_path":"TDA.py","file_name":"TDA.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"380490159","text":"\"\"\"\nImplements article search and filter\n\"\"\"\nfrom urllib.parse import unquote\n\nfrom django.db import models\nfrom django.db.models import Q\n\nfrom authors.apps.articles.filter_search_extras import extra_vars, get_response\n\n\nclass ArticleManager(models.Manager):\n \"\"\"\n define custom manager for articles\n \"\"\"\n\n def search(self, params):\n \"\"\"\n customised search functionality\n \"\"\"\n author = unquote(params.get(\"author\", \"\"))\n title = unquote(params.get(\"title\", \"\"))\n tag = unquote(params.get(\"tag\", \"\"))\n\n author_query = (Q(author__username__icontains=author) | Q(author__email__exact=author))\n tag_query = Q(tags__tag_name__exact=tag)\n title_query = Q(title__icontains=title)\n\n all_fields = (author and title and tag)\n author_and_title = (author and title and not tag)\n author_and_tag = (author and tag and not title)\n author_only, tag_only, title_and_tag, title_only = extra_vars(all_fields, author, tag, title)\n\n queryset = self.get_queryset()\n\n attrs = (all_fields, author_and_tag, author_and_title, author_only, queryset, tag_only,\n title_and_tag, title_only, author_query, title_query, tag_query)\n\n return get_response(attrs)\n","sub_path":"authors/apps/articles/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"325154852","text":"# -*- coding:utf-8 -*-\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '1.0.2.dev0'\ndescription = \"JSON based migrations for Plone\"\n\nrequirements = [\n 'setuptools',\n 'collective.transmogrifier>=1.5',\n 'plone.app.transmogrifier',\n 'zope.app.container',\n]\n\ntry:\n import json\nexcept ImportError:\n requirements.append('simplejson')\n\n\nsetup(\n name='collective.jsonmigrator',\n version=version,\n description=description,\n long_description=\"%s\\n%s\" % (\n open(\"README.rst\").read(),\n open(\"CHANGES.rst\").read(),\n ),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='plone transmogrifier ',\n author='Rok Garbas',\n author_email='rok@garbas.si',\n url='https://github.com/collective/collective.jsonmigrator',\n license='BSD',\n packages=find_packages(),\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n install_requires=requirements,\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"57504838","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse, copy\n\nimport blib\nfrom blib import getparam, rmparam, tname, msg, errmsg, site\n\ndef process_page(page, index, parsed):\n global args\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errpagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n errmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, args.verbose)\n\n notes = []\n for t in parsed.filter_templates():\n origt = str(t)\n if tname(t) in [\"ru-conj\", \"ru-conj-old\"]:\n if [x for x in t.params if str(x.value) == \"or\"]:\n errpagemsg(\"WARNING: Skipping multi-arg conjugation: %s\" % str(t))\n continue\n param2 = getparam(t, \"2\")\n if \"+p\" in param2:\n continue\n ppp = getparam(t, \"ppp\") or getparam(t, \"past_pasv_part\")\n if not ppp or ppp == \"-\":\n continue\n ppp2 = getparam(t, \"ppp2\") or getparam(t, \"past_pasv_part2\")\n rmparam(t, \"ppp\")\n rmparam(t, \"past_pasv_part\")\n rmparam(t, \"ppp2\")\n rmparam(t, \"past_pasv_part2\")\n t.add(\"2\", param2 + \"+p\")\n if tname(t) == \"ru-conj\":\n tempcall = re.sub(r\"^\\{\\{ru-conj\", \"{{ru-generate-verb-forms\", str(t))\n else:\n tempcall = re.sub(r\"^\\{\\{ru-conj-old\", \"{{ru-generate-verb-forms|old=1\", str(t))\n result = expand_text(tempcall)\n if not result:\n errpagemsg(\"WARNING: Error expanding template %s\" % tempcall)\n continue\n forms = blib.split_generate_args(result)\n pppform = forms.get(\"past_pasv_part\", \"\")\n if \",\" in pppform:\n auto_ppp, auto_ppp2 = pppform.split(\",\")\n wrong = False\n if ppp != auto_ppp:\n errpagemsg(\"WARNING: ppp %s != auto_ppp %s\" % (ppp, auto_ppp))\n wrong = True\n if ppp2 != auto_ppp2:\n errpagemsg(\"WARNING: ppp2 %s != auto_ppp2 %s\" % (ppp2, auto_ppp2))\n wrong = True\n if wrong:\n continue\n else:\n if ppp != pppform:\n errpagemsg(\"WARNING: ppp %s != auto_ppp %s\" % (ppp, pppform))\n continue\n newt = str(t)\n if origt != newt:\n notes.append(\"Replaced manual ppp= with irreg verb with +p\")\n pagemsg(\"Replaced %s with %s\" % (origt, newt))\n\n return parsed, notes\n\nparser = blib.create_argparser(\"Make irregular verbs use +p instead of manual ppp=\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfor category in [\"Russian irregular verbs\"]:\n msg(\"Processing category: %s\" % category)\n for i, page in blib.cat_articles(category, start, end):\n blib.do_edit(page, i, process_page, save=args.save, verbose=args.verbose)\n","sub_path":"make_irreg_verbs_use_plus_p.py","file_name":"make_irreg_verbs_use_plus_p.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"383262506","text":"import sys\nimport json\nsys.path.append('./db/')\nfrom Basketballdb import Basketballdb\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn 'Welcome to the app. Go to /query for the application home page.'\n\ndef filterByTeamName(teams,theTeam):\n result = False\n if (theTeam != False):\n #return ateam\n for team in teams:\n if theTeam == team['name']:\n result = team\n break\n else:\n return False\n\n if result:\n return [result]\n\ndef filterByPlayerName(players, thePlayer):\n a = [];\n result = False\n while (thePlayer != False):\n for player in players:\n if thePlayer == player['lastname']:\n result = player\n a.append(result)\n break\n \n else:\n return False\n\n if result:\n return a\n \n \n \n \n@app.route('/query', methods=['POST', 'GET'])\ndef query():\n #get full players and teams\n db = Basketballdb()\n teams = db.getTeams()\n players = db.getPlayers()\n\n #start with full teams and players\n filterteams = teams\n filterplayers = players\n\n #filters\n if( request.args.get('team', False) ):\n filterteams = filterByTeamName(filterteams, request.args.get('team', False))\n\n if( request.args.get('player_last_name', False) ):\n filterplayers = filterByPlayerName(players, request.args.get('player_last_name', False))\n \n data = { 'teams' : teams, 'players' : players,\n 'filteredTeams' : filterteams[:50],\n 'filteredPlayers' : filterplayers[:50] }\n \n return render_template('main.html', data=data)\n #return json.dumps(filterteams)\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run()\n","sub_path":"multi-name mod.py","file_name":"multi-name mod.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"557354616","text":"\"\"\"Celery tasks.\"\"\"\nfrom app import app\nimport requests\n\n\nclass HTTPTask(app.Task):\n\n def __init__(self):\n self.client = requests.Session()\n\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n print('Handled exception: {}'.format(exc))\n\n # def after_return(self, status, retval, task_id, args, kwargs, einfo): pass\n # def on_retry(self, exc, task_id, args, kwargs, einfo): pass\n # def on_success(self, retval, task_id, args, kwargs): pass\n\n\n\n@app.task(base=HTTPTask, name='tasks.get_url')\ndef get_url(url=None):\n response = get_url.client.get(url)\n response.raise_for_status()\n return response.json()\n\n\n\nif __name__ == '__main__':\n urls = [\n 'https://httpbin.org/status/404',\n 'https://httpbin.org/get'\n ]\n for url in urls:\n get_url.delay(url=url)\n","sub_path":"libraries_third_party/celery_/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"385341860","text":"\"\"\"\n\n**INPUT FILE FORMAT**\n\nThe file format consists of a one-row template header followed by a one-row data header and subsequent data\nrows.\n\nThe data represents tailpipe emission rates by model year, age, reg-class and fuel type as estimated by\nEPA's MOVES model.\n\nFile Type\n comma-separated values (CSV)\n\nSample Header\n .. csv-table::\n\n input_template_name:,emission_rates_vehicles,input_template_version:,0.2\n\nSample Data Columns\n .. csv-table::\n :widths: auto\n\n start_year,sourcetype_name,reg_class_id,market_class_id,in_use_fuel_id,rate_name,independent_variable,slope,intercept,ind_variable_data,rate_data,equation\n 1995,passenger car,car,non_hauling.ICE,pump gasoline,pm25_exhaust_grams_per_mile,age,0.000020575,0.02556,\"[22, 30]\",\"[0.02601255162083171, 0.026177151337127946]\",((2.0575e-05 * age) + 0.02556)\n 1995,passenger car,car,non_hauling.ICE,pump gasoline,nmog_exhaust_grams_per_mile,age,-0.00059478,0.77323,\"[22, 30]\",\"[0.7601447516760625, 0.7553865333609487]\",((-0.00059478 * age) + 0.77323)\n\nData Column Name and Description\n :start_year:\n The model year to which the rate applies; model years not shown will apply the start_year rate\n less than or equal to the model year.\n\n :sourcetype_name:\n The MOVES sourcetype name (e.g., passenger car, passenger truck, light-commercial truck, etc.).\n\n :reg_class_id:\n Vehicle regulatory class at the time of certification, e.g. 'car','truck'. Reg class definitions may differ\n across years within the simulation based on policy changes. ``reg_class_id`` can be considered a 'historical'\n or 'legacy' reg class.\n\n :market_class_id:\n The OMEGA market class (e.g., non-hauling.ICE, hauling.BEV, etc.).\n\n :in_use_fuel_id:\n In-use fuel id, for use with context fuel prices, must be consistent with the context data read by\n ``class context_fuel_prices.ContextFuelPrices``\n\n :rate_name:\n The emission rate providing the pollutant and units.\n\n :independent_variable:\n The independent variable used in calculating the emission rate (e.g., age).\n\n :slope:\n The slope of the linear fit to the emission rate input data.\n\n :intercept:\n The intercept of the linear fit to the emission rate input data.\n\n :ind_variable_data:\n Input data for the independent variable used to generate the emission rate curve where data represent the age\n associated with the corresponding input data.\n\n :rate_data:\n The emission rate data used to generate the emission rate curve.\n\n :equation:\n The linear fit emission rate equation used to calculate an emission rate at the given independent variable.\n\n----\n\n**CODE**\n\n\"\"\"\nfrom omega_effects.general.general_functions import read_input_file\nfrom omega_effects.general.input_validation import validate_template_version_info, validate_template_column_names\n\n_cache = dict()\n\n\nclass EmissionRatesVehicles:\n \"\"\"\n Loads and provides access to vehicle emission factors by model year, age, legacy reg class ID and in-use fuel ID.\n\n \"\"\"\n def __init__(self):\n self._data = dict()\n self._cache = dict()\n self.startyear_min = 0\n self.deets = {} # this dictionary will not include the legacy fleet\n\n def init_from_file(self, filepath, effects_log):\n \"\"\"\n\n Initialize class data from input file.\n\n Args:\n filepath: the Path object to the file.\n effects_log: an instance of the EffectsLog class.\n\n Returns:\n Nothing, but reads the appropriate input file.\n\n \"\"\"\n # don't forget to update the module docstring with changes here\n 
input_template_name = 'emission_rates_vehicles'\n input_template_version = 0.2\n input_template_columns = {\n 'start_year',\n 'sourcetype_name',\n 'reg_class_id',\n 'market_class_id',\n 'in_use_fuel_id',\n 'rate_name',\n 'equation',\n }\n\n df = read_input_file(filepath, effects_log)\n validate_template_version_info(df, input_template_name, input_template_version, effects_log)\n\n # read in the data portion of the input file\n df = read_input_file(filepath, effects_log, skiprows=1)\n validate_template_column_names(filepath, df, input_template_columns, effects_log)\n\n rate_keys = zip(\n df['start_year'],\n df['sourcetype_name'],\n df['reg_class_id'],\n df['in_use_fuel_id'],\n df['rate_name']\n )\n df.set_index(rate_keys, inplace=True)\n\n self.startyear_min = min(df['start_year'])\n\n self._data = df.to_dict('index')\n\n for rate_key in rate_keys:\n rate_eq = self._data[rate_key]['equation']\n self._data[rate_key].update({'equation': compile(rate_eq, '', 'eval')})\n\n def get_emission_rate(self, session_settings, model_year, sourcetype_name, reg_class_id,\n in_use_fuel_id, age, *rate_names):\n \"\"\"\n\n Args:\n session_settings: an instance of the SessionSettings class\n model_year (int): vehicle model year for which to get emission factors\n sourcetype_name (str): the MOVES sourcetype name (e.g., 'passenger car', 'light commercial truck')\n reg_class_id (str): the regulatory class, e.g., 'car' or 'truck'\n in_use_fuel_id (str): the liquid fuel ID, e.g., 'pump gasoline'\n age (int): vehicle age in years\n rate_names: name of emission rate(s) to get\n\n Returns:\n A list of emission rates for the given type of vehicle of the given model_year and age.\n\n \"\"\"\n locals_dict = locals()\n rate = 0\n return_rates = list()\n\n if model_year < self.startyear_min:\n model_year = self.startyear_min\n\n for rate_name in rate_names:\n\n cache_key = (model_year, sourcetype_name, reg_class_id, in_use_fuel_id, age, rate_name)\n if cache_key in self._cache:\n rate = self._cache[cache_key]\n else:\n rate_keys = [\n k for k in self._data\n if k[0] <= model_year\n and k[1] == sourcetype_name\n and k[2] == reg_class_id\n and k[3] == in_use_fuel_id\n and k[4] == rate_name\n ]\n if not rate_keys:\n rate_keys = [\n k for k in self._data\n if k[1] == sourcetype_name\n and k[2] == reg_class_id\n and k[3] == in_use_fuel_id\n and k[4] == rate_name\n ]\n start_year = min([k[0] for k in rate_keys])\n else:\n max_start_year = max([k[0] for k in rate_keys])\n start_year = min(model_year, max_start_year)\n\n rate_key = start_year, sourcetype_name, reg_class_id, in_use_fuel_id, rate_name\n\n rate = eval(self._data[rate_key]['equation'], {}, locals_dict)\n\n if rate < 0:\n temp_key = (model_year, sourcetype_name, reg_class_id, in_use_fuel_id, age - 1, rate_name)\n rate = self._cache[temp_key]\n\n self._cache[cache_key] = rate\n\n self.deets.update(\n {cache_key: {\n 'session_policy': session_settings.session_policy,\n 'session_name': session_settings.session_name,\n 'model_year': model_year,\n 'age': age,\n 'reg_class_id': reg_class_id,\n 'sourcetype_name': sourcetype_name,\n 'in_use_fuel_id': in_use_fuel_id,\n 'rate_name': rate_name,\n 'rate': rate,\n }}\n )\n return_rates.append(rate)\n\n return return_rates\n","sub_path":"omega_effects/effects/emission_rates_vehicles.py","file_name":"emission_rates_vehicles.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"419956359","text":"'''\nDelete Duplicates\nDescription\nGiven an unsorted linked list of N nodes. The task is to remove duplicate elements from this unsorted Linked List. When a value appears in multiple nodes, the node which appeared first should be kept, all others duplicates are to be removed. You need to implement remove duplicates function only.\n\nInput\nFirst line contains an integer N denoting the size of the Linked List.\n\nNext line contain N space separated integers dentoing the LL.\n\nOutput\nPrint the final LL\n\nInput:\n\n5\n\n1 1 2 2 5\n\nOutput:\n\n1 2 5\n'''\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n \n \nclass LinkedList:\n def __init__(self):\n self.head = None\n self.last_node = None\n \n def append(self, data):\n if self.last_node is None:\n self.head = Node(data)\n self.last_node = self.head\n else:\n self.last_node.next = Node(data)\n self.last_node = self.last_node.next\n \n def get_prev_node(self, ref_node):\n current = self.head\n while (current and current.next != ref_node):\n current = current.next\n return current\n \n def remove(self, node):\n prev_node = self.get_prev_node(node)\n if prev_node is None:\n self.head = self.head.next\n else:\n prev_node.next = node.next\n \n def display(self):\n current = self.head\n while current:\n print(current.data, end = ' ')\n current = current.next\n \n# Implement this Function \ndef remove_duplicates(llist):\n\t#valid for sorted as well as unsorted\n\ttrack=set()\n\ttemp=llist.head\n\tif temp==None:\n\t\treturn\n\ttrack.add(temp.data)#adding the first value in the set\n\twhile temp.next!=None:\n\t\tif temp.next.data in track:\n\t\t\ttemp.next=temp.next.next #temp.next fdeleted\n\t\telse:\n\t\t\ttrack.add(temp.next.data)\n\t\t\ttemp=temp.next\n\treturn\n\t\n\t\n \n \na_llist = LinkedList()\n \nn = int(input())\nl = list(map(int, input().split(' ')))\nfor data in l:\n a_llist.append(data)\n \nremove_duplicates(a_llist)\n \na_llist.display()","sub_path":"Delete Duplicates.py","file_name":"Delete Duplicates.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"585677235","text":"#coding:utf-8\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\nimport os, sys\n\nmpl.rcParams['axes.linewidth'] = 1.2 #set the value globally\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False\n\ndir = '../../Data/ClusteringQuality/CluStream/'\nfileName = 'CluStream-KDD99-Normalized-patent'\ndata = pd.read_excel(dir + fileName + '.xlsx')\n\nplt.rc('pdf', fonttype=42)\n\nplt.figure(figsize=(4.0, 2.5))\nplt.subplots_adjust(\n left=0.12,\n bottom=0.18,\n right=0.96,\n top=0.94,\n wspace=0.00,\n hspace=0.00)\n\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\n\n\nfont = {'family': 'Helvetica',\n 'weight': 'demibold',\n 'size': 12,\n }\n\n# plt.xticks(fontsize=8, weight='medium')\n# plt.yticks(fontsize=8, weight='medium')\nplt.xlabel(u'数据量 (' + r'$\\times{10^3}$' + ')')#, size=8, weight='medium')\nplt.ylabel(u'聚类质量CMM')#, size=8, weight='medium')\nplt.ylim(0.3, 1.05)\nplt.xlim(0, 500)\n\nmarksize = 2\nlinewidth = 1\n\n\nplt.plot(data[data.columns[0]], data[data.columns[1]], linestyle=\":\", linewidth=linewidth, color='black')#color='#978a84')\nplt.plot(data[data.columns[0]], data[data.columns[3]], marker='D', markersize=marksize, linewidth=linewidth, color='gray')\nplt.plot(data[data.columns[0]], data[data.columns[2]], marker='^', markersize=marksize, linewidth=linewidth, color='black')\n\nplt.legend(labels=[data.columns[1], data.columns[3], data.columns[2]], loc=8, frameon=False, bbox_to_anchor=(0.5, 0))\n# plt.show()\nplt.savefig(dir + fileName + \".png\")\n\n","sub_path":"src/ClusteringQuality/CluStream/CluStreamKDD99CMM-patent.py","file_name":"CluStreamKDD99CMM-patent.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"292535869","text":"## Old development version\nimport numpy as np\nimport utils\n\nclass Connect:\n def __init__(self, num_cols=5, num_rows=3, num_connect=3, verbose=True):\n \"\"\"\n Define a new Connect object\n \"\"\"\n\n self.num_cols = num_cols\n self.num_rows = num_rows\n self.num_connect = num_connect\n self.verbose = verbose\n\n self.players = ['o', 'x']\n self.other_player = {'o': 'x', 'x': 'o'}\n\n def reset(self, first_player='random'):\n self.grid = np.full(fill_value=\" \", shape=(self.num_rows, self.num_cols), dtype=str)\n\n # Each column index is one action.\n self.available_actions = np.arange(self.num_cols)\n\n # Keep track of the lowest free row position per column (where a disk would land if dropped in that column)\n self.lowest_free_rows = np.zeros(self.num_cols, dtype=int)\n\n if first_player == 'random':\n self.player_at_turn = np.random.choice(self.players)\n elif first_player in self.players:\n self.player_at_turn = first_player\n else:\n raise ValueError(\"The argument first_player has to be either 'random', 'x', or 'o'.\")\n\n # Keep track of the last action played (simplifies checking for terminal states).\n self.last_action = None\n\n self.game_over = False\n if self.verbose:\n print(\"Game has been reset.\")\n print(self.grid[::-1, ])\n\n def change_turn(self):\n self.player_at_turn = self.other_player[self.player_at_turn]\n\n def act(self, action):\n \"\"\"\n Given an action (a column index; known to be a valid action!), generate the new board\n\n :param action: an integer referring to the column index where a new token/disk should be dropped\n \"\"\"\n self.grid[self.lowest_free_rows[action], action] = self.player_at_turn\n self.lowest_free_rows[action] += 1\n if self.lowest_free_rows[action] == self.num_rows:\n self.available_actions = np.setdiff1d(self.available_actions, action)\n self.last_action = action\n\n if self.verbose:\n print(self.grid[::-1, ])\n\n def grid_is_full(self):\n return np.all(self.lowest_free_rows == self.num_rows)\n\n def was_winning_move(self):\n \"\"\"\n Check if the move that has just been made wins the game.\n\n Determine in which row the disk (token) landed using self.last_action and look at that row,\n column and both diagonals including this token. 
Check whether there is any sequence of\n length 'num_connect' of the same token type.\n\n For example, if num_connect == 3\n\n ' 'd' ' ' 'c' ' ' 'u' ' '\n ' ' ' 'd' 'c' 'u' ' ' ' '\n ' 'r' 'r' 'x' 'r' 'r' ' '\n ' ' ' 'u' 'c' 'd' ' ' ' '\n ' 'u' ' ' 'c' ' ' 'd' ' '\n ' ' ' ' ' ' ' ' ' ' ' ' '\n ' ' ' ' ' ' ' ' ' ' ' ' '\n\n and \"x\" is the position the token has dropped, check whether there is a sequence of 'x' of length 3\n in the corresponding row (r), column (c), upward-diagonal (u), or downward diagonal (d).\n\n [This function could be made MUCH more efficient by excluding some of the checks beforehand, for\n example, based on the row height of the last_action.]\n\n :return: a boolean, True if the last move was a winning move\n \"\"\"\n game_is_won = False\n\n action_row = self.lowest_free_rows[self.last_action] - 1\n action_col = self.last_action\n winning_sequence = np.full(shape=self.num_connect, fill_value=self.player_at_turn)\n\n # Calculate candidate vectors\n row_candidates = self.grid[action_row, max(0, action_col - self.num_connect + 1) : min(self.num_cols, action_col + self.num_connect)]\n if utils.search_sequence_numpy(row_candidates, winning_sequence):\n game_is_won = True\n else:\n col_candidates = self.grid[max(0, action_row - self.num_connect + 1): min(self.num_rows, action_row + self.num_connect), action_col]\n if utils.search_sequence_numpy(col_candidates, winning_sequence):\n game_is_won = True\n else:\n diag_index_up = action_col - action_row\n diag_up_candidates = np.diagonal(self.grid, diag_index_up)\n if utils.search_sequence_numpy(diag_up_candidates, winning_sequence):\n game_is_won = True\n else:\n diag_index_down = action_row + action_col - (self.num_rows - 1)\n diag_down_candidates = np.diagonal(self.grid[::-1], diag_index_down)\n if utils.search_sequence_numpy(diag_down_candidates, winning_sequence):\n game_is_won = True\n\n if self.verbose and game_is_won:\n print(\"Player '\", self.player_at_turn, \"' has won the game!\")\n return game_is_won\n\n","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"367229587","text":"#AP : Accident Prediction\n#\n\nimport sys\nimport os \nimport numpy as np\nimport time\n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils import data\nfrom torchsummaryX import summary\n\nfrom lib.utils.ap_train_val_utils_AdaLEA import train_ap_wo_ego, val_ap_wo_ego\nfrom lib.models.ap_model_LEA import AP_wo_ego\nfrom lib.utils.ap_dataloader_for_LEA import load_fol_hidden_state\nfrom config.config import * \n\nfrom tensorboardX import SummaryWriter\nimport pandas as pd\n\nGPU_NUM =0\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\nprint(device)\ntorch.cuda.set_device(device)\n\n#print(\"Cuda available: \", torch.cuda.is_available())\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# load args\nargs = parse_args()\n\nif args.enc_concat_type == 'cat':\n args.dec_hidden_size = args.box_enc_size + args.flow_enc_size\nelse:\n if args.box_enc_size != args.flow_enc_size:\n raise ValueError('Box encoder size %d != flow encoder size %d'\n %(args.box_enc_size,args.flow_enc_size))\n else:\n args.dec_hidden_size = args.box_enc_size\n\n\n\n\nprint(\">> Setting the Accident Precition model ... \")\nAP_model = AP_wo_ego(args).to(device)\nall_params = AP_model.parameters()\n#optimizer = optim.RMSprop(all_params, lr=args.lr)\noptimizer = optim.Adam(all_params, lr=args.lr)\n\ndataloader_params ={\n \"batch_size\": args.batch_size,\n \"shuffle\": args.shuffle,\n \"num_workers\": args.num_workers\n }\n\nval_set = load_fol_hidden_state(args, 'val')\nprint(\">> Number of validation samples:\", val_set.__len__())\nval_gen = data.DataLoader(val_set, **dataloader_params)\n\nprint(\">> Check the Model's architecture\")\nsummary(AP_model, \n torch.zeros(1, args.segment_len, args.pred_timesteps, args.dec_hidden_size).to(device)\n )\n\n\nprint(\">> Train data root:\", args.data_root)\n\nwriter = SummaryWriter('summary/train_on_DoTA/concatenation/AdaLEA/')\n\n\n# MODEL TRAINING\nmin_loss = 1e6\n\nbest_ap_model = None\n\nbefore_ATTC = 0.\n\n#save train(mAP, ATTC), val(mAP, ATTC)\ninform = np.zeros((args.train_epoch, 4))\n\nfor epoch in range(1, args.train_epoch+1):\n print(\"\\n\")\n print(\"=====================================\")\n print(\"// Epoch :\", epoch)\n # regenerate the training dataset \n train_set = load_fol_hidden_state(args, 'train')\n train_gen = data.DataLoader(train_set, **dataloader_params)\n print(\" Number of training samples:\", train_set.__len__())\n\n start = time.time()\n\n #===== train\n train_loss, train_mAP, train_ATTC = train_ap_wo_ego(epoch, AP_model, optimizer, train_gen, before_ATTC, verbose=True)\n writer.add_scalar('data/train_loss', train_loss, epoch)\n writer.add_scalar('data/train_mAP', train_mAP, epoch)\n writer.add_scalar('data/train_ATTC', train_ATTC, epoch)\n inform[epoch-1,0] = train_mAP\n inform[epoch-1,1] = train_ATTC\n\n #===== validation\n val_loss, val_mAP, val_ATTC = val_ap_wo_ego(epoch, AP_model, val_gen, before_ATTC, verbose=True)\n writer.add_scalar('data/val_loss', val_loss, epoch)\n writer.add_scalar('data/val_mAP', val_mAP, epoch)\n writer.add_scalar('data/val_ATTC', val_ATTC, epoch)\n inform[epoch-1,2] = val_mAP\n inform[epoch-1,3] = val_ATTC\n\n before_ATTC = train_ATTC\n\n\n # print time\n elipse = time.time() - start\n print(\"Elipse: \", elipse)\n\n # save checkpoint per epoch\n saved_ap_model_name = 'epoch_' + str(format(epoch,'03')) + '.pt'\n print(\"Saving checkpoints: \" + saved_ap_model_name)\n 
torch.save(AP_model.state_dict(), os.path.join(args.checkpoint_dir, saved_ap_model_name))\n\ndf = pd.DataFrame(inform)\ndf.to_csv(args.checkpoint_dir+\"/train_val_inform.csv\", index=False)\nnp.save(args.checkpoint_dir+\"/train_val_inform\" ,inform)\n","sub_path":"RunFile/final_code/train_AP_with_AdaLEA_concat.py","file_name":"train_AP_with_AdaLEA_concat.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"309370338","text":"#!/usr/bin/env python3\n'''\n This demo application demonstrates the functionality of the safrs documented REST API\n After installing safrs with pip, you can run this app standalone:\n $ python3 demo_relationship.py [Listener-IP]\n\n This will run the example on http://Listener-Ip:5000\n\n - A database is created and a user is added\n - A rest api is available\n - swagger documentation is generated\n\n This is a minimal example, you'll probably want to use demo_relationship_ext.py instead!!!\n'''\nimport sys\nimport logging\nimport builtins\nfrom flask import Flask, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom flask_cors import CORS\nfrom safrs import SAFRSBase, SAFRSAPI, jsonapi_rpc\n\ndb = SQLAlchemy()\n\n\nclass Response(SAFRSBase, db.Model):\n '''\n description: Response description\n '''\n __tablename__ = 'Responses'\n id = db.Column(db.String, primary_key=True)\n response_data = db.Column(db.String, default='')\n\n\nif __name__ == '__main__':\n HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'\n PORT = 5000\n app = Flask('SAFRS Demo Application')\n app.config.update(SQLALCHEMY_DATABASE_URI='sqlite://', DEBUG=True)\n db.init_app(app)\n db.app = app\n # Create the database\n db.create_all()\n API_PREFIX = ''\n \n with app.app_context():\n # Create a user and a book and add the book to the user.books relationship\n response = Response(response_data='{}')\n api = SAFRSAPI(app, host='{}:{}'.format(HOST,PORT), port=PORT, prefix=API_PREFIX)\n # Expose the database objects as REST API endpoints\n api.expose_object(Response)\n # Register the API at /api/docs\n print('Starting API: http://{}:{}{}'.format(HOST, PORT, API_PREFIX))\n app.run(host=HOST, port=PORT)\n","sub_path":"examples/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"61339220","text":"#!/usr/bin/python3\n#coding:utf-8\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nimport numpy as np\nimport pyaudio\nimport wave\nfrom struct import pack\nfrom array import array\nimport collections\nfrom collections import Counter\nimport sys\nimport signal\nimport time\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 16000\nclip_stride_ms = 32 # 需要确保 RATE / clip_stride_ms 为整数\nCHUNK_SIZE = int(RATE / clip_stride_ms) # 500\nNUM_PADDING_CHUNKS = clip_stride_ms # 32\nNUM_WINDOW_CHUNKS = 13\naverage_window_ms = 500\nsuppression_ms = 1500\ndetection_threshold = 0.9\n\naverage_window_samples = int(average_window_ms / clip_stride_ms)+2 # 15\nsuppression_samples = int(suppression_ms * RATE / 1000) # 240000\n\n\nclass KWS:\n def __init__(self, model_dir='model\\ds_cnn.pb'): #model/CNN_L.pb model/dnn.pb model\\Pretrained_models\\DS_CNN/DS_CNN_L.pb\n # load model\n self.sess = tf.InteractiveSession()\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n # self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n self.load_pb(self.sess, pb_path=model_dir)\n print('Model restored.')\n self.softmax_tensor = self.sess.graph.get_tensor_by_name('labels_softmax:0')\n\n def recognize_file(self, wav='wav/test.wav', label_path='label/labels.txt',\n num_top_predictions=3):\n # recognize\n with open(wav, 'rb') as wav_file:\n wav_data = wav_file.read()\n print(wav_data)\n self.predictions = np.squeeze(self.sess.run(self.softmax_tensor, {'decoded_sample_data:0': wav_data})) # decoded_sample_data:0 wav_data:0\n # Sort to show labels in order of confidence\n top_k = self.predictions.argsort()[-num_top_predictions:] # argsort()元素从小到大排列,提取其对应的index(索引)\n labels = self.load_labels(label_path)\n result = []\n for node_id in top_k:\n human_string = labels[node_id]\n score = self.predictions[node_id]\n result.append('%s (score = %.5f)' % (human_string, score))\n print('%s (score = %.5f)' % (human_string, score))\n print(result)\n return '\\n'.join(result)\n\n def recognize_realtime(self, wav_stream, label_path='label/labels.txt', num_top_predictions=3):\n wav_stream = wav_stream.read()\n print(wav_stream)\n print(int.from_bytes(wav_stream, byteorder='big'))\n self.predictions = np.squeeze(self.sess.run(self.softmax_tensor, {'wav_data:0': wav_stream}))\n # Sort to show labels in order of confidence\n top_k = self.predictions.argsort()[-num_top_predictions:] # argsort()元素从小到大排列,提取其对应的index(索引)\n labels = self.load_labels(label_path)\n result = []\n for node_id in top_k:\n human_string = labels[node_id]\n score = self.predictions[node_id]\n result.append('%s (score = %.5f)' % (human_string, score))\n print('%s (score = %.5f)' % (human_string, score))\n # print(result)\n return '\\n'.join(result)\n\n def record_to_file(self, path, data, sample_width):\n \"Records from the microphone and outputs the resulting data to 'path'\"\n # sample_width, data = record()\n data = pack('<' + ('h' * len(data)), *data)\n wf = wave.open(path, 'wb')\n wf.setnchannels(1)\n wf.setsampwidth(sample_width)\n wf.setframerate(RATE)\n wf.writeframes(data)\n wf.close()\n\n def handle_int(self, sig, chunk):\n global leave, got_10_result\n leave = True\n got_10_result = True\n\n def normalization(self, data):\n # 归一化数据到[-1,1]\n _range = np.max(abs(data))\n return data / _range\n\n def standardization(self, data):\n # 标准化\n mu = np.mean(data, axis=0)\n sigma = np.std(data, axis=0)\n return (data - mu) / sigma\n\n def counter(self, 
human_string_arr, score_arr):\n # print(human_string_arr)\n top_num = 2\n string_top2 = Counter(human_string_arr).most_common(top_num)\n # print(string_top2)\n human_string_and_score_dict = {}\n if len(string_top2) == 1:\n human_string = string_top2[0][0]\n human_string_index = [j for j, x in enumerate(human_string_arr) if x == human_string]\n human_string_and_score_dict[human_string] = sum([score_arr[k] for k in human_string_index]) / len(\n human_string_arr)\n else:\n for i in range(top_num):\n human_string = string_top2[i][0]\n # print(human_string)\n human_string_index = [j for j, x in enumerate(human_string_arr) if x == human_string]\n human_string_and_score_dict[human_string] = sum([score_arr[k] for k in human_string_index]) / len(\n human_string_arr)\n return human_string_and_score_dict\n\n def record(self, label_path='label/labels.txt'):\n flag = 0\n pa = pyaudio.PyAudio()\n stream = pa.open(format=pyaudio.paInt16,\n channels=1,\n rate=RATE,\n input=True,\n start=False,\n # input_device_index=2,\n frames_per_buffer=CHUNK_SIZE)\n leave = False\n got_10_result = False\n signal.signal(signal.SIGINT, self.handle_int)\n # print('raw_data', raw_data)\n while not leave:\n suppression_flag = 0\n ring_buffer = collections.deque(maxlen=NUM_PADDING_CHUNKS)\n human_string_flags = ['none'] * average_window_samples\n human_string_index = 0\n score_flags = [0] * average_window_samples\n score_index = 0\n # ring_buffer_flags = [0] * NUM_WINDOW_CHUNKS\n # ring_buffer_index = 0\n print(\"* recording: \")\n stream.start_stream()\n while not got_10_result and not leave:\n\n chunk = stream.read(CHUNK_SIZE)\n # print(chunk)\n ring_buffer.append(chunk)\n if len(ring_buffer) < NUM_PADDING_CHUNKS:\n continue\n # print(ring_buffer)\n data_save = b''\n for i in range(len(ring_buffer)):\n data_save += ring_buffer[i]\n raw_data = array('h')\n raw_data.extend(array('h', data_save))\n raw_data = np.array(raw_data,dtype=np.float32).reshape([16000,1])\n raw_data = self.normalization(raw_data)\n if suppression_flag == 0:\n self.predictions = np.squeeze(self.sess.run(self.softmax_tensor, {'decoded_sample_data:0': raw_data}))\n # Sort to show labels in order of confidence\n top_1 = self.predictions.argsort()[-1] # argsort()元素从小到大排列,提取其对应的index(索引)\n labels = self.load_labels(label_path)\n human_string = labels[top_1]\n score = self.predictions[top_1]\n # print(human_string, str(score))\n\n human_string_flags[human_string_index] = human_string\n human_string_index += 1\n human_string_index %= average_window_samples\n\n score_flags[score_index] = score\n score_index += 1\n score_index %= average_window_samples\n\n human_string_and_score_dict = self.counter(human_string_flags, score_flags)\n human_string_big_score_tuple = sorted(human_string_and_score_dict.items(), key=lambda item:item[1])[0]\n human_string = human_string_big_score_tuple[0]\n score = human_string_big_score_tuple[1]\n\n if score < detection_threshold or human_string == '_silence_' or human_string == '_unknown_' or human_string == 'none':\n sys.stdout.write('_')\n else:\n sys.stdout.write(human_string + '(' + str(score) + ')')\n # sys.stdout.write(human_string)\n # flag += 1\n suppression_flag = 1\n start = time.time()\n else:\n if time.time()-start < suppression_ms / 1000:\n chunk = stream.read(CHUNK_SIZE)\n sys.stdout.write('_')\n else:\n suppression_flag = 0\n sys.stdout.flush()\n if flag >= 1000:\n got_10_result = True\n sys.stdout.write('\\n')\n stream.stop_stream()\n print(\"* done recording\")\n got_10_result = False\n leave = True\n 
stream.close()\n\n def load_pb(self, sess, pb_path):\n # pb模型导入\n with gfile.GFile(pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='') # 导入计算图\n\n def load_labels(self, filename):\n \"\"\"Read in labels, one label per line.\"\"\"\n return [line.rstrip() for line in tf.gfile.GFile(filename)]\n\n\nif __name__ == '__main__':\n a = KWS()\n a.record()\n","sub_path":"recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"63863709","text":"from ShopifyManagement import ManageOrder\nimport time\nfrom multiprocessing import Process\nfrom watchDog import setWatchDog\n\ndef shopifyOrderManagement():\n #Upon start\n while True:\n #Define which store to use\n store = 'DK'\n mo = ManageOrder(switch = store) #Instantiate the store\n \n #Check for existing orders for data base update - closed \n status, orders = mo.getOrders(orderType = 'closed')\n if status is True:\n mo.insert_orders_to_database(orders)\n\n #Check for new orders for data base update - open\n status, orders = mo.getOrders(orderType = 'open')\n\n if status is True:\n mo.insert_orders_to_database(orders)\n\n #Check for sending warning SMS\n mo.sms_delayWarn(orders)\n \n #Print out new orders\n mo.print_orders()\n\n #Geoplotting\n mo.geo_plotter()\n\n #Check for fulfillment\n mo.fulfill_and_capture()\n\n #Define which store to use\n store = 'HK'\n mohk = ManageOrder(switch = store) #Instantiate the storeß\n \n #Check for existing orders for data base update - closed\n\n status, orders = mohk.getOrders(orderType = 'closed')\n\n if status is True:\n mohk.insert_orders_to_database(orders)\n\n #Check for new orders for data base update - open\n status, orders = mohk.getOrders(orderType = 'open')\n\n if status is True:\n mohk.insert_orders_to_database(orders)\n\n #Check for sending warning SMS\n mohk.sms_delayWarn(orders)\n \n #Print out new orders\n mohk.print_orders()\n\n #Geoplotting\n mohk.geo_plotter()\n\n #Check for fulfillment\n mohk.fulfill_and_capture()\n\n #Stamp last system up time\n mohk.stampTime()\n\n print('system working...')\n time.sleep(5)\n\n#Use multiprocessing to run main order tracking method and watch dog simultanously\nif __name__ == '__main__':\n p1 = Process(target = shopifyOrderManagement, args = ())\n p2 = Process(target = setWatchDog, args = (1, 'lastTimeStamp.txt'))\n print('Starting the main process')\n p1.start()\n print('Main process started waiting now for 60 sec...')\n time.sleep(60)\n print('60 sec wait process passed starting watch dog')\n p2.start()\n print('watch dog started. System running again!')\n p1.join()\n p2.join()\n","sub_path":"runShopify.py","file_name":"runShopify.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"616395567","text":"from typing import get_type_hints\n\ndef strict_types(function):\n def type_checker(*args, **kwargs):\n hints = get_type_hints(function)\n\n all_args = kwargs.copy()\n all_args.update(dict(zip(function.__code__.co_varnames, args)))\n\n for argument, argument_type in ((i, type(j)) for i, j in all_args.items()):\n if argument in hints:\n if not issubclass(argument_type, hints[argument]):\n raise TypeError('Type of {} is {} and not {}'.format(argument, argument_type, hints[argument]))\n\n result = function(*args, **kwargs)\n\n if 'return' in hints:\n if not isinstance(result, hints['return']):\n raise TypeError('Type of result is {} and not {}'.format(type(result), hints['return']))\n\n return result\n\n return type_checker\n","sub_path":"strict_types.py","file_name":"strict_types.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"64166397","text":"class ShortestPath:\n graph = {\n '1': ['2', '3', '4'],\n '2': ['5', '6'],\n '5': ['9', '10'],\n '4': ['7', '8'],\n '7': ['11', '12']\n }\n\n def bfs(self, graph, start, goal):\n explored = []\n\n queue = [[start]]\n\n if start == goal:\n return \"That was easy! Start = goal\"\n \n while queue:\n path = queue.pop(0)\n\n node = path[-1]\n\n if node not in explored:\n neighbors = graph[node]\n\n for neighbor in neighbors:\n new_path = list(path)\n new_path.append(neighbor)\n queue.append(new_path)\n\n if neighbor == goal:\n return new_path\n \n explored.append(node)\n \n return \"So sorry, but a connecting path doesn't exist\"","sub_path":"BFS/ShortestPath.py","file_name":"ShortestPath.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"343554415","text":"from rest_framework import generics, permissions\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\n\nfrom core import models\nfrom . import serializers\n\n\nclass ProductListApiView(generics.ListAPIView):\n \"\"\"\n API endpoint for listing available products\n \"\"\"\n serializer_class = serializers.ProductSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (JWTAuthentication,)\n queryset = models.Product.objects.all()\n\n def get_queryset(self):\n queryset = self.queryset.filter(stock__gt=0)\n return queryset\n\n\nclass OrderCreateViewSet(generics.CreateAPIView):\n \"\"\"\n API endpoint for order products\n \"\"\"\n serializer_class = serializers.OrderingSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (JWTAuthentication,)\n\n @staticmethod\n def sum_quantity_by_id(key, value, products):\n \"\"\"\n Function that adding quantity to existing product in order\n \"\"\"\n for product in products:\n if product['id'] == key:\n product['quantity'] += value\n\n def create(self, request, *args, **kwargs):\n data_items = request.data\n products = []\n\n # check product id is available and check quantity must be more than 0\n for product in data_items:\n # this error handler is for not existing product\n try:\n # try to get object by id\n temp = models.Product.objects.get(id=product['id'])\n\n # check product is available\n if temp.stock == 0:\n res = {\n 'message': 'The order contains unavailable products!'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n # check quantity that user want is positive\n if product['quantity'] <= 0:\n res = {\n 'message': f'The quantity of {temp.name} is lower than one!'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n except models.Product.DoesNotExist:\n res = {\n 'message': 'The product not found!'\n }\n return Response(res, status=status.HTTP_404_NOT_FOUND)\n\n # sum quantity of same product\n for product in data_items:\n if product['id'] not in [item['id'] for item in products]:\n temp = {\n 'id': product['id'],\n 'quantity': product['quantity']\n }\n products.append(temp)\n else:\n self.sum_quantity_by_id(product['id'], product['quantity'], products)\n\n # check quantity of products isn't more than our inventory\n for product in products:\n product_obj = models.Product.objects.get(id=product['id'])\n if product['quantity'] > product_obj.stock:\n res = {\n 'message': f'The quantity of {product_obj.name} is more than our inventory!'\n }\n return Response(res, status=status.HTTP_400_BAD_REQUEST)\n\n # save product item in database\n price = 0 # for total price\n # create new order\n order = models.Order.objects.create(user=request.user, price=price)\n\n # this is for compute total price and add order item to database\n for product in products:\n product_obj = models.Product.objects.get(id=product['id'])\n price += product_obj.price * product['quantity']\n models.OrderItem.objects.create(\n order=order,\n product_id=product['id'],\n quantity=product['quantity']\n )\n product_obj.stock -= product['quantity'] # minus product stock from quantity of this order\n product_obj.save()\n # save total price\n order.price = price\n order.save()\n\n res = {\n 'message': 'Your order has been successfully registered',\n 'data': serializers.OrderSerializer(order).data\n }\n\n return Response(\n res,\n status=status.HTTP_201_CREATED\n 
)\n","sub_path":"shopper/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"92927538","text":"# -*- coding: utf-8 -*-\n__author__ = 'Alex Bo'\n__email__ = 'bosha@the-bosha.ru'\n\nfrom tornado.web import url\n\nfrom handlers import (\n JSONBasePageHandler, JSONTestListHandler, JSONTestViewHandler, AJAXTestRemoveHandler,\n AJAXTestCreateHandler, AJAXTestEditHandler\n)\n\nurl_patterns = [\n url(r\"/\", JSONBasePageHandler, name=\"json_index\"),\n url(r\"/ajax/posts/\", JSONTestListHandler, name=\"json_list\"),\n url(r\"/ajax/posts/add/\", AJAXTestCreateHandler, name=\"json_add\"),\n url(r\"/ajax/posts/view/(?P\\d+)/\", JSONTestViewHandler, name=\"json_view\"),\n url(r\"/ajax/posts/remove/(?P\\d+)/\", AJAXTestRemoveHandler, name=\"json_remove\"),\n url(r\"/ajax/posts/edit/(?P\\d+)/\", AJAXTestEditHandler, name=\"json_edit\"),\n]\n","sub_path":"examples/ajax_weblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"425961218","text":"r\"\"\"Interpreter za programski \"jezik\" koji reprezentira liste.\nListe se pišu kao [x1,x2,...,xk], svaki xi može biti broj, string ili lista.\nBrojevi su samo prirodni (veći od 0).\nStringovi se pišu kao \"...\", gdje unutar ... ne smije biti znak \".\nStringovi se mogu pisati i kao '...', gdje unutar ... nema znaka '.\nZapravo, \"...\"-stringovi smiju sadržavati i \", ali escape-ane znakom \\.\nDakle, \\\" označava \". \\n označava novi red. \\\\ označava \\.\nUnutar '...'-stringova \\ nema nikakvo posebno značenje.\n\"\"\"\n\n\nfrom pj import *\n\nBKSL, N1, N2, NOVIRED = '\\\\', \"'\", '\"', '\\n'\n\ndef makni(it):\n \"\"\"Miče obrnute kose crte (backslashes) iz iteratora.\"\"\"\n for znak in it:\n if znak == BKSL:\n sljedeći = next(it)\n if sljedeći == 'n': yield NOVIRED\n else: yield sljedeći\n else: yield znak\n\nclass L(enum.Enum):\n UOTV, UZATV, ZAREZ = '[],'\n class BROJ(Token):\n def vrijednost(self): return int(self.sadržaj)\n class STRING(Token):\n def vrijednost(self):\n s = self.sadržaj[1:-1]\n if self.sadržaj.startswith(N2): return ''.join(makni(iter(s)))\n else: return s\n\ndef l_lex(lista):\n lex = Tokenizer(lista)\n for znak in iter(lex.čitaj, ''):\n if znak.isspace(): lex.zanemari()\n elif znak.isdigit() and znak != '0':\n lex.zvijezda(str.isdigit)\n yield lex.token(L.BROJ)\n elif znak == N1:\n lex.pročitaj_do(N1)\n yield lex.token(L.STRING)\n elif znak == N2:\n while True:\n z = lex.čitaj()\n if not z: raise lex.greška('Nezavršeni string!')\n elif z == BKSL: lex.čitaj()\n elif z == N2:\n yield lex.token(L.STRING)\n break\n else: yield lex.literal(L)\n\n# lista -> UOTV elementi UZATV\n# elementi -> element | element ZAREZ elementi | ''\n# element -> BROJ | STRING | lista\n\nclass LParser(Parser):\n def lista(self):\n self.pročitaj(L.UOTV)\n el = self.elementi()\n self.pročitaj(L.UZATV)\n return Lista(el)\n \n def elementi(self):\n rezultat = []\n if not self >= L.UZATV:\n rezultat.append(self.element())\n while self >> L.ZAREZ: rezultat.append(self.element())\n return rezultat\n\n def element(self):\n if self >= L.UOTV: return self.lista()\n else: return self.pročitaj(L.BROJ, L.STRING)\n \n start = element\n\n\nclass Lista(AST('elementi')):\n def vrijednost(self): return [el.vrijednost() for el in self.elementi]\n\n\nif __name__ == '__main__':\n print(LParser.parsiraj(l_lex(r'''\n [23, \"ab\\\"c]\", 'a[]', [2, 3], 523,\n '\"', '\\', \"\\e\", \"\\\\\"]\n ''')).vrijednost())\n\n# DZ: sve više jezika dopušta \"zarez na kraju\" stil pisanja listi\n# (npr. [2,3,] je isto što i [2,3]) -- omogućite to!)\n# DZ: omogućite razne druge \\-escape sekvence (npr. \\u za Unicode znakove)\n# DZ: omogućite izraze umjesto literala: polimorfni + za zbrajanje/konkatenaciju\n","sub_path":"PJ/10_liste_i_stringovi.py","file_name":"10_liste_i_stringovi.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"287672987","text":"import pygame\nimport glob2\nimport poker\n\ndef isBetween(cursor,card_pos):\n \"\"\"pos and card_pos are both tuples of type (x,y)\n Returns true if pos is between card_pos and card_pos+100\"\"\"\n return all([cursorpos>=cardpos and cursorpos<=cardpos+100 for cursorpos,cardpos in zip(cursor,card_pos)])\n\nclass Button():\n def __init__(self, color, x,y,width,height, text=''):\n self.color = color\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self,win,outline=None):\n #Call this method to draw the button on the screen\n if outline:\n pygame.draw.rect(win, outline, (self.x-2,self.y-2,self.width+4,self.height+4),0)\n\n pygame.draw.rect(win, self.color, (self.x,self.y,self.width,self.height),0)\n\n if self.text != '':\n font = pygame.font.SysFont('Garamond', 20)\n text = font.render(self.text, 1, white)\n win.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))\n\n def isOver(self, pos):\n #Pos is the mouse position or a tuple of (x,y) coordinates\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n\n return False\n\nclass Poker():\n def __init__(self):\n pygame.init()\n self.state = 'HandState'\n\n self.deck = poker.createDeck()\n self.black = (0,0,0)\n self.white = (255,255,255)\n self.green = (0,68,1)\n self.lightgray = (100,100,100)\n self.height = 600\n self.width = 1500\n self.mainWindow = pygame.display.set_mode((width,height))\n pygame.display.set_caption(\"Poker Calculator\")\n\n self.deckcards100 = []\n self.card_positions = []\n\n self.ConfirmButton = Button(black,25,490,150,60,'Confirm Hand')\n\n self.CardLimit = 0\n self.running = True\n\n self.myhand = []\n self.table_cards = []\n\n\n def GetCardDirectory(self):\n self.spadejpg = glob2.glob(\"deck/100Percent/spades/*.jpg\")\n self.heartsjpg = glob2.glob(\"deck/100Percent/hearts/*.jpg\")\n self.clubsjpg = glob2.glob(\"deck/100Percent/clubs/*.jpg\")\n self.diamondjpg = glob2.glob(\"deck/100Percent/diamonds/*.jpg\")\n\n for clubs in clubsjpg:\n self.deckcards100.append(pygame.image.load(clubs))\n for diamond in diamondjpg:\n self.deckcards100.append(pygame.image.load(diamond))\n for spades in spadejpg:\n self.deckcards100.append(pygame.image.load(spades))\n for hearts in heartsjpg:\n self.deckcards100.append(pygame.image.load(hearts))\n\n def GenerateCardPositions(self):\n x = 15 # initial x\n y = 25 # initial y\n for count in range(len(self.deckcards100)):\n if count % 13 == 0 and count != 0: # change row every 13 cards!\n y += 110\n x = 15\n self.card_positions.append((x,y))\n x += 110\n\n def getEvents(self):\n self.events = pyame.event.get()\n for event in self.events:\n #Quit Event\n if event.type == pygame.QUIT:\n self.running = False\n self.pos = pygame.mouse.get_pos()\n #Button Change Color Mechanic\n if event.type == pygame.MOUSEMOTION:\n if self.ConfirmButton.isOver(pos):\n self.ConfirmButton.color = self.lightgray\n else:\n self.ConfirmButton.color = self.black\n if event.type == pygame.MOUSEBUTTONDOWN:\n if pygame.mouse.get_pressed()[0]:\n for i in range(len(self.card_positions)):\n if isBetween(self.pos,self.card_positions[i]):\n if self.deckcards100[i].get_size() == (100,100) and self.CardLimit<2:\n self.deckcards100[i] = pygame.transform.scale(self.deckcards100[i],(80,80))\n self.myhand.append(deck[i])\n self.CardLimit +=1\n elif self.deckcards100[i].get_size() == (80,80):\n self.deckcards100[i] = 
pygame.transform.scale(self.deckcards100[i],(100,100))\n self.myhand.remove(deck[i])\n self.CardLimit -= 1\n def mainLoop(self):\n while self.running:\n count = 0\n for img in self.deckcards100:\n self.mainWindow.blit(img,card_positions[count])\n count += 1\n self.ConfirmButton.draw(self.mainWindow)\n\n\n\npygame.init()\n\ndeck = poker.createDeck()\n\n#Set Colors\nblack = (0,0,0)\nwhite = (255,255,255)\ngreen = (0,68,1)\nlightgray = (100,100,100)\n#Window dimensions\nheight = 600\nwidth = 1500\n\n# deselected_scale = 1.25\n# selected_scale = 0.8\n\ndeckcards100 =[]\n\nmainWindow = pygame.display.set_mode((width,height))\npygame.display.set_caption(\"Poker Calculator\")\n\n#Get directory for deck\nspadejpg = glob2.glob(\"deck/100Percent/spades/*.jpg\")\nheartsjpg = glob2.glob(\"deck/100Percent/hearts/*.jpg\")\nclubsjpg = glob2.glob(\"deck/100Percent/clubs/*.jpg\")\ndiamondjpg = glob2.glob(\"deck/100Percent/diamonds/*.jpg\")\n\n\n#get Dire\n\n#Load pygame.image into a deckcard List\n\nfor clubs in clubsjpg:\n deckcards100.append(pygame.image.load(clubs))\nfor diamond in diamondjpg:\n deckcards100.append(pygame.image.load(diamond))\nfor spades in spadejpg:\n deckcards100.append(pygame.image.load(spades))\nfor hearts in heartsjpg:\n deckcards100.append(pygame.image.load(hearts))\n\n\ncard_positions = [] # We will save the card positions here\n\n# Generate Card Positions and Save them\nx = 15 #initial x\ny = 25 # initial y\nfor count in range(len(deckcards100)):\n if count % 13 == 0 and count != 0: # change row every 13 cards!\n y += 110\n x = 15\n card_positions.append((x,y))\n x += 110\n\nConfirmButton = Button(black,25,490,150,60,'Confirm Hand')\n\nrunning = True\ncounter = 0\n#initialize lists\nmyhand = []\nflop = []\n\nwhile running:\n mainWindow.fill(green)\n\n\n #Add cards in mainWindow\n count = 0\n for img in deckcards100:\n mainWindow.blit(img,card_positions[count])\n count += 1\n\n for event in pygame.event.get():\n #Quit Event\n if event.type == pygame.QUIT:\n running = False\n pos = pygame.mouse.get_pos()\n\n\n #Button Change Color Mechanic\n if event.type == pygame.MOUSEMOTION:\n if ConfirmButton.isOver(pos):\n ConfirmButton.color = lightgray\n else:\n ConfirmButton.color = black\n #Click on Card Mechanic\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n\n if pygame.mouse.get_pressed()[0]:\n for i in range(len(card_positions)):\n if isBetween(pos,card_positions[i]):\n if deckcards100[i].get_size() == (100,100) and counter<2:\n deckcards100[i] = pygame.transform.scale(deckcards100[i],(80,80))\n myhand.append(deck[i])\n counter +=1\n elif deckcards100[i].get_size() == (80,80):\n deckcards100[i] = pygame.transform.scale(deckcards100[i],(100,100))\n myhand.remove(deck[i])\n counter -= 1\n if ConfirmButton.isOver(pos) and pygame.mouse.get_pressed()[0]:\n poker.removeCardsfromDeck(myhand,deck)\n\n ConfirmButton.draw(mainWindow)\n pygame.display.flip()\n","sub_path":"legacy/pygamewindow_withclasses.py","file_name":"pygamewindow_withclasses.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"529599435","text":"from flask import jsonify, request\nfrom flask import Flask\nimport pickle\n\n\nwith open('dv.bin', 'rb') as dv_file:\n dv = pickle.load(dv_file)\nwith open('model1.bin', 'rb') as model_file:\n model = pickle.load(model_file)\n\n\ncustomer = {\n \"contract\": \"two_year\",\n \"tenure\": 12,\n \"monthlycharges\": 19.7\n}\n\napp = Flask('churn')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n customer = request.get_json()\n\n X = dv.transform([customer])\n y_pred = model.predict_proba(X)[0, 1]\n\n result = {\n \"churn_probability\": round(float(y_pred), 3)\n }\n\n return jsonify(result)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0', port=9697)\n","sub_path":"week_5/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"485166949","text":"import collections\n\nclass Solution:\n\n # use monotonic descreasing deque\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n if not nums or k is None or k < 1:\n return []\n n, ans = len(nums), []\n left = 0\n dq = collections.deque()\n for right in range(n):\n self.push(dq, nums, right)\n length = right - left + 1\n if length > k:\n if dq[0] == left:\n dq.popleft()\n left += 1\n length -= 1\n if length == k:\n ans.append(nums[dq[0]])\n return ans\n\n def push(self, dq, nums, i):\n while dq and nums[i] > nums[dq[-1]]:\n dq.pop()\n dq.append(i)\n","sub_path":"Two pointers/239. Sliding Window Maximum.py","file_name":"239. Sliding Window Maximum.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"370385127","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云 - 监控平台 (BlueKing - Monitor) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\n\nimport json\nfrom copy import copy\n\nfrom django.utils import six\nfrom rest_framework.renderers import BaseRenderer\n\nfrom bkmonitor.utils.common_utils import DatetimeEncoder\n\n\ndef is_status_code_ok(code):\n return 200 <= code < 300\n\n\nclass UJSONRenderer(BaseRenderer):\n \"\"\"\n Renderer which serializes to JSON.\n Applies JSON's backslash-u character escaping for non-ascii characters.\n Uses the blazing-fast ujson library for serialization.\n \"\"\"\n\n media_type = \"application/json\"\n format = \"json\"\n ensure_ascii = True\n charset = None\n\n def render(self, data, *args, **kwargs):\n\n if data is None:\n return bytes()\n\n ret = json.dumps(data, ensure_ascii=self.ensure_ascii, cls=DatetimeEncoder)\n\n # force return value to unicode\n if isinstance(ret, six.text_type):\n return bytes(ret.encode(\"utf-8\"))\n return ret\n\n\nclass MonitorJSONRenderer(UJSONRenderer):\n def render(self, data, accepted_media_type=None, renderer_context=None):\n\n if hasattr(self, \"rendered_content\"):\n return self.rendered_content\n response = renderer_context[\"response\"]\n\n formatted_data = {\n \"result\": is_status_code_ok(response.status_code),\n \"code\": response.status_code,\n \"message\": \"OK\",\n }\n\n if formatted_data[\"result\"]:\n if isinstance(data, dict) and \"data\" in data and \"result\" in data:\n # 如果是字典类型且字典中已经存在键名为'data'的键\n # 说明已经处理过\n formatted_data = data\n\n else:\n if isinstance(data, dict):\n if \"results\" in data:\n origin_data = copy(data)\n data = origin_data.pop(\"results\")\n meta = origin_data\n formatted_data.update(\n {\n \"data\": data,\n \"_meta\": meta,\n }\n )\n\n formatted_data.update(\n {\n \"data\": data,\n }\n )\n else:\n formatted_data = data\n\n return super(MonitorJSONRenderer, self).render(formatted_data, accepted_media_type, renderer_context)\n","sub_path":"packages/monitor_api/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"327870929","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*- \nimport requests\nimport pymysql\nimport time\nimport sys\nimport re\nimport os\nfrom parsel import Selector\n\n\nclass douyin:\n def __init__(self):\n self.header = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': '',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.douyin.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',\n }\n self.uids = {'75382141971':'LL', '9330557830':'WD', '101019751610':'HNN'}\n self.db = pymysql.connect(\"localhost\",\"yaccai\",\"go\",\"daily\" )\n self.home = os.environ['HOME']\n\n def __del__(self):\n self.db.close()\n\n def jiexi(self, lists):\n pat = {\n u\"\\ue60d\": 0,\n u\"\\ue603\": 0,\n u\"\\ue616\": 0,\n u\"\\ue60e\": 1,\n u\"\\ue618\": 1,\n u\"\\ue602\": 1,\n u\"\\ue605\": 2,\n u\"\\ue610\": 2,\n u\"\\ue617\": 2,\n u\"\\ue611\": 3,\n u\"\\ue604\": 3,\n u\"\\ue61a\": 3,\n u\"\\ue606\": 4,\n u\"\\ue619\": 4,\n u\"\\ue60c\": 4,\n u\"\\ue60f\": 5,\n u\"\\ue607\": 5,\n u\"\\ue61b\": 5,\n u\"\\ue61f\": 6,\n u\"\\ue612\": 6,\n u\"\\ue608\": 6,\n u\"\\ue61c\": 7,\n u\"\\ue60a\": 7,\n u\"\\ue613\": 7,\n u\"\\ue60b\": 8,\n u\"\\ue61d\": 8,\n u\"\\ue614\": 8,\n u\"\\ue615\": 9,\n u\"\\ue61e\": 9,\n u\"\\ue609\": 9,\n \"w\": \"w\",\n \".\": \".\" }\n _li = list()\n for i in lists:\n if str(i).strip():\n i = i.replace(u'', \"\").strip()\n i = i.replace(u'', \"\").strip()\n i = i.replace(u'', \"\").strip()\n i = pat.get(i, i)\n _li.append(str(i))\n return \"\".join(_li)\n\n\n def fetch(self, url):\n try:\n html = requests.get(url, headers = self.header).text\n except Exception as e:\n print('error:')\n print(e)\n html = None\n return html\n\n\n def spider(self, uid):\n html = self.fetch(\"https://www.douyin.com/share/user/%s\" % uid)\n xbody = Selector(text = html)\n stmp = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n name = self.uids[uid]\n\n douyinID = xbody.xpath(\"//p[@class='shortid']\").extract_first()\n douyinID = re.findall(r'>([\\s\\S]+?)<', douyinID)\n douyinID = self.jiexi(douyinID).replace(u\"抖音ID:\", '').strip()\n # print('ID ', douyinID)\n\n douyinSID = uid\n\n nickname = xbody.xpath(\"//p[@class='nickname']/text()\").extract_first()\n # print('昵称', nickname)\n\n works = xbody.xpath(\"//div[@class='user-tab active tab get-list']/span\").extract_first()\n works = re.findall(r'>([\\s\\S]+?)<', works)\n works = int(self.jiexi(works).strip())\n # print('作品', works)\n\n like = xbody.xpath(\"//div[@class='like-tab tab get-list']/span\").extract_first()\n like = re.findall(r'>([\\s\\S]+?)<', like)\n like = int(self.jiexi(like).strip())\n # print('喜欢', like)\n\n follow = xbody.xpath(\"//span[contains(@class,'focus block')]/span[@class='num']\").extract_first()\n follow = re.findall(r'>([\\s\\S]+?)<', follow)\n follow = int(self.jiexi(follow))\n # print('关注', follow)\n\n fans = xbody.xpath(\"//span[contains(@class,'follower block')]/span[@class='num']\").extract_first()\n fans = re.findall(r'>([\\s\\S]+?)<', fans)\n fans = int(self.jiexi(fans))\n # print('粉丝', fans)\n\n liked = xbody.xpath(\"//span[contains(@class,'liked-num block')]/span[@class='num']\").extract_first()\n liked = re.findall(r'>([\\s\\S]+?)<', liked)\n liked = int(self.jiexi(liked))\n # print('获赞', liked)\n\n sql_search = \"select 
stmp, name, works, `like`, follow from douyin where name = '%s' order by stmp DESC limit 1\" % name\n cursor = self.db.cursor()\n cursor.execute(sql_search)\n predata = cursor.fetchone() # 前面的数据\n diff = ''\n flag = 0\n if predata is not None:\n if predata[2] != works:\n diff += ('%-6s: %5d ==> %-5d\\n' % ('works', predata[2], works))\n flag |= 0b001\n if predata[3] != like:\n diff += ('%-6s: %5d ==> %-5d\\n' % ('like', predata[3], like))\n flag |= 0b010\n if predata[4] != follow:\n diff += ('%-6s: %5d ==> %-5d\\n' % ('follow', predata[4], follow))\n flag |= 0b100\n if diff != '':\n fname = time.strftime(name + \".%m-%d_%H-%M.txt\", time.localtime())\n fpath = os.path.join(self.home, 'Desktop', fname)\n with open(fpath,'w') as f:\n f.write(diff)\n # os.system('say , do check ' + name)\n sql_insert = \"insert into douyin values(DEFAULT, '%s', '%s', '%s', '%s', '%d', '%d', '%d', '%d', '%d', '%d', '%s')\" % (stmp, name, douyinID, douyinSID, works, like, follow, fans, liked, flag, nickname)\n cursor.execute(sql_insert)\n self.db.commit()\n\n\n def start(self):\n for uid in self.uids:\n self.spider(uid)\n \n\nif __name__ == '__main__':\n douyin().start()","sub_path":"Users/yaccai/iconfig/launch/check/douyin.py","file_name":"douyin.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"24860066","text":"'''\nExecution:\nReal --> dispel4py simple dispel4py_RA.pgm_story.py -d '{\"streamProducer\": [ {\"input\": \"IV.MA9..HHR.START.OTLOC.SAC.20.50.real\"} ] }'\nSynth --> dispel4py simple dispel4py_RA.pgm_story.py -d '{\"streamProducer\": [ {\"input\": \"IV.MA9.HXR.semv.sac.20.50.synt\"} ] }'\n\n\nComparison:\ndispel4py simple dispel4py_RA.pgm_story.py -d '{\"streamProducerReal\": [ {\"input\": \"IV.MA9..HHR.START.OTLOC.SAC.20.50.real\"} ], \"streamProducerSynth\": [ {\"input\": \"IV.MA9.HXR.semv.sac.20.50.synt\"} ] }'\n\nOutput:\nWriteStream3: output_data is {'GroundMotion': {'stream': 'IV.MA9..HHR.START.OTLOC.SAC.20.50.real', 'ty': 'velocity', 'p_norm': 'max', 'pgd': '0.0006945877', 'pgv': '0.0002320527', 'pga': '0.00013708159', 'dmp_spec_acc': '0.00032428280150622804'}}\n\n'''\n\nfrom dispel4py.core import GenericPE\nfrom dispel4py.base import BasePE, IterativePE, ConsumerPE, create_iterative_chain\nfrom dispel4py.workflow_graph import WorkflowGraph\n\nfrom obspy.core.stream import read\nfrom obspy.signal.invsim import corn_freq_2_paz, simulate_seismometer\nfrom obspy.signal import differentiate_and_integrate as di\n\nimport math\nimport numpy as np\nimport os\nimport json\nfrom collections import defaultdict\n\ndef calculate_norm(stream):\n station = stream[0].stats.station\n channels = set()\n for tr in stream:\n if station == tr.stats.station:\n channels.add(tr.stats.channel[-1])\n else:\n return None\n\n data_mean = None\n data_max = None\n if channels < set(['R','T']) or channels < set(['N','E']):\n\n if len(stream) == 1:\n return stream[0].data.copy(), stream[0].data.copy(), None\n\n for tr in stream:\n d = tr.data.copy()\n if data_mean is None:\n data_mean = np.square(d)\n data_max = np.abs(d)\n else:\n data_mean = data + np.square(d)\n data_max = data + np.abs(d)\n\n data_mean = np.sqrt(data)\n data_max = np.max(data)\n\n return data_mean, data_max, d\n\ndef calculate_pgm(data, ty, delta):\n pgm = max(abs(data))\n if ty == 'velocity':\n pgv = pgm\n int_data = di.integrate_cumtrapz(data, delta)\n pgd = max(abs(int_data))\n grad_data = np.gradient(data, delta)\n pga = max(abs(grad_data))\n elif ty == 'displacement':\n pgd = pgm\n grad_data = np.gradient(data, delta)\n pgv = max(abs(grad_data))\n grad2_data = np.gradient(grad_data, delta)\n pga = max(abs(grad2_data))\n elif ty == 'acceleration':\n pga = pgm\n int_data = di.integrate_cumtrapz(data, delta)\n pgv = max(abs(int_data))\n int2_data = di.integrate_cumtrapz(int_data, delta)\n pgd = max(abs(int2_data))\n return pgd, pgv, pga\n\ndef calculate_damped_spectral_acc(data,delta,freq,damp,ty):\n\n samp_rate = 1.0 / delta\n t = freq * 1.0\n d = damp\n omega = (2 * math.pi * t) ** 2\n\n paz_sa = corn_freq_2_paz(t, damp=d)\n paz_sa['sensitivity'] = omega\n paz_sa['zeros'] = []\n\n if ty == 'displacement':\n data = np.gradient(data, delta)\n data = np.gradient(data, delta)\n elif ty == 'velocity':\n data = np.gradient(data, delta)\n\n data = simulate_seismometer(data, samp_rate, paz_remove=None,\n paz_simulate=paz_sa, taper=True,\n simulate_sensitivity=True, taper_fraction=0.05)\n dmp_spec_acc = max(abs(data))\n\n return dmp_spec_acc\n\n\nclass StreamProducer(IterativePE):\n\n def __init__(self, label):\n IterativePE.__init__(self)\n self.label = label\n\n def _process(self, input):\n filename = '{}/{}'.format(os.environ['STAGED_DATA'], input)\n self.write('output', [read(filename), self.label])\n\n\nclass NormPE(GenericPE):\n def __init__(self):\n GenericPE.__init__(self)\n self._add_input(\"input\")\n 
self._add_output(\"output_mean\")\n self._add_output(\"output_max\")\n\n def _process(self, data):\n stream, filename = data['input']\n data_mean, data_max, d = calculate_norm(stream)\n self.write('output_mean', [stream, filename, data_mean, 'mean'])\n self.write('output_max', [stream, filename, data_max, 'max'])\n\n\nclass PeakGroundMotion(IterativePE):\n def __init__(self,ty,freq=(0.3, 1.0, 3.0),damp=0.1):\n IterativePE.__init__(self)\n self.ty=ty\n self.frequencies = freq\n self.damp = damp\n\n def _process(self, s_data):\n stream, filename, data, p_norm = s_data\n delta = stream[0].stats.delta\n pgd, pgv, pga = calculate_pgm(data, self.ty, delta)\n dmp_spec_acc = {}\n for freq in self.frequencies:\n dmp = calculate_damped_spectral_acc(data, delta, freq, self.damp, self.ty)\n dmp_spec_acc['PSA_{}Hz'.format(freq)] = dmp.item()\n\n results = {\n 'PGD': pgd.item(),\n 'PGV': pgv.item(),\n 'PGA': pga.item(),\n 'p_norm': p_norm\n }\n results.update(dmp_spec_acc)\n self.write('output', [\n stream[0].stats.station,\n filename, stream, self.ty, results]\n )\n\n\nclass Match(GenericPE):\n def __init__(self):\n GenericPE.__init__(self)\n self._add_input('input', grouping=[0])\n self._add_output('output')\n self.store = defaultdict(lambda: {})\n\n def _process(self, data):\n station, label,stream, ty, pgm = data['input']\n p_norm = pgm['p_norm']\n self.store[(station, p_norm)][label] = stream, ty, pgm\n if len(self.store[(station, p_norm)]) >= 2:\n print('output: {} {}'.format(station, p_norm))\n self.write('output', [station, p_norm, self.store[(station, p_norm)]])\n del self.store[station, p_norm]\n\n\ndef comp(real_param, synt_param):\n result_diff = real_param - synt_param\n result_rel_diff = (real_param - synt_param)/real_param\n return result_diff, result_rel_diff\n\n\nclass WriteGeoJSON(ConsumerPE):\n def __init__(self):\n ConsumerPE.__init__(self)\n\n def _process(self, data):\n station, p_norm, matching_data = data\n\n difference = { }\n relative_difference = {}\n stream_r, ty_r, pgm_r = matching_data['real']\n stream_s, ty_s, pgm_s = matching_data['synth']\n try:\n sac = stream_r[0].stats.sac\n coordinates = [sac.stla.item(), sac.stlo.item()]\n except:\n coordinates = []\n for param in pgm_r:\n if param == 'p_norm':\n continue\n diff, rel_diff = comp(pgm_r[param], pgm_s[param])\n difference[param] = diff\n relative_difference[param] = rel_diff\n\n output_dir = os.environ['OUTPUT']\n if not os.path.exists(output_dir):\n try:\n os.makedirs(output_dir)\n except:\n pass\n output_data={\n \"type\": \"Feature\",\n \"properties\": {\n \"station\": station,\n \"data\": pgm_r,\n \"synt\": pgm_s,\n \"difference\": difference,\n \"relative_difference\": relative_difference,\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": coordinates\n }\n }\n }\n # self.log(\"output_data is %s\" % json.dumps(output_data))\n filename = \"{}/{}_{}.json\".format(output_dir, station, p_norm)\n with open(filename, 'w') as outfile:\n json.dump(output_data, outfile)\n\n\nstreamProducerReal=StreamProducer('real')\nstreamProducerReal.name=\"streamProducerReal\"\nstreamProducerSynth=StreamProducer('synth')\nstreamProducerSynth.name='streamProducerSynth'\nnorm=NormPE()\npgm_mean=PeakGroundMotion('velocity')\npgm_max=PeakGroundMotion('velocity')\nmatch = Match()\nwrite_stream = WriteGeoJSON()\n\n\ngraph = WorkflowGraph()\ngraph.connect(streamProducerReal, 'output', norm,'input')\ngraph.connect(streamProducerSynth, 'output', norm,'input')\ngraph.connect(norm, 'output_mean', pgm_mean,'input')\ngraph.connect(norm, 
'output_max', pgm_max,'input')\ngraph.connect(pgm_max, 'output', match, 'input')\ngraph.connect(pgm_mean, 'output', match, 'input')\ngraph.connect(match,'output',write_stream,'input')\n","sub_path":"processing_elements/CWL_total_staged/dispel4py_RA.pgm_story.py","file_name":"dispel4py_RA.pgm_story.py","file_ext":"py","file_size_in_byte":8198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"109790470","text":"import pygame\npygame.init()\n#创建窗口\nscreen=pygame.display.set_mode((480,700))\n#加载背景图像\nbg=pygame.image.load(\"./images/background.png\")\nscreen.blit(bg,(0,0))\npygame.display.update()\n#加载飞机图像\nplane=pygame.image.load(\"./images/me1.png\")\nscreen.blit(plane,(185,500))\npygame.display.update()\n#创建时钟对象\nclock=pygame.time.Clock()\n#定义rect记录飞机初始位置\nplane_rect=pygame.Rect(185,500,102,126)\n#游戏循环\nwhile True:\n #游戏循环内部执行频率\n clock.tick(60)\n #修改飞机位置\n plane_rect.y -= 1\n if plane_rect.y+plane_rect.height<=0:\n plane_rect.y=700\n #绘制修改后图像\n screen.blit(bg,(0,0))\n screen.blit(plane,plane_rect)\n #更新显示\n pygame.display.update()\npygame.quit()","sub_path":"text/planegame_04_飞机循环.py","file_name":"planegame_04_飞机循环.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"575236298","text":"#!/usr/bin/env python3\n\nclass Solution:\n def uniqueOccurrences(self, arr) -> bool:\n from collections import Counter\n v = Counter(arr).values()\n return len(v) == len(set(v))\n \n\ns = Solution()\ndef test(arr, expected):\n assert s.uniqueOccurrences(arr) == expected\n\ntest([1,2,2,1,1,3], True)\ntest([1], True)\ntest([1,2], False)\ntest([-3,0,1,-3,1,1,1,-3,10,0], True)","sub_path":"1207_unique_number_of_occurrences.py","file_name":"1207_unique_number_of_occurrences.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"327630202","text":"from tree import TypeTree, volpe_assert, get_obj_key_value\nfrom volpe_types import is_int, is_flt, is_char\n\n\ndef math(self, tree: TypeTree):\n values = self.visit_children(tree)\n if is_int(tree.return_type):\n return getattr(self, tree.data + \"_int\")(values)\n if is_char(tree.return_type):\n # Use unsigned division and modulus for chars\n if tree.data in [\"div\", \"mod\"]: \n return getattr(self, tree.data + \"_uint\")(values)\n return getattr(self, tree.data + \"_int\")(values)\n if is_flt(tree.return_type):\n return getattr(self, tree.data + \"_flt\")(values)\n assert False, \"can't happen\"\n\n\ndef comp(self, tree: TypeTree):\n values = self.visit_children(tree)\n if is_int(tree.children[0].return_type) or is_char(tree.children[0].return_type):\n return getattr(self, tree.data + \"_int\")(values)\n if is_flt(tree.children[0].return_type):\n return getattr(self, tree.data + \"_flt\")(values)\n assert False, \"can't happen\"\n\n\ndef assign(self, tree: TypeTree, value):\n if tree.data == \"object\":\n for i, child in enumerate(tree.children):\n key, attribute = get_obj_key_value(child, i)\n index = list(value.type.type_dict.keys()).index(key)\n assign(self, attribute, self.builder.extract_value(value, index))\n\n elif tree.data == \"attribute\":\n obj = tree.children[0].return_type\n index = list(obj.type_dict.keys()).index(tree.children[1])\n value = self.builder.insert_value(self.visit(tree.children[0]), value, index)\n # update scope\n assign(self, tree.children[0], value)\n\n elif tree.data == \"array\":\n for i, child in enumerate(tree.children):\n assign(self, child, self.builder.extract_element(value, i))\n\n elif tree.data == \"array_index\":\n array, index = self.visit_children(tree)\n new_array = self.builder.insert_element(self.visit(tree.children[0]), value, index)\n assign(self, tree.children[0], new_array)\n\n else:\n volpe_assert(tree.data == \"symbol\", f\"cannot assign to {tree.data}\", tree)\n name = tree.children[0].value\n self.local_scope[name] = value\n","sub_path":"volpe/builder_utils.py","file_name":"builder_utils.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"100647855","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nfrom sklearn_porter import Porter\n\n\niris_data = load_iris()\nX = iris_data.data\ny = iris_data.target\n\nclf = KNeighborsClassifier(algorithm='brute',\n n_neighbors=3,\n weights='uniform')\nclf.fit(X, y)\n\nporter = Porter(clf, language='js')\noutput = porter.export(export_data=True)\nprint(output)\n\n\"\"\"\nif (typeof XMLHttpRequest === 'undefined') {\n var XMLHttpRequest = require(\"xmlhttprequest\").XMLHttpRequest;\n}\n\nvar KNeighborsClassifier = function(jsonFile) {\n this.data = undefined;\n\n var Neighbor = function(y, dist) {\n this.y = y;\n this.dist = dist;\n };\n\n var promise = new Promise(function(resolve, reject) {\n var httpRequest = new XMLHttpRequest();\n httpRequest.onreadystatechange = function() {\n if (httpRequest.readyState === 4) {\n if (httpRequest.status === 200) {\n resolve(JSON.parse(httpRequest.responseText));\n } else {\n reject(new Error(httpRequest.statusText));\n }\n }\n };\n httpRequest.open('GET', jsonFile, true);\n httpRequest.send();\n });\n\n var compute = function(temp, cand, q) {\n var dist = 0.,\n diff;\n for (var i = 0, l = temp.length; i < l; i++) {\n \t diff = Math.abs(temp[i] - cand[i]);\n \t if (q==1) {\n \t dist += diff;\n \t } else if (q==2) {\n \t dist += diff*diff;\n \t } else if (q==Number.POSITIVE_INFINITY) {\n \t if (diff > dist) {\n \t dist = diff;\n \t }\n \t } else {\n \t dist += Math.pow(diff, q);\n }\n }\n if (q==1 || q==Number.POSITIVE_INFINITY) {\n return dist;\n } else if (q==2) {\n return Math.sqrt(dist);\n } else {\n return Math.pow(dist, 1. / q);\n }\n };\n\n this.predict = function(features) {\n return new Promise(function(resolve, reject) {\n promise.then(function(data) {\n if (typeof this.data === 'undefined') {\n this.data = data;\n this.nTemplates = this.data.X.length;\n }\n var classIdx = 0, i, dist;\n if (this.data.nNeighbors == 1) {\n var minDist = Number.POSITIVE_INFINITY;\n for (i = 0; i < this.data.nTemplates; i++) {\n dist = compute(this.data.X[i], features, this.data.power);\n if (dist <= minDist) {\n minDist = dist;\n classIdx = this.data.y[i];\n }\n }\n } else {\n var classes = new Array(this.data.nClasses).fill(0);\n var dists = [];\n for (i = 0; i < this.nTemplates; i++) {\n dist = compute(this.data.X[i], features, this.data.power);\n dists.push(new Neighbor(this.data.y[i], dist));\n }\n dists.sort(function compare(n1, n2) {\n return (n1.dist < n2.dist) ? -1 : 1;\n });\n for (i = 0; i < this.data.kNeighbors; i++) {\n classes[dists[i].y]++;\n }\n for (i = 0; i < this.data.nClasses; i++) {\n classIdx = classes[i] > classes[classIdx] ? i : classIdx;\n }\n }\n resolve(classIdx);\n }, function(error) {\n reject(error);\n });\n });\n };\n\n};\n\nif (typeof process !== 'undefined' && typeof process.argv !== 'undefined') {\n if (process.argv[2].trim().endsWith('.json')) {\n\n // Features:\n var features = process.argv.slice(3);\n\n // Parameters:\n var json = process.argv[2];\n\n // Estimator:\n var clf = new KNeighborsClassifier(json);\n\n // Prediction:\n clf.predict(features).then(function(prediction) {\n console.log(prediction);\n });\n\n }\n}\n\"\"\"\n","sub_path":"examples/estimator/classifier/KNeighborsClassifier/js/basics_imported.py","file_name":"basics_imported.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"614995519","text":"# -*- coding: utf-8 -*-\n# Soohwan Kim @sooftware\n# This source code is licensed under the Apache 2.0 License license found in the\n# LICENSE file in the root directory of this source tree\n\nimport torch\nimport numpy as np\nfrom tacotron2.model.tacotron2 import Tacotron2\nfrom .args import DefaultArgument\n\nbatch_size = 3\nseq_length = 3\n\ninputs = torch.LongTensor(np.arange(batch_size * seq_length).reshape(batch_size, seq_length))\ninput_lengths = torch.LongTensor([3, 3, 2])\ntargets = torch.FloatTensor(batch_size, 100, 80).uniform_(-0.1, 0.1)\n\nargs = DefaultArgument()\nmodel = Tacotron2(args)\noutput = model(inputs, targets, input_lengths)\n\nprint(model)\nprint(output)\n","sub_path":"test/test_tacotron2.py","file_name":"test_tacotron2.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"133077646","text":"import cv2\n\ndef find(img,x,y):\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n dst = cv2.inRange(gray,x,y)\n\n cv2.imshow(\"2\",dst)\n\n cnt = cv2.findContours(dst,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n c = max(cnt,key = cv2.contourArea)\n\n ((x,y),radius) = cv2.minEnclosingCircle(c)\n\n M = cv2.moments(c)\n print(M)\n center = (int(M[\"m10\"]/(0.01+M[\"m00\"])), int(M[\"m01\"]/(0.01+M[\"m00\"]))) \n ptx=int(M[\"m10\"]/(0.01+M[\"m00\"]))\n pty=int(M[\"m01\"]/(0.01+M[\"m00\"]))\n\n cv2.circle(img,center,20,(255,0,0),3)\n\n cv2.imshow(\"after\",img)\n\n cv2.waitKey(0)\n\n\n\nimg = cv2.imread(\"t.jpeg\")\n\nfind(img,200,255)\n","sub_path":"Python/task1-3/c3/Find_C.py","file_name":"Find_C.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"15899671","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\nimport sys\nfrom sklearn.datasets import load_svmlight_file\nimport random\nfrom datetime import datetime\nimport math\n\ndef main():\n\n\t# Get training file name from the command line\n\ttraindatafile = sys.argv[1];\n\t# For how many iterations do we wish to execute SCD?\n\tn_iter = int(sys.argv[2]);\n\t# After how many iterations do we want to timestamp?\n\tspacing = int(sys.argv[3]);\n\t\n\t# The training file is in libSVM format\n\ttr_data = load_svmlight_file(traindatafile);\n\n\tXtr = tr_data[0]; # Training features in sparse format\n\tYtr = tr_data[1]; # Training labels\n\t\n\t# We have n data points each in d-dimensions\n\tn, d = Xtr.get_shape();\n\t\n\t# The labels are named 1 and 2 in the data set. Convert them to our standard -1 and 1 labels\n\tYtr = 2*(Ytr - 1.5);\n\tYtr = Ytr.astype(int);\n\t#convert Ytr into csr matrix for calculatioins\n\tYtr=csr_matrix(Ytr).T;\n\t\n\t# Optional: densify the features matrix.\n\t# Warning: will slow down computations\n\tX = Xtr.toarray();\n\t\n\t# Initialize model\n\t# For dual SCD, you will need to maintain d_alpha and w\n\t# Note: if you have densified the Xt matrix then you can initialize w as a NumPy array\n\tw = csr_matrix((1, d));\n\td_alpha = np.zeros((n,));\n\t\n\t# We will take a timestamp after every \"spacing\" iterations\n\ttime_elapsed = np.zeros(math.ceil(n_iter/spacing));\n\ttick_vals = np.zeros(math.ceil(n_iter/spacing));\n\tobj_val = np.zeros(math.ceil(n_iter/spacing));\n\t\n\ttick = 0;\n\t\n\tttot = 0.0;\n\tt_start = datetime.now();\n\t\n\tfor t in range(n_iter):\t\t\n\t\t### Doing dual SCD ###\n\t\t\n\t\t# Choose a random coordinate from 0 to n-1\n\t\ti_rand = random.randint(0,n-1);\n\t\t\n\t\t# Store the old and compute the new value of alpha along that coordinate\n\t\td_alpha_old = d_alpha[i_rand];\n\t\tQ=X[i_rand].T.dot(X[i_rand]);\n\t\t#print(Xtr[i_rand].shape)\n\t\t#print(w.shape)\n\t\tA=w*Xtr[i_rand].T\n\t\tDelta=A*(Ytr[i_rand]).toarray()-1;\n\t\td_alpha[i_rand] = min(max(d_alpha_old-Delta/Q,0),1);\n\t\t\n\t\t# Update the model - takes only O(d) time!\n\t\tw = w + (d_alpha[i_rand] - d_alpha_old)*Ytr[i_rand]*Xtr.getrow(i_rand);\n\t\t\n\t\t# Take a snapshot after every few iterations\n\t\t# Take snapshots after every spacing = 5000 or so SCD iterations since they are fast\n\t\t# if t%spacing == 0:\n\t\t# \t# Stop the timer - we want to take a snapshot\n\t\t# \tt_now = datetime.now();\n\t\t# \tdelta = t_now - t_start;\n\t\t# \ttime_elapsed[tick] = ttot + delta.total_seconds();\n\t\t# \tttot = time_elapsed[tick];\n\t\t# \ttick_vals[tick] = tick;\n\t\t# \tip=Xtr.dot(w.T);\n\t\t# \thinge=1-(Ytr.multiply(ip)).toarray();\n\t\t# \tgrmlt=np.sign(hinge);\n\t\t# \thinge=(hinge+hinge*grmlt)/2;\n\t\t# \tobj_val[tick] = 0.5* w.dot(w.T) + hinge.sum(axis=0); # Calculate the objective value f(w) for the current model w^t or the current averaged model \\bar{w}^t\n\t\t# \tprint(delta.total_seconds(),obj_val[tick]);\n\t\t# \ttick = tick+1;\n\t\t# \t# Start the timer again - training time!\n\t\t# \tt_start = datetime.now();\n\t\t\t\n\tw_final = w.toarray();\n\tnp.save(\"model_SCD.npy\", w_final);\n\t# np.save(\"obj_val_SCD.npy\",obj_val);\n\t# np.save(\"time_elapsed_SCD.npy\",time_elapsed);\n\t\t\nif __name__ == '__main__':\n main()","sub_path":"CS771/Assignments/assn2/solver_SCD.py","file_name":"solver_SCD.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"242430318","text":"class Solution(object):\n def subarraySum(self, nums, k):\n count = 0\n sums = 0\n d = dict()\n d[0] = 1\n\n for i in range(len(nums)):\n sums += nums[i]\n count += d.get(sums - k, 0)\n d[sums] = d.get(sums, 0) + 1\n\n return (count)\nif __name__ == '__main__':\n f = Solution()\n nums = [-1,-1,1]\n k = 0\n print(f.subarraySum(nums,k))","sub_path":"FB/subarrayEqK.py","file_name":"subarrayEqK.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"591020224","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\n\n# create a synthetic dataset and save to data.csv\nif __name__ == '__main__':\n # create a linear series y= 10 + 0.5 * x plus random gaussian noise\n n = 100\n x = np.random.uniform(0, 5, n)\n y = 10 + 0.5 * x + np.random.normal(0, 0.2, n)\n\n # make outliers\n mean = np.mean(y)\n sd = np.std(y)\n cutoff = [2.4, 2.9, 3.4]\n for i in range(len(cutoff)):\n y[i * 2] = mean + cutoff[i] * sd\n y[i * 2 + 1] = mean - cutoff[i] * sd\n\n # save file\n df = pd.DataFrame(np.column_stack((x, y)), columns=['x', 'y'])\n df.to_csv('data.csv', index=False)\n","sub_path":"example/simple/gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"304470490","text":"#! C:/Bat/python.bat -3.10\nimport sys\nimport os\n\n\ndef getdirs(repertoire):\n dirs = []\n files = []\n try:\n for fichier in os.scandir(repertoire):\n if fichier.is_dir():\n if os.path.exists(os.path.join(repertoire, fichier.name, \"__init__.py\")):\n dirs.append(fichier.name)\n\n elif fichier.name.endswith(\".py\"):\n files.append(fichier.name)\n\n print(\"-\", repertoire, \"(\", len(dirs) + len(files), \")\")\n for rep in sorted(dirs):\n print(f\" /{rep}\")\n for file in sorted(files):\n print(f\" > {file}\")\n print()\n\n except NotADirectoryError:\n pass\n\n\nif True:\n print(\"Liste des modules :\")\n for module in sorted(sys.modules):\n if module[0] != \"_\":\n print(\"-\", f\"{module:20} :\", sys.modules[module])\n\nif sys.version_info[:2] < (3, 10):\n print(\"Veuillez vérifier que vous utilisez la version 3.10 de python\")\n exit(1)\n\nprint(\"Current diretory:\", os.getcwd())\nprint(\"Version de Python:\", sys.version)\n\nprint(\"\\nRecherche des modules :\")\nfor chemin in sys.path:\n getdirs(chemin)\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"172832071","text":"numList = [9,8,7,6,5,4,3,2,1]\n\ndef quic_sort(L, left, right):\n if left <= right:\n key = L[left]\n i = left\n j = right\n while i < j:\n while i < j and key <= L[j]:\n j -= 1\n L[i] = L[j]\n while i < j and L[i] <= key:\n i += 1\n L[j] = L[i]\n L[i] = key\n quic_sort(L, left, i - 1)\n quic_sort(L, i + 1, right)\n\nif __name__ == '__main__':\n quic_sort(numList, 0, len(numList) - 1)\n print(numList)\n\n","sub_path":"quic_sort.py","file_name":"quic_sort.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"174423476","text":"#!/usr/bin/env python3\n\nfrom datetime import date, datetime\n\nfrom telethon import TelegramClient\nfrom telethon.tl.functions.channels import GetParticipantsRequest\nfrom telethon.tl.functions.contacts import ResolveUsernameRequest\nfrom telethon.tl.types import ChannelParticipantsSearch\n\nfrom IryoAirdrop.groupUsers import config\n\n\nclass TelegramGroupChecker():\n client = None\n\n def json_serial(obj):\n \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n\n LIMIT = 100\n def connect(self):\n self.client = TelegramClient(config.phone, config.api_id, config.api_hash)\n self.client.connect()\n\n print (\"Is user connected:\" + \"YES\" if self.client.is_user_authorized() else \" NO\")\n\n if not self.client.is_user_authorized():\n self.client.send_code_request(config.phone)\n self.client.sign_in(code=int(input('Enter code: ')))\n\n #client.sign_in(\"\"\"phone=config.phone\"\"\")\n #client.start(phone=config.phone)\n #me = client.sign_in(code=int(input('Enter code: ')))\n\n def isUserJoined(self, userID, channelName):\n try:\n LIMIT = 200\n channel = self.client(ResolveUsernameRequest(channelName)).chats[0]\n\n offset = 0\n output = []\n while True:\n participants = self.client(GetParticipantsRequest(\n channel, ChannelParticipantsSearch(''), offset, LIMIT, hash=0))\n if not participants.users:\n break\n\n offset += len(participants.users)\n\n for user in participants.users:\n if userID == user.id:\n return True\n return False\n except Exception as e:\n return 0","sub_path":"IryoAirdrop-master/groupUsers/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"436900554","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/unimr/red5/protectedvod/utils.py\n# Compiled at: 2009-08-19 12:31:49\nfrom zope.interface import implements\nfrom Acquisition import aq_inner\nfrom Products.Five.browser import BrowserView\nfrom Products.PythonScripts.standard import url_quote_plus\nfrom Products.CMFCore.utils import getToolByName\nfrom DateTime import DateTime\nfrom plone.memoize.view import memoize\nfrom interfaces import IRed5ProtectedVodTool\nimport logging, hmac\nlogger = logging.getLogger('unimr.red5.protectedvod')\n\nclass Red5ProtectedVodTool(BrowserView):\n \"\"\"A view that implements a hmac algorithm for url signatures\n in interaction with a red5 streaming server\n \"\"\"\n __module__ = __name__\n implements(IRed5ProtectedVodTool)\n\n def netConnectionUrl(self, fieldname='file'):\n \"\"\" returns the netConnectionUrl including path, signature and expire date\"\"\"\n data = self._signature_data(fieldname=fieldname)\n return '%(server_url)s/%(path)s/%(signature)s/%(expires)s' % data\n\n def clip(self, fieldname='file'):\n \"\"\" return clip's name \"\"\"\n data = self._signature_data(fieldname=fieldname)\n return '%(filename)s' % data\n\n @memoize\n def _signature_data(self, fieldname='file'):\n context = aq_inner(self.context)\n request = self.request\n properties_tool = getToolByName(context, 'portal_properties')\n hmac_properties = getattr(properties_tool, 'red5_protectedvod_properties', None)\n red5_server_url = hmac_properties.getProperty('red5_server_url')\n red5_server_url = red5_server_url.rstrip('/')\n secret_phrase = hmac_properties.getProperty('secret')\n try:\n ttl = int(hmac_properties.getProperty('ttl'))\n except ValueError:\n ttl = 60\n\n clientip = request.get('HTTP_X_FORWARDED_FOR', None)\n if not clientip:\n clientip = request.get('REMOTE_ADDR', None)\n expires = '%08x' % (DateTime().timeTime() + ttl)\n (path, filename) = self._fss_info(fieldname)\n sign_path = '/%s/' % (path,)\n signature = hmac_hexdigest(secret_phrase, [sign_path, filename, clientip, expires])\n data = {'server_url': red5_server_url, 'sign_path': sign_path, 'path': path, 'filename': filename, 'expires': expires, 'clientip': clientip, 'signature': url_quote_plus(signature)}\n logger.debug(data)\n return data\n\n def _fss_info(self, fieldname='file'):\n context = aq_inner(self.context)\n field = context.getField(fieldname)\n storage = field.storage\n try:\n info = storage.getFSSInfo(fieldname, context)\n strategy = storage.getStorageStrategy(fieldname, context)\n props = storage.getStorageStrategyProperties(fieldname, context, info)\n except AttributeError:\n logger.error('cannot retrieve fss properties. 
fss installed?')\n return\n\n valueDirectoryPath = strategy.getValueDirectoryPath(**props)\n valueFilename = strategy.getValueFilename(**props)\n length = len(strategy.storage_path.split('/'))\n path = ('/').join(valueDirectoryPath.split('/')[length - 1:]).strip('/')\n return (\n path, valueFilename)\n\n\ndef hmac_hexdigest(secret, update_list):\n \"\"\" returns a hex encoded digest of signature \"\"\"\n mac = hmac.new(secret)\n for s in update_list:\n mac.update(s)\n\n return mac.hexdigest()","sub_path":"pycfiles/unimr.red5.protectedvod-0.1rc1_r96721-py2.4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"198426875","text":"from mstrio.utils.helper import response_handler\n\n\ndef projects(connection, error_msg=None, verbose=False):\n \"\"\"\n Args:\n connection: MicroStrategy REST API connection object\n verbose (bool, optional): Verbosity of server responses; defaults to False.\n Returns:\n Complete HTTP response object\n \"\"\"\n\n response = connection.session.get(url=connection.base_url + '/api/projects',\n headers={'X-MSTR-ProjectID': None})\n if verbose:\n print(response.url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error connecting to project. Check project name and try again.\"\n response_handler(response, error_msg)\n return response\n","sub_path":"mstrio/api/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"343122455","text":"# coding=gbk\nimport json\nimport math\nimport os\nimport sys\nimport time\nimport time\nimport requests\nimport multiprocessing\nimport execjs\nfrom urllib.parse import urlencode\nimport re\nimport shutil\nimport js2xml\nfrom lxml import etree\nclass douyu:\n def __init__(self,query):\n self.query=query\n\n pass\n\n def loder(self,url):\n \"\"\"直接请求ts文件的url然后在写入到本地\"\"\"\n html = requests.get(url).content\n l=url.find(\".ts?\")\n\n i = url[l-7:l]\n print(i)\n folder = \"F:\\movies\\%s\" % self.query[\"owner\"]\n if os.path.isdir(folder) is False:\n os.system(\"md %s\" % folder)\n with open(r\"%s\\%07s.ts\" % (folder, i), \"wb\") as f:\n f.write(html)\n\n def ts_to_mp4(self):\n print('ts文件正在进行转录mp4......')\n folder=\"F:\\movies\\%s\" % self.query[\"owner\"]\n\n str = \"cd /d %s\\\\ && copy /b *.ts %s.mp4\"%(folder,self.query[\"title\"]) # copy /b 命令\n print(str)\n os.system(str)\n filename = folder+\"\\/\"+ self.query[\"title\"] + '.mp4'\n if os.path.isfile(filename):\n te = \"del %s\\\\*.ts\"%folder\n print(te)\n os.system(te)\n print('转换完成,祝你观影愉快')\n\n # shutil.rmtree(\"test\")\n\n def get_js(self):\n f = open(\".\\key.js\", 'r', encoding='UTF-8')\n line = f.readline()\n htmlstr = ''\n while line:\n htmlstr = htmlstr + line\n line = f.readline()\n return htmlstr\n\n\n def run(self):\n dom = requests.get(self.query[\"url\"])\n dom = etree.HTML(dom.content)\n\n jstext=\"var CryptoJS = require('crypto-js');\"+dom.xpath(\"//script\")[2].text\n ub98484234 = execjs.compile(jstext)\n data = ub98484234.call('ub98484234', self.query[\"vid\"], \"10000000000000000000000000001501\", int(time.time()))\n data = data + \"&vid=\" + self.query[\"hashId\"]\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n print(data)\n res = requests.post(\"https://v.douyu.com/api/stream/getStreamUrl\", data=data, headers=headers)\n print(res.json())\n list = res.json()[\"data\"][\"thumb_video\"][\"high\"][\"url\"]\n tss = requests.get(list)\n\n url = list[:list.find(\"playlist.m3u8\")]\n lis = [url + x for x in tss.text.split(\"\\n\") if x != \"\" and x[0] != \"#\"]\n pool = multiprocessing.Pool(processes=3)\n pool.map(self.loder, lis)\n pool.close()\n pool.join()\n self.ts_to_mp4()\n\nif __name__ == \"__main__\":\n query = {\n \"kw\": 7302297,\n \"page\": 1,\n \"pageSize\": 20,\n \"filterType\": 0,\n \"tabType\": 1\n\n }\n res = requests.get(\"https://www.douyu.com/japi/search/api/searchVideo?\" + urlencode(query))\n total = res.json()[\"data\"][\"total\"]\n lists = []\n for i in range(1, math.ceil(total / 20) + 1):\n query[\"page\"] = i\n print(\"current \"+str(i))\n res = requests.get(\"https://www.douyu.com/japi/search/api/searchVideo?\" + urlencode(query))\n videos = res.json()[\"data\"][\"relateVideo\"]\n lists.append(videos)\n print(len(videos))\n for v in range(len(videos)):\n temp = videos[v]\n temp[\"owner\"] = \"V\" + temp[\"owner\"]\n temp[\"title\"] = temp[\"title\"].replace(\" \", \".\").replace(\":\", \".\")+\"-\"+temp[\"hashId\"]\n\n url = temp[\"url\"]\n folder = \"F:\\movies\\%s\" % temp[\"owner\"]\n filename = folder + \"\\/\" + temp[\"title\"] + '.mp4'\n print(filename)\n if os.path.isfile(filename):\n print(temp[\"title\"] + '.mp4' + \"已存在\")\n continue\n dou = douyu(temp)\n dou.run()\n f=open(\"F:\\movies\\catalogue.json\", 'w', encoding='utf-8')\n json.dump(lists, 
f)\n\n\n\n\n\n","sub_path":"hack/include/douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"507404184","text":"from google.appengine.ext import db\nfrom google.appengine.tools import bulkloader\n\nfrom treasure_raider.models import Experience_limits\n\nclass Experience_limits_loader(bulkloader.Loader):\n def __init__(self):\n bulkloader.Loader.__init__(self, 'Experience_limits',\n [('key_name', str),\n ('group', str),\n ('experience_level', int),\n ('min_experience_points', int),\n ('next_experience_points', int),\n ('air_capacity_reward', int),\n ('cash_reward', int),\n ('coins_reward', int),\n ])\n\nloaders = [Experience_limits_loader]\n\n#newline characters may need to be converted with tr command\n#tr '\\r' '\\n' < macfile.txt > unixfile.txt","sub_path":"app/apps/treasure_raider/loaders/experience_limit_loader.py","file_name":"experience_limit_loader.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"311770491","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 12:04:28 2018\n\n@author: Luc\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import vonmises\nimport matplotlib.pyplot as plt\n\n\n# Create parameter space and initialize prior and likelihood\nclass PSI_RiF:\n\n def __init__(self, rods, frames, kappa_oto, kappa_ver, kappa_hor, tau):\n\n self.rods = rods\n self.frames = frames\n self.kappa_oto = kappa_oto\n self.kappa_ver = kappa_ver\n self.kappa_hor = kappa_hor\n self.tau = tau\n\n # dimensions of the 2D stimulus space\n self.rod_num = len(self.rods);\n self.frame_num = len(self.frames);\n\n # dimensions of the parameter space\n kappa_oto_num = len(self.kappa_oto);\n kappa_ver_num = len(self.kappa_ver);\n kappa_hor_num = len(self.kappa_hor);\n tau_num = len(self.tau);\n \n # the rods I need for the cumulative density function\n theta_rod=np.linspace(-np.pi,np.pi,10000)\n \n # allocate memory for the lookup table (P)\n P = np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num,self.rod_num,self.frame_num])\n \n self.kappa_oto_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n self.kappa_ver_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n self.kappa_hor_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n self.tau_mtx=np.zeros([kappa_oto_num,kappa_ver_num,kappa_hor_num,tau_num])\n \n \n for k in range(0,kappa_oto_num):\n for l in range(0,kappa_ver_num):\n for m in range(0,kappa_hor_num):\n for n in range(0,tau_num):\n kappa_oto2 = kappa_oto[k]\n kappa_ver2 = kappa_ver[l]\n kappa_hor2 = kappa_hor[m]\n tau2 = tau[n]\n kappa1 = kappa_ver2-(1-np.cos(np.abs(2*self.frames)))*tau2*(kappa_ver2-kappa_hor2)\n kappa2 = kappa_hor2+(1-np.cos(np.abs(2*self.frames)))*(1-tau2)*(kappa_ver2-kappa_hor2)\n\n for i in range(0,self.frame_num):\n \n # the context provided by the frame\n P_frame1 = vonmises.pdf(theta_rod-self.frames[i],kappa1[i])\n P_frame2 = vonmises.pdf(theta_rod-np.pi/2-self.frames[i],kappa2[i])\n P_frame3 = vonmises.pdf(theta_rod-np.pi-self.frames[i],kappa1[i])\n P_frame4 = vonmises.pdf(theta_rod-3*np.pi/2-self.frames[i],kappa2[i])\n \n P_frame = (P_frame1+P_frame2+P_frame3+P_frame4)\n P_frame = P_frame/np.sum(P_frame)\n\n \n # the otoliths\n P_oto = vonmises.pdf(theta_rod,kappa_oto2)\n \n # the upright prior\n \n # compute the cumulative density of all distributions convolved\n cdf=np.cumsum(np.multiply(P_oto, P_frame))/np.sum(np.multiply(P_oto, P_frame))\n cdf=np.nan_to_num(cdf)\n cdf[cdf==0]=1e-10 \n cdf[cdf>1.0]=1.0 \n for j in range(0,self.rod_num):\n index = np.argmax(theta_rod>=rods[j])\n P[k][l][m][n][j][i]=cdf[index]\n \n self.kappa_oto_mtx[k][l][m][n]=kappa_oto2\n self.kappa_ver_mtx[k][l][m][n]=kappa_ver2\n self.kappa_hor_mtx[k][l][m][n]=kappa_hor2\n self.tau_mtx[k][l][m][n]=tau2\n \n\n self.lookup = np.reshape(P,(kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num,self.rod_num,self.frame_num),order=\"F\")\n self.prior = np.ones(kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num)/(kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num)\n self.calcNextStim()\n\n \n def calcNextStim(self):\n \n \n # Compute posterior\n self.paxs = np.empty([self.lookup.shape[0], self.lookup.shape[1], self.lookup.shape[2]])\n self.paxf = np.empty([self.lookup.shape[0], self.lookup.shape[1], self.lookup.shape[2]])\n h = np.empty([self.frame_num, self.rod_num])\n \n self.paxs = np.einsum('i,ijk->ijk', self.prior, self.lookup)\n self.paxf = np.einsum('i,ijk->ijk', self.prior, 1 - self.lookup)\n 
self.paxs[self.paxs==0]=1e-10;\n self.paxf[self.paxf==0]=1e-10;\n \n ps = np.sum(self.paxs,0) \n pf = np.sum(self.paxf,0)\n \n\n self.paxs = np.einsum('jk,ijk->ijk', 1/ps, self.paxs)\n self.paxf = np.einsum('jk,ijk->ijk', 1/pf, self.paxf)\n \n self.paxs[self.paxs==0]=1e-10;\n self.paxf[self.paxf==0]=1e-10;\n\n hs = np.einsum('ijk,ijk->jk', -self.paxs, np.log(self.paxs))\n hf = np.einsum('ijk,ijk->jk', -self.paxf, np.log(self.paxf))\n\n\n\n # Compute entropy\n #hs = np.sum(-self.paxs * np.log(self.paxs),0)\n #hf = np.sum(-self.paxf * np.log(self.paxf),0)\n \n \n # Compute expected entropy\n h = ps * hs + pf * hf\n \n plt.pcolormesh(h)\n plt.show(block=False)\n \n ind = np.unravel_index(h.argmin(), h.shape) # index of smallest expected entropy\n\n \n #x_f = np.expand_dims(self.rods,axis=1) \n #x_f = np.tile(x_f,(1,self.frame_num))\n #x_f = x_f.flatten('F')\n #y_f = np.expand_dims(self.frames,axis=0) \n #y_f = np.tile(y_f,(self.rod_num,1))\n #y_f = y_f.flatten('F')\n\n\n # Find stimulus that minimizes expected entropy\n self.stim = ([self.rods[ind[0]],self.frames[ind[1]]])\n #self.stim1_index = np.argmin(np.abs(self.rods - self.stim[0]))\n #self.stim2_index = np.argmin(np.abs(self.frames - self.stim[1]))\n self.stim1_index = ind[0]\n self.stim2_index = ind[1]\n \n \n def addData(self,response):\n\n self.stim = None\n\n # Update prior based on response\n if response == 1:\n self.prior = self.paxs[:,self.stim1_index,self.stim2_index]\n elif response == 0:\n self.prior = self.paxf[:,self.stim1_index,self.stim2_index]\n else:\n self.prior = self.prior\n\n ## WARNING: solution for value,index is not unique!\n ## take MAP instead of Expected Value\n \n #self.theta = np.array([self.kappa_oto_mtx[:,:,:,:].flatten('F'), self.kappa_ver_mtx[:,:,:,:].flatten('F'),self.kappa_hor_mtx[:,:,:,:].flatten('F'),self.tau_mtx[:,:,:,:].flatten('F')])\n # dimensions of the parameter space\n kappa_oto_num = len(self.kappa_oto);\n kappa_ver_num = len(self.kappa_ver);\n kappa_hor_num = len(self.kappa_hor);\n tau_num = len(self.tau);\n self.theta =np.array([np.reshape(self.kappa_oto_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num), np.reshape(self.kappa_ver_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num),np.reshape(self.kappa_hor_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num),np.reshape(self.tau_mtx[:,:,:,:],kappa_oto_num*kappa_ver_num*kappa_hor_num*tau_num)])\n \n self.parms = np.matmul(self.theta,self.prior)\n \n diff = (self.theta.transpose()-self.parms).transpose()\n self.var_parms = np.matmul(np.power(diff,2), self.prior)\n \n self.stim = None\n self.calcNextStim()\n #self.stim1_index = np.random.randint(25)\n #self.stim2_index = np.random.randint(8)\n #self.stim = ([self.rods[self.stim1_index],self.frames[self.stim2_index]])\n # print('Variance', self.var_parms)\n return self.parms, self.var_parms\n\n \n\n\n","sub_path":"Alberts/PSI_RiF.py","file_name":"PSI_RiF.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"441974652","text":"import csv\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\n\n\ndef get_adjacent_matrix(distance_file: str, num_nodes: int, id_file: str = None, graph_type='connect') -> np.array:\n '''\n\n :param distance_file: str ,path of csv file to save the distance between nodes\n :param num_nodes:int , number of nodes in the graph\n :param id_file:str , path of txt file to save the order of the nodes\n :param graph_type:str, ['connet','distance']\n :return:\n np.array[N,N]\n '''\n A = np.zeros([num_nodes, num_nodes]) # 构造NXN节点数量\n if id_file:\n '''处理存在在矩阵中的真实点'''\n with open(id_file, 'r') as f_id:\n node_id_dict = {int(node_id): idx for idx, node_id in enumerate(f_id.read().strip().split('\\n'))}\n\n with open(distance_file, 'r') as f_d:\n f_d.readline()\n reader = csv.reader(f_d)\n for item in reader:\n if len(item) != 3:\n continue\n i, j, distance = int(item[0]), int(item[1]), float(item[2])\n # 构造邻接矩阵\n if graph_type == 'connect':\n A[node_id_dict[i], node_id_dict[j]] = 1.\n A[node_id_dict[j], node_id_dict[i]] = 1.\n elif graph_type == 'distance':\n A[node_id_dict[i], node_id_dict[j]] = 1. / distance\n A[node_id_dict[j], node_id_dict[i]] = 1. / distance\n else:\n raise ValueError('graph type is not correct(connect or distance)')\n return A\n\n reader = pd.read_csv(distance_file).values\n for item in reader:\n if len(item) != 3:\n continue\n i, j, distance = int(item[0]), int(item[1]), float(item[2])\n\n # 构造邻接矩阵\n if graph_type == 'connect':\n A[i, j] = 1.\n A[j, i] = 1.\n elif graph_type == 'distance':\n A[i, j] = 1. / distance\n A[j, i] = 1. / distance\n else:\n raise ValueError('graph type is not correct(connect or distance)')\n return A\n\n\ndef get_flow_data(flow_file: str) -> np.array:\n '''\n :param flow_file:flow_file:str,path of .npz file to save the traffic flow data\n :return:\n np.array(N,T,D)\n '''\n data = np.load(flow_file)\n flow_data = data['data'].transpose([1, 0, 2])[:, :, 0][:, :, np.newaxis]\n return flow_data\n\n\nclass LoadData(Dataset):\n def __init__(self, data_path, num_nodes, divide_days, time_interval, history_length, train_mode):\n '''\n :param data_path:list ,['graph file name', 'flow data file name'],path to save the data file names;\n :param num_nodes:int, numbers of nodes;\n :param divide_days:list,[days of train data, days of test data],list to divide the original data;\n :param time_interval:int, time interval between two traffic data records(mins);\n :param history_length:int,length of history data to be used;\n :param train_mode:list,['train','test']\n '''\n\n self.data_path = data_path\n self.num_nodes = num_nodes\n self.train_mode = train_mode\n self.train_days = divide_days[0] # 59 - 14 = 45 天\n self.test_days = divide_days[1] # 7*2 天\n self.history_length = history_length # 6\n self.time_interval = time_interval # 5 min 间隔一次数据\n\n self.one_day_length = int(24 * 60 / self.time_interval)\n\n self.graph = get_adjacent_matrix(distance_file=data_path[0], num_nodes=num_nodes)\n\n self.flow_norm, self.flow_data = self.pre_process_data(data=get_flow_data(data_path[1]),\n norm_dim=1) # base , normalization\n\n def __len__(self):\n if self.train_mode == 'train':\n return self.train_days * self.one_day_length - self.history_length\n elif self.train_mode == 'test':\n return self.test_days * self.one_day_length\n else:\n raise ValueError('train mode:[{}] is not in defined'.format(self.train_mode))\n\n def __getitem__(self, index):\n '''\n :param index: int , range of dataset length [0, 
length-1]\n :return:\n '''\n if self.train_mode == 'train':\n index = index\n elif self.train_mode == 'test':\n index += self.train_days * self.one_day_length\n else:\n raise ValueError('train mode:[{}] is not in defined'.format(self.train_mode))\n\n data_x, data_y = LoadData.slice_data(self.flow_data, self.history_length, index, self.train_mode)\n data_x = LoadData.to_tensor(data_x) # [N,H,D]\n data_y = LoadData.to_tensor(data_y).unsqueeze(1) # [N,1,D]\n\n return {'graph': LoadData.to_tensor(self.graph), 'flow_x': data_x, 'flow_y': data_y}\n\n @staticmethod\n def slice_data(data, history_length, index, train_mode):\n if train_mode == 'train':\n start_index = index\n end_index = index + history_length\n elif train_mode == 'test':\n start_index = index - history_length\n end_index = index\n else:\n raise ValueError('train model:[{}] is not defined'.format(train_mode))\n\n data_x = data[:, start_index:end_index]\n data_y = data[:, end_index]\n return data_x, data_y\n\n @staticmethod\n def pre_process_data(data, norm_dim):\n '''\n :param data:np.array,original traffic data without normalization\n :param norm_dim:int, normalization dimension\n :return:\n norm_base,norm_data\n '''\n norm_base = LoadData.normalization_base(data, norm_dim)\n norm_data = LoadData.normalize_data(norm_base[0], norm_base[1], data)\n\n return norm_base, norm_data\n\n @staticmethod\n def normalization_base(data, norm_dim):\n '''\n :param data:np.array,original traffic data without normalization\n :param norm_dim:int, normalization dimension\n :return:\n max_data:np.array\n min_data:np.array\n '''\n max_data = np.max(data, norm_dim, keepdims=True) # [N,T,D],norm = 1 -> [N, 1, D]\n min_data = np.min(data, norm_dim, keepdims=True)\n return max_data, min_data\n\n @staticmethod\n def normalize_data(max_data, min_data, data):\n mid = min_data\n base = max_data - min_data\n normalized_data = (data - mid) / base\n return normalized_data\n\n @staticmethod\n def recover_data(max_data, min_data, data):\n mid = min_data\n base = max_data - min_data\n recoverd_data = data * base + mid\n\n return recoverd_data\n\n @staticmethod\n def to_tensor(data):\n return torch.tensor(data, dtype=torch.float)\n\n\nif __name__ == '__main__':\n train_Data = LoadData(data_path=['..\\\\PeMS_04\\\\PeMS04.csv', '..\\\\PeMS_04\\\\PeMS04.npz'], num_nodes=307,\n divide_days=[45, 14], time_interval=5, history_length=6, train_mode='train')\n print(train_Data)\n print(train_Data[0]['flow_x'].size())\n print(train_Data[0]['flow_y'].size())\n","sub_path":"script/traffic_dataset.py","file_name":"traffic_dataset.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"257602989","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) # converts RBG image to grayscale using very specific factors\n\nI = mpimg.imread('a_image.tif')\n# I = plt.imread(\"a_image.tif\")\n# I = plt.imread(\"test_8bit.jpg\")\n\nI_gray = np.array(rgb2gray(I))\nI_gray2 = I_gray.astype(int)\n# I_gray = int(rgb2gray(I))\nplt.imshow(I_gray, cmap = plt.get_cmap('gray'))\nplt.show()\n\nprint(I_gray.ravel)\n\n# print(f)\n#\n# plt.hist(f)\n# plt.show()\n\n# in addition, here is some more\n\n\n'''\ntemp=np.asarray(Image.open('map.jpeg'))\n\nx=temp.shape[0]\ny=temp.shape[1]*temp.shape[2]\ntemp.resize((x,y)) # a 2D array\nprint(temp)\n\n# I = plt.imread(\"a_image.tif\")\n# I = plt.imread(\"MARBLES.tif\")\nI = plt.imread(\"barbara_gray.bmp\", )\nI = np.array(I)\n\nplt.imshow(I)\nplt.show()\n\nx=I.shape[0]\ny=I.shape[1]*I.shape[2]\nI.resize((x,y)) # a 2D array\nprint(I)\n\nplt.plot(I)\nplt.show()\n\n\n\n\n\n#\n# aa= np.array([[1,2,3,4,5],[2,2,2,2,2]])\n# aaa= np.array([[5,6,7,8,9],[10,11,12,14,15]])\n# a = np.array([aa,aaa])\n# print(a)\n# print(np.shape(a))\n# b = a[0]\n# print(b)\n\nprint (I)\nprint(\"poep\")\nprint(I[0])\n\n'''","sub_path":"Tracking/Tracking.py","file_name":"Tracking.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"300676756","text":"# Author: \tSchuyler Rank\n# E-mail: \tsrank@brandeis.edu\n# Date: \t28 February 2017\n# Class:\tCS 132 - Information Retrieval @ Brandeis University\n# Desc:\t\tThis program will build an inverted index and doc-data for a movie corpus\n\nimport json, shelve, sys, math, os\nfrom boolean_terms import boolean_terms\nfrom collections import defaultdict\nfrom vs_search import vs_search\n\n\n# This class takes a Wikipedia movie corpus in JSON and turns it into four shelf files: one to hold film term vectors,\n# one to hold information on the movies in the corpus from which article pages can be generated, one for doc length, and\n# one for inverse document frequencies.\n# film_vectors: {film1: {term1: term_freq, term2: term_freq, ...}, film2: {...}, ...}\n# doc lengths: {movie_id1: length, movie_id2: length, ...}\nclass corpus_to_index:\n def __init__(self, corpus_file):\n # Open the file and get the corpus\n fh = open(corpus_file, 'r')\n self.__corpus = json.load(fh)\n fh.close()\n\n # Set up our class variables\n # It seems smarter to do everything in memory and then write to disk once at the end,\n # rather than write every single little thing to disk constantly as we do it\n self.__film_vectors = dict() # A dictionary from film ids to a dict of terms to weighted term frequencies\n self.__docdata = dict() # A dictionary from film ids to a dictionary of movie data\n self.__doc_lengths = dict() # A dictionary from film ids to the magnitude of the document's term vector\n self.__idf = dict() # A dictionary from terms to float log inverse document frequency\n self.__term_dict = defaultdict(list) # A dictionary from terms to a list of movie ids\n\n # An object that will handle turning strings into a list of boolean search terms\n self.__terminator = boolean_terms()\n\n # Build the index\n self.__build_index()\n\n # Write the index\n self.__write_index()\n\n # Extra credit: for each film, find the 30 most similar films. 
Since these won't change and it takes a long\n # time to do, it makes sense to pay the cost upfront here in the indexer and cache the results for later.\n # self.__similar = defaultdict(list) # A dictionary from films to a sorted list of tuples (sim_score, sim_film)\n # self.__find_similar()\n\n # This pairs each film with a list of similar films using vs_search\n # This function is not called, since it takes so long to run; the functionality was moved to vs_search\n def __find_similar(self):\n # First, we'll need a vs_search object to find similar movies for us\n films_file = 'data' + os.sep + 'film_vectors'\n idf_file = 'data' + os.sep + 'idf'\n term_file = 'data' + os.sep + 'term_dict'\n vs = vs_search(films_file, idf_file, term_file)\n\n # Now, for each film, find similar films using the first film's terms as a query\n all_films = self.__film_vectors.keys()\n for film_id in all_films:\n # Get a query of all the terms in the vector (which have already been run through boolean_terms,\n # so no need to run them through vs_search's terminator again).\n query = self.__film_vectors[film_id].keys()\n self.__similar[film_id].append(vs.v_search(query, all_films, 30, terminate=False))\n\n # Write the similarity database\n sim_shelf = shelve.open('data' + os.sep + 'similarity', flag='n', writeback=False)\n sim_shelf.update(self.__similar)\n sim_shelf.close()\n\n # This function goes through the corpus file and builds the term index and doc-data files\n def __build_index(self):\n # Go through the corpus one movie at a time\n for film in self.__corpus:\n self.__docdata[str(film)] = self.__corpus[film] # Fill in doc-data\n\n # Get the terms for this movie. Terms are taken from the movie's title and its text\n terms = self.__terminator.get_terms(self.__corpus[film]['title'] + ' ' + self.__corpus[film]['text'])\n\n # Get the counts for each term and the length of the document\n vector = dict()\n squared_sum = 0\n for term in set(terms):\n term_freq = terms.count(term)\n vector[term] = 1 + math.log10(term_freq) # Store the weighted term frequency in this film's vector\n squared_sum += term_freq**2 # Sums of the squares of term freqs to be used in length\n self.__idf[term] = self.__idf.get(term, 0) + 1.0\n self.__term_dict[term].append(film)\n\n self.__film_vectors[str(film)] = vector\n\n # Add the length to the dictionary for this movie\n self.__doc_lengths[str(film)] = math.sqrt(squared_sum)\n\n # Get the inverse document frequency for all terms on the corpus\n num_films = 1.0 * len(self.__corpus)\n for term in self.__idf:\n self.__idf[term] = math.log10(num_films/self.__idf[term])\n\n # Now add idf weighting and length normalization to film vectors.\n for film in self.__film_vectors:\n for term in self.__film_vectors[film]:\n self.__film_vectors[film][term] *= self.__idf[term]\n self.__film_vectors[film][term] /= self.__doc_lengths[film]\n\n # This function writes the indexes to disk\n def __write_index(self):\n # Write doc-data to disk\n doc_shelf = shelve.open('data' + os.sep + 'doc-data', flag='n', writeback=False)\n doc_shelf.update(self.__docdata) # Since doc-data is a dictionary, we can just copy the whole thing at once\n doc_shelf.close()\n\n # Write the document index to disk\n vectors_shelf = shelve.open('data' + os.sep + 'film_vectors', flag='n', writeback=False)\n vectors_shelf.update(self.__film_vectors)\n vectors_shelf.close()\n\n # Write inverse document frequencies to disk\n lengths_shelf = shelve.open('data' + os.sep + 'idf', flag='n', writeback=False)\n 
lengths_shelf.update(self.__idf)\n lengths_shelf.close()\n\n # Write the term dictionary\n index_shelf = shelve.open('data' + os.sep + 'term_dict', flag='n', writeback=False)\n for term in self.__term_dict:\n postings = sorted([int(u) for u in self.__term_dict[term]]) # Sort as ints because '20' > '100'\n index_shelf[term] = [str(num) for num in postings] # Back into strings for shelf\n index_shelf.close()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n corpus_to_index(sys.argv[1])\n else:\n corpus_to_index('data' + os.sep + '2016_movies_standard.json')\n","sub_path":"vs_index.py","file_name":"vs_index.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"129219664","text":"def collatz(num) :\n while num != 1 :\n if num % 2 == 0 :\n num = num / 2\n else :\n num = num * 3 + 1\n print(int(num))\n\n\nwhile True:\n print(\"Please enter an integer: \")\n num = input()\n \n try:\n num = int(num)\n collatz(num)\n except:\n print(\"you didnt enter an integer, please try again.\")\n\n","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"649819656","text":"from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass OpenH264Conan(ConanFile):\n name = \"openh264\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.openh264.org/\"\n description = \"Open Source H.264 Codec\"\n topics = (\"h264\", \"codec\", \"video\", \"compression\", )\n license = \"BSD-2-Clause\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": \"False\",\n \"fPIC\": True,\n }\n\n exports_sources = \"patches/*\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def build_requirements(self):\n if self.settings.arch in (\"x86\", \"x86_64\"):\n self.build_requires(\"nasm/2.15.05\")\n if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n if self.settings.compiler == \"Visual Studio\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"build\", \"platform-msvc.mk\"),\n \"CFLAGS_OPT += -MT\",\n \"CFLAGS_OPT += -{}\".format(self.settings.compiler.runtime))\n tools.replace_in_file(os.path.join(self._source_subfolder, \"build\", \"platform-msvc.mk\"),\n \"CFLAGS_DEBUG += -MTd -Gm\",\n \"CFLAGS_DEBUG += -{} -Gm\".format(self.settings.compiler.runtime))\n if self.settings.os == \"Android\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"codec\", \"build\", \"android\", \"dec\", \"jni\", \"Application.mk\"),\n \"APP_STL := stlport_shared\",\n \"APP_STL := {}\".format(self.settings.compiler.libcxx))\n tools.replace_in_file(os.path.join(self._source_subfolder, \"codec\", \"build\", \"android\", \"dec\", \"jni\", \"Application.mk\"),\n \"APP_PLATFORM := android-12\",\n \"APP_PLATFORM := {}\".format(self._android_target))\n\n @property\n def _library_filename(self):\n prefix = \"\" if self.settings.compiler == \"Visual Studio\" else \"lib\"\n if self.options.shared:\n if tools.is_apple_os(self.settings.os):\n suffix = \".dylib\"\n elif self.settings.os == \"Windows\":\n suffix = \".dll\"\n else:\n suffix = \".so\"\n else:\n if self.settings.compiler == \"Visual Studio\":\n suffix = \".lib\"\n else:\n suffix = \".a\"\n return prefix + \"openh264\" + suffix\n\n @property\n def _make_arch(self):\n return {\n \"armv7\": \"arm\",\n \"armv8\": \"arm64\",\n \"x86\": \"i386\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings.arch), str(self.settings.arch))\n\n @property\n def _android_target(self):\n return \"android-{}\".format(self.settings.os.api_level)\n\n @property\n def _make_args(self):\n prefix = os.path.abspath(self.package_folder)\n if tools.os_info.is_windows:\n prefix = tools.unix_path(prefix)\n args = [\n \"ARCH={}\".format(self._make_arch),\n \"PREFIX={}\".format(prefix),\n ]\n autotools = AutoToolsBuildEnvironment(self)\n if self.settings.compiler == \"Visual Studio\":\n 
autotools.flags.extend([\"-nologo\", \"-{}\".format(self.settings.compiler.runtime)])\n autotools.link_flags.insert(0, \"-link\")\n if tools.Version(self.settings.compiler.version) >= \"12\":\n autotools.flags.append(\"-FS\")\n elif self.settings.compiler in (\"apple-clang\",):\n if self.settings.arch in (\"armv8\",):\n autotools.link_flags.append(\"-arch arm64\")\n if self.options.shared:\n autotools.fpic = True\n args.extend([\"{}={}\".format(k, v) for k,v in autotools.vars.items()])\n\n if self.settings.compiler == \"Visual Studio\":\n args.append(\"OS=msvc\")\n autotools.flags.append(\"-FS\")\n else:\n if self.settings.os == \"Windows\":\n args.append(\"OS=mingw_nt\")\n if self.settings.os == \"Android\":\n libcxx = str(self.settings.compiler.libcxx)\n stl_lib = \"$(NDKROOT)/sources/cxx-stl/llvm-libc++/libs/$(APP_ABI)/lib{}\".format(\"c++_static.a\" if libcxx == \"c++_static\" else \"c++_shared.so\") \\\n + \"$(NDKROOT)/sources/cxx-stl/llvm-libc++/libs/$(APP_ABI)/libc++abi.a\"\n ndk_home = os.environ[\"ANDROID_NDK_HOME\"]\n args.extend([\n \"NDKLEVEL={}\".format(self.settings.os.api_level),\n \"STL_LIB={}\".format(stl_lib),\n \"OS=android\",\n \"NDKROOT={}\".format(ndk_home), # not NDK_ROOT here\n \"TARGET={}\".format(self._android_target),\n \"CCASFLAGS=$(CFLAGS) -fno-integrated-as\",\n ])\n\n return args\n\n def build(self):\n self._patch_sources()\n with tools.vcvars(self) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n with tools.chdir(self._source_subfolder):\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=self._make_args, target=self._library_filename)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n with tools.vcvars(self) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n with tools.chdir(self._source_subfolder):\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=self._make_args, target=\"install-\" + (\"shared\" if self.options.shared else \"static-lib\"))\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\" and self.options.shared:\n self.cpp_info.libs = [\"openh264_dll\"]\n else:\n self.cpp_info.libs = [\"openh264\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n if self.settings.os == \"Android\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.names[\"pkg_config\"] = \"openh264\"\n libcxx = tools.stdcpp_library(self)\n if libcxx:\n self.cpp_info.system_libs.append(libcxx)\n","sub_path":"recipes/openh264/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"447161707","text":"from functools import wraps\nfrom django.http import Http404\n\n\n#Decorator for protect admin from intruders\ndef staff_or_404(view_func):\n \"\"\"\n Decorator for views that checks that the user is logged in and is a staff\n member, raising a 404 if necessary.\n \"\"\"\n @wraps(view_func)\n def new_view_func(request, *args, **kwargs):\n if request.user.is_authenticated():\n if request.user.is_admin:\n # The user is valid. Continue to the admin page.\n return view_func(request, *args, **kwargs)\n\n raise Http404\n return new_view_func\n","sub_path":"inquitv/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"476047819","text":"def setArguments():\n usage = \"usage: %s [options]\" % sys.argv[0]\n config = Parser(usage=usage).check_args()\n logger.setLevel(logging.INFO)\n if config.console:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter(\"%(name)s - %(levelname)s - %(message)s\"))\n logger.addHandler(console_handler)\n if config.log_file:\n if not os.path.isdir(os.path.dirname(config.log_file)):\n # fallback to console only if directory for logs does not exists and\n # continue to run\n raise ValueError('Please create directory %r to store %r log file' % (\n os.path.dirname(config.log_file), config.log_file))\n else:\n file_handler = logging.FileHandler(config.log_file)\n file_handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\n logger.addHandler(file_handler)\n logger.info('Configured logging to file %r' % config.log_file)\n if config.pid_file:\n if not os.path.isdir(os.path.dirname(config.pid_file)):\n raise ValueError('Please create directory %r to store %r pid file' % (\n os.path.dirname(config.pid_file), config.pid_file))\n else:\n open(config.pid_file, 'w').write(str(os.getpid()))\n if config.directory:\n if not os.path.isdir(config.directory):\n raise ValueError('Please create directory %r to store local files' % (\n config.directory))\n else:\n os.chdir(config.directory)\n config.cwd = os.getcwd()\n\n return config\n","sub_path":"software/redisdg/source/setArguments.py","file_name":"setArguments.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"559079657","text":"#encoding=utf-8\r\n\r\n\r\nfrom util.db import MySqlHelper\r\nimport util.timeHelper as timeHelper\r\n\r\n\r\nclass ProgramPlayPageDbHelper():\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def saveProgramPlayPage(self, channelId, playDate, pageUrl, html, dbHelper=None):\r\n sql = \"insert ignore into programplaypage (channelId, playDate, url, html, savingTime) values (%s, %s, %s, %s, %s)\"\r\n #sql = \"insert ignore into programplaypage (channelId, playDate, url, html, savingTime) values (%s, %s, %s, %s, %s) on duplicate key update html = values(html)\"\r\n params = (channelId, playDate, pageUrl, html, timeHelper.now())\r\n if not dbHelper:\r\n dbHelper = MySqlHelper()\r\n dbHelper.openByConf()\r\n dbHelper.execute(sql, params)\r\n dbHelper.close()\r\n else:\r\n dbHelper.execute(sql, params)\r\n\r\n def clearProgramPlayPages(self, dbHelper=None):\r\n sql = \"delete from programplaypage\"\r\n if not dbHelper:\r\n dbHelper = MySqlHelper()\r\n dbHelper.openByConf()\r\n dbHelper.execute(sql)\r\n dbHelper.close()\r\n else:\r\n dbHelper.execute(sql)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dbHelper = MySqlHelper(True)\r\n programPlayPageDbHelper = ProgramPlayPageDbHelper()\r\n programPlayPageDbHelper.clearProgramPlayPages(dbHelper)","sub_path":"data-auto-updater-projects/pytvmao/dal/programPlayPageHelper.py","file_name":"programPlayPageHelper.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"156795572","text":"from aws_xray_sdk.core import patch, xray_recorder\nfrom pandas import DataFrame\n\nfrom raster_analysis.data_cube import DataCube\nfrom raster_analysis.data_environment import DataEnvironment\nfrom raster_analysis.geometry import GeometryTile\nfrom raster_analysis.globals import LOGGER\nfrom raster_analysis.query import Query\nfrom raster_analysis.query_executor import QueryExecutor\nfrom raster_analysis.results_store import AnalysisResultsStore, ResultStatus\n\npatch([\"boto3\"])\n\n\n@xray_recorder.capture(\"Raster Analysis\")\ndef handler(event, context):\n try:\n LOGGER.info(f\"Running analysis with parameters: {event}\")\n results_store = AnalysisResultsStore()\n\n if \"geometry\" in event:\n source_geom = event[\"geometry\"]\n is_encoded = False\n elif \"encoded_geometry\" in event:\n source_geom = event[\"encoded_geometry\"]\n is_encoded = True\n else:\n raise KeyError(\"No valid geometry field\")\n\n tile_geojson = event.get(\"tile\", None)\n geom_tile = GeometryTile(source_geom, tile_geojson, is_encoded)\n\n if not geom_tile.geom:\n LOGGER.info(f\"Geometry for tile {context.aws_request_id} is empty.\")\n results_store.save_result({}, context.aws_request_id)\n return {}\n\n data_environment = DataEnvironment(layers=event[\"environment\"])\n query = Query(event[\"query\"], data_environment)\n\n data_cube = DataCube(geom_tile.geom, geom_tile.tile, query)\n\n query_executor = QueryExecutor(query, data_cube)\n results: DataFrame = query_executor.execute()\n\n LOGGER.debug(f\"Ran analysis with results: {results.head(100)}\")\n results_store.save_result(results, event[\"cache_id\"])\n except Exception as e:\n LOGGER.exception(e)\n\n results_store = AnalysisResultsStore()\n results_store.save_status(event[\"cache_id\"], ResultStatus.error, 0, str(e))\n raise e\n","sub_path":"lambdas/raster_analysis/src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"558514697","text":"from random import *\n# random() 随机生成一个[0, 1.0]之间的小数\nprint(\n random()\n)\n# randint(a, b) 随机生成一个a到b之间的一个整数\nprint(\n randint(10, 100)\n)\n# uniform(a, b) 随机生成一个a到b之间的一个小数\nprint(\n uniform(10, 1000)\n)\n# randrange(a, b, c) 在a和b之间随机生成一个以c递增的数\nprint(\n randrange(10, 20, 9)\n)\nprint('================================================================')\n# choice() 从列表中随机返回一个元素\nprint(\n choice([1, 2, 3, 4, 5, 66])\n)\n# shuffle() 将列表排序打乱\nabc = ['a', 'a1', 'a2', 'a3', 'a4']\nprint(\n shuffle(abc), abc\n)\n# sample(, k) 从指定列表里获取k个元素\nprint(\n sample(abc, 2)\n)\nprint('========================================================')\na = range(2, 10)\nprint(\n a\n)\n","sub_path":"before/python3/练习/Mic008.随机函数.py","file_name":"Mic008.随机函数.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"42393086","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nfrom numpy.linalg import eig\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndef plot_vectors(V, EigVec):\n base = [0 for e in EigVec]\n vx = [np.array(e).ravel()[0] for e in EigVec]\n vy = [np.array(e).ravel()[1] for e in EigVec]\n plt.quiver(base, base, vx, vy, color = ['b', 'b'], angles = 'xy', scale=1.0, scale_units='xy')\n base = [0 for v in V]\n x = [np.array(v).ravel()[0] for v in V]\n y = [np.array(v).ravel()[1] for v in V]\n plt.quiver(base, base, x, y, color = 'r', angles = 'xy', scale=1.0, scale_units='xy')\n maxy = max([abs(i)+1 for i in y])\n maxx = max([abs(i)+1 for i in y])\n plt.ylim(-1*maxy, maxy)\n plt.xlim(-1*maxx, maxx)\n\n\n# $A : \\left(\\begin{matrix}\n# 3 & 4\\\\\n# 4 & 3\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v0 : \\left(\\begin{matrix}\n# 1\\\\\n# 1\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v1 : \\left(\\begin{matrix}\n# 1\\\\\n# -1\\\\\n# \\end{matrix}\\right)$\n\n# In[ ]:\n\n\nA = np.matrix([[3,4],[4,3]])\nv0 = [[1],[1]]\nv1 = [[1],[-1]]\nx = [[0],[1]]\nfor i in range(400):\n x = A.dot(x)\nplot_vectors([x], [v0,v1])\n\n\n# In[ ]:\n\n\nA = np.matrix([[3,4],[4,3]])\nv0 = [[1],[1]]\nv1 = [[1],[-1]]\nx = [[0],[1]]\nfor i in range(3):\n x = A.dot(x)\n x = x / np.linalg.norm(x,np.inf)\nplot_vectors([x], [v0,v1])\n\n\n# $A : \\left(\\begin{matrix}\n# 2 & 0\\\\\n# 0 & 2\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v0 : \\left(\\begin{matrix}\n# 1\\\\\n# 0\\\\\n# \\end{matrix}\\right) \\hspace{25pt}\n# v1 : \\left(\\begin{matrix}\n# 0\\\\\n# 1\\\\\n# \\end{matrix}\\right)$\n\n# In[ ]:\n\n\nA = np.matrix([[2, 0],[0,2]])\nv0 = [[1],[0]]\nv1 = [[0],[1]]\nx = [[1],[1]]\nfor i in range(100):\n x = A.dot(x)\n x = x / np.linalg.norm(x,np.inf)\nplot_vectors([x], [v0,v1])\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"demos/upload/toshow4/Power Method.py","file_name":"Power Method.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"232943750","text":"import os\nimport urllib\nimport webapp2\nimport json\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.webapp.mail_handlers import InboundMailHandler\nimport logging\nimport base64\nimport datetime\n\nFILTER_NEWLINES = False\nUSE_HTML = False\n# returns a datastore key for a given message\ndef messagedb_key(messagedb_name):\n return ndb.Key('Message', messagedb_name)\n\n# stores when requests were last received (for determining which ones are new)\nclass TimeStamp(ndb.Model):\n time_stored = ndb.DateTimeProperty()\n name = ndb.StringProperty()\n def update(self):\n self.time_stored = datetime.datetime.now()\n self.put()\n\n# data for each message. time is the time when the message was receieved/stored\nclass Message(ndb.Model):\n subject = ndb.StringProperty(indexed=False)\n content = ndb.TextProperty()\n town = ndb.StringProperty(indexed=True)\n time = ndb.DateTimeProperty()\n\n# returns the new messages that have arrived in JSON since the last request\nclass API(webapp2.RequestHandler):\n def get(self):\n # enable requests from any domain\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n self.response.headers['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept'\n self.response.headers['Access-Control-Allow-Methods'] = 'POST, GET, PUT'\n town_name = self.request.get('town_name')\n last_time = TimeStamp.query(TimeStamp.name == town_name).fetch(1)\n # making sure only new data was returned\n if len(last_time) == 0: # if there was no \"last request\", make a new timestamp\n last_time = TimeStamp()\n last_time.name = town_name\n last_time.update()\n logging.info('making new timestamp')\n else:\n last_time = last_time[0]\n logging.info('last request made at' + str(last_time.time_stored))\n logging.info('looking for messages for town: ' + town_name)\n # building response\n messages = Message.query(Message.town == town_name, Message.time > last_time.time_stored).fetch(100)\n response = {}\n response['messages'] = []\n num_new = len(messages)\n response['num_new'] = num_new \n for i in range(num_new):\n content = messages[i].content\n if FILTER_NEWLINES:\n content = content.replace('\\n', \" \")\n content = content.replace('\\r', \"\")\n if messages[i].subject is None: #if there was no subject, give this default\n response['messages'].append({'subject': town_name + \" School Update\", 'body':content})\n else:\n response['messages'].append({'subject': messages[i].subject, 'body':content})\n \n self.response.write(json.dumps(response, separators=(',',':'), sort_keys=True))\n last_time.update()\n logging.info('this request made at' + str(last_time.time_stored))\n\n# takes incoming mail, parses it and stores it in the database\nclass LogSenderHandler(InboundMailHandler):\n def receive(self, mail_message):\n logging.info(\"Received a message from: \" + mail_message.sender + \" addressed to \" + mail_message.to)\n # gets the \"pembroke\" in pembroke@biw-school-news.appspot.com\n town_name = mail_message.to.split('@')[0][1:]\n message = Message()\n plain_bodies = mail_message.bodies('text/plain')\n html_bodies = mail_message.bodies('text/html')\n message.content = \"\"\n # parse email body and subject\n if USE_HTML:\n for content_type, body in html_bodies:\n message.content += body.decode()\n else:\n for content_type, body in plain_bodies:\n message.content += body.decode()\n if mail_message.subject is not None:\n message.subject = mail_message.subject\n # put everything in database\n message.time = 
datetime.datetime.now()\n message.town = town_name\n message.put()\n logging.info('this message stored @ ' + str(message.time))\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.write('
BIW School News Server
contact charles.meyer@tufts.edu if you got here by accident')\n\napp = webapp2.WSGIApplication([\n LogSenderHandler.mapping(),\n ('/api', API),\n ('/', MainPage)\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"137481686","text":"#!/usr/bin/env python3\nfrom collections import OrderedDict\nimport sys\nimport re\nDBG = False\n\n#add_argument, set_defaults only available.\nListPatt = re.compile('(\\[.*?\\])')\nGbgPatt = re.compile('(.*)\\)[A-z0-9*]')\nLpRegex = re.compile('\\({1,}\\s{0,}')\nRpRegex = re.compile('\\s{0,}\\){1,}')\nPrRegex = re.compile('\\((.*)(\\))(?!.*\\))') # from \\( to last \\)\nCmRegex = re.compile('\\s{0,},\\s{0,}')\nStrRegex = re.compile('\\'(.*?)\\'')\n\n# Argument dict : store {arg_name : value}\nargDct=OrderedDict()\n\n# Remove empty line & Concatenate line-separated syntax.\ndef preprocess(fname):\n try :\n with open(fname, 'r', encoding='UTF8') as f:\n txt = f.read()\n t = txt.splitlines(True)\n t = str_list = list( filter(None, t) )\n # remove empty line\n t = [x for x in t if not re.match('\\s{0,}\\n',x)]\n # concatenate multiple lined arguments.\n empl = []\n for i in range(len(t)-1, 0, -1):\n if not re.search('add_argument|set_defaults', t[i]):\n t[i-1] += t[i]\n t[i-1]=re.sub('\\s{0,}\\n{0,}\\s{0,}','',t[i-1])\n empl.append(t[i])\n\n for d in empl:\n t.remove(d)\n for i, line in enumerate(t):\n t[i] = line.replace('\\\"', '\\'')\n return t\n\n except IOError:\n print('IOError : no such file.', fname)\n\n# Handling add_argument()\ndef add_argument(arg_line):\n global argDct\n\n arg_line = arg_line\n if DBG:\n print('in add_argument : **Pr regex : ' + str(arg_line))\n\n #argname = DdRegex.split(arg_line)[1] # Dash or regex for arg name.\n argname = re.search('\\'--(.*?)\\'',arg_line)\n if not argname:\n argname = re.search('\\'-+(.*?)\\'',arg_line)\n if argname:\n argname = argname.group(1).replace('-', '_')\n else :\n argname = StrRegex.search(arg_line).group(1)\n if not argname:\n return # no argument name\n\n argDct[argname]=''\n dtype = re.search(',\\s*type\\s*=(.*)', arg_line)\n if dtype:\n dtype = dtype.group(1)\n dtype = CmRegex.split(dtype)[0]\n else :\n dtype = ''\n\n dfult = re.search(',\\s*default\\s*=(.*)',arg_line)\n rquird = re.search(',\\s*required\\s*=(.*)',arg_line)\n action = re.search(',\\s*action\\s*=(.*)',arg_line)\n\n tval = ''\n if dfult:\n if DBG:\n print('in default ext')\n # type exist\n if re.search('int|float|long|bool|complex', dtype):\n tval = dfult.group(1)\n if DBG:\n print('type exist tval :' +str(tval))\n\n if ListPatt.search(tval):\n tval = ListPatt.search(tval).group(1)\n if DBG:\n print('list exit-list patt : ' + str(tval))\n\n # if not list, use comma as separator.\n else :\n tval = CmRegex.split(tval)[0]\n if DBG:\n print('not list tval :' +str(tval))\n\n if not re.search('int|float|long|bool|complex', tval) and not LpRegex.search(tval):\n tval = re.split('\\s{0,}\\){1,}',tval)[0]\n gbg = re.search(GbgPatt, tval)\n if gbg:\n tval = gbg.group(1)\n\n # type not specified str() assumed.\n else:\n tval = dfult.group(1)\n \n regres = StrRegex.match(tval)\n if regres:\n tval = regres.group(0)\n elif ListPatt.search(tval):\n tval = ListPatt.search(tval).group(1)\n else:\n tval = CmRegex.split(tval)[0]\n \n \n if DBG:\n print('tval : ' + str(tval) +'\\n')\n\n # action or required syntax exist\n elif action or rquird :\n if DBG:\n print('in action handling')\n msg_str = ''\n if action:\n tval = action.group(1)\n msg_str = 'action'\n else :\n tval = rquird.group(1)\n msg_str = 'required'\n\n regres = StrRegex.search(tval)\n if regres:\n tval = regres.group(0)\n else :\n tval = CmRegex.split(tval)[0]\n tval = '## ' + msg_str + ' ' + tval + ' ##'\n \n else :\n argDct[argname] = '## default None ##'\n\n if tval:\n argDct[argname] 
= tval\n\n# Handling set_default()\ndef set_defaults(arg_line):\n global argDct\n if DBG:\n print('Set_defaults : ' + str(arg_line))\n\n dfult = re.split('\\s{0,}=\\s{0,}', arg_line)\n tn = dfult[0] # arg name\n tv = RpRegex.split(dfult[1])[0] #arg value\n argDct[tn]=tv\n\ndef transform(fname):\n # t : list() contains add_argument|set_defaults lines.\n arg_line_list = preprocess(fname)\n\n for i, arg_line in enumerate(arg_line_list):\n\n t = PrRegex.search(arg_line)\n if t:\n t = t.group(1) # t: content of add_argument Parentheses.\n else :\n continue # nothing to parse.\n\n if re.search('add_argument\\s*\\(', arg_line):\n add_argument(t)\n elif re.search('set_defaults\\s*\\(',arg_line):\n set_defaults(t)\n else :\n # Nothing to parse.\n continue\n\n print('\\nclass args:')\n for i in argDct:\n print(' ',i, '=', argDct[i])\n print()\n\ndef main():\n if len(sys.argv) <2:\n print('Usage : python arg2cls.py [target.py] [target2.py(optional)] ...')\n sys.exit(0)\n sys.argv.pop(0)\n\n #handling multiple file input.\n for fname in sys.argv:\n transform(fname)\n\n# TODO : choices=, multiple keywords occurence fix. \n\nif(__name__ == \"__main__\"):\n main()\n","sub_path":"arg2cls_v0.8.py","file_name":"arg2cls_v0.8.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"215139364","text":"#!/usr/bin/python\n\n# Face Detection using OpenCV. Based on sample code by Roman Stanchak\n# Nirav Patel http://eclecti.cc 5/20/2008\n\nimport sys, os\nfrom opencv.cv import *\nfrom opencv.highgui import *\n\t\ndef detectObject(image):\n grayscale = cvCreateImage(cvSize(640, 480), 8, 1)\n cvCvtColor(image, grayscale, CV_BGR2GRAY)\n storage = cvCreateMemStorage(0)\n cvClearMemStorage(storage)\n cvEqualizeHist(grayscale, grayscale)\n cascade = cvLoadHaarClassifierCascade('haarcascade_frontalface_alt.xml',\n cvSize(1,1))\n faces = cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, \n CV_HAAR_DO_CANNY_PRUNING, cvSize(100,100))\n \n if faces:\n for i in faces:\n cvRectangle(image, cvPoint( int(i.x), int(i.y)),\n cvPoint(int(i.x+i.width), int(i.y+i.height)),\n CV_RGB(0,255,0), 3, 8, 0)\n \ndef displayObject(image):\n cvNamedWindow(\"face\", 1)\n cvShowImage(\"face\", image)\n cvWaitKey(0)\n cvDestroyWindow(\"face\")\n \ndef main():\n # Uses xawtv. Gstreamer can be used instead, but I found it much slower\n os.system(\"v4lctl snap jpeg 640x480 /tmp/face.jpg\")\n image = cvLoadImage(\"/tmp/face.jpg\")\n detectObject(image)\n displayObject(image)\n cvSaveImage(\"/tmp/face.jpg\", image)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"591659242","text":"__all__ = ['State', 'StateView', 'StateScene', 'StateGItem', 'StateConnItem']\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom Framework.Flowchart.State import State as ST\nfrom CRad.View.FCStep.DlgSet.DlgSet import SetsManager\nfrom CRad.Model.ContextMenuCmpt.CRadCtxtMenuCmpt import CRadCtxtMenuCmpt as TP\n\nfrom CRad.View.Resources import pycreme_res\nimport sys\n\n\nclass State(QObject):\n Empty = ST.Empty\n ReadyToRun = ST.ReadyToRun\n WaitJob = ST.WaitJob\n WaitMachine= ST.WaitMachine\n InProgress = ST.InProgress\n Finished = ST.Finished\n RunFailed = ST.RunFailed\n Off = ST.Off # The step's state is Off.\n FinishedOff= ST.FinishedOff # The step's state is Off, but the parent steps are finished and\n # the current step stores the parent step's data.\n Ready = ST.Ready # The current step is Ready, but the target and task is not inited.\n\n def __init__(self, label, imageName, name, fchart, parent=None):\n super(State, self).__init__(parent)\n\n self._label = label\n self._imageName = imageName\n\n self._name = name\n self._fc = fchart\n\n self._EditDlg = None\n self._editDlg = None\n\n self._status = self.Empty\n self._progress = 0\n self._isTargetState = False\n\n fchart.addObserver(self)\n self.update()\n\n # Context menu\n self._acts = {}\n self._iTPToKeys = {} # ItemType to action key mapping\n self._ctxtMenu = None\n\n self.initActions()\n self.initCtxtMenu()\n\n def name(self):\n return self._name\n\n def parents(self):\n return self._parents\n\n def setParents(self, p):\n self._parents = p\n\n def label(self):\n return self._label\n\n def wrapedLabel(self):\n return self._label\n\n def imageName(self):\n return self._imageName\n\n def isTargetState(self):\n return self._isTargetState\n\n def setTargetState(self, val=True):\n self._isTargetState = val\n\n def setEditDlg(self, Dlg):\n self._EditDlg = Dlg\n\n def update(self):\n st, pro = self._fc.status(self.name())\n self._status = st\n self._progress = pro\n self.emit(SIGNAL(\"statusChanged\"), self)\n\n def status(self):\n return self._status\n\n def progress(self):\n return self._progress\n\n def addAction(self, key, text, method, itemType, icon=None):\n if icon is not None:\n act = QAction(icon, text, self.parent())\n else:\n act = QAction(text, self.parent())\n\n self._acts[key] = act\n\n if self._iTPToKeys.has_key(itemType):\n self._iTPToKeys[itemType].add(key)\n else:\n self._iTPToKeys[itemType] = set([key])\n\n self.connect(act, SIGNAL(\"triggered()\"), method)\n\n def updateActions(self):\n itemList = self._iTPToKeys.keys()\n ret = self._fc.state(self.name()).contextMenuCmpt().isEnabled(itemList)\n for i, itemType in enumerate(itemList):\n isEnabled = ret[i]\n actKeys = self._iTPToKeys[itemType]\n for key in actKeys:\n self._acts[key].setEnabled(isEnabled)\n\n def initActions(self):\n self.addAction(\"Run\", self.tr(\"&Run Selected Step\"), self.run, TP.Run, QIcon(\":general/run_sel.png\"))\n self.addAction(\"Edit\", self.tr(\"&Edit Settings\"), self.editSets, TP.SetSettings)\n self.addAction(\"IptSets\", self.tr(\"&Import Settings From Project\"), self.importSets,TP.ImportSettings)\n self.addAction(\"IptRet\", self.tr(\"&Import Result From File\"), self.importRet, TP.ImportResult)\n self.addAction(\"ShowRet\", self.tr(\"&Show Result\"), self.showRet, TP.GetResult)\n self.addAction(\"GenRpt\", self.tr(\"&Generate Report\"), self.genReport, TP.GenReport)\n\n def initCtxtMenu(self):\n menu = QMenu(self.parent())\n menu.addAction(self._acts[\"Run\"])\n menu.addSeparator()\n 
menu.addAction(self._acts[\"Edit\"])\n menu.addAction(self._acts[\"IptSets\"])\n menu.addAction(self._acts[\"IptRet\"])\n menu.addSeparator()\n menu.addAction(self._acts[\"ShowRet\"])\n menu.addSeparator()\n menu.addAction(self._acts[\"GenRpt\"])\n\n self._ctxtMenu = menu\n\n # slots\n def showCtxtMenu(self):\n self.updateActions()\n self._ctxtMenu.exec_(QCursor.pos())\n\n def run(self):\n self._fc.state(self.name()).run()\n\n def editSets(self):\n if self._EditDlg is not None:\n if self._editDlg is None:\n setsMngr = SetsManager( self._fc.state(self.name()).settingsCmpt() )\n self._editDlg = self._EditDlg(setsMngr)\n\n self._editDlg.fromData()\n self._editDlg.exec_()\n\n def importSets(self):\n raise NotImplementedError\n\n def importRet(self):\n raise NotImplementedError\n\n def showRet(self):\n raise NotImplementedError\n\n def genReport(self):\n raise NotImplementedError\n\n\nclass StateConnItem(QAbstractGraphicsShapeItem):\n def __init__(self, parentNode, childNodes=[], parent=None):\n super(StateConnItem, self).__init__(parent)\n self._children = []\n self.setPos(parentNode)\n for child in childNodes:\n self._children.append(child - parentNode)\n\n def paint(self, painter, option, widget):\n if len(self._children) == 0:\n return\n\n painter.setPen(Qt.darkGray)\n midy = self._children[0].y() / 2\n\n # vertical line from parent\n painter.drawLine(0, 0, 0, int(midy))\n\n # horizontal line\n xmin = 1e100; xmax = -1e100\n for child in self._children:\n xmin = min(child.x(), xmin)\n xmax = max(child.x(), xmax)\n painter.drawLine(int(xmin), int(midy), int(xmax), int(midy))\n\n # vertical line to children\n for child in self._children:\n painter.drawLine(int(child.x()), int(midy), int(child.x()), int(child.y()))\n\n def boundingRect(self):\n # -----\n # xmin = qreal(0.0); qMin(xmin, child.x())\n # -----\n\n xmin = 0.0\n xmax = 0.0\n ymin = 0.0\n ymax = 0.0\n\n for child in self._children:\n xmin = min(xmin, child.x())\n xmax = max(xmax, child.x())\n ymin = min(ymin, child.y())\n ymax = max(ymax, child.y())\n return QRectF(xmin, ymin, xmax-xmin, ymax-ymin)\n\n\nclass StateGItem(QAbstractGraphicsShapeItem):\n ConnTop = 0\n ConnBottom = 1\n stateIconSize = QImage(\":traj.png\").width()\n\n def __init__(self, state, parent=None):\n super(StateGItem, self).__init__(parent)\n self.setFlag(QGraphicsItem.ItemIsSelectable, True)\n self._state = state\n self.__initImage(imageEmpty= QIcon(':'+self._state.imageName()))\n self._iconRect = QRectF(0, 0, StateGItem.stateIconSize-1, StateGItem.stateIconSize-1)\n\n def __initImage(self,\n imageFinished = QImage(\":finished.png\"),\n imageRunning = QImage(\":running.png\"),\n imageFailed = QImage(\":failed.png\"),\n imageEmpty = QImage(\":traj.png\"),\n imageWaiting = QImage(\"\"),\n imageWaitJob = QImage(\"\"),\n imageWaitMachine = QImage(\"\")):\n\n self._imageFinished = imageFinished\n self._imageRunning = imageRunning\n self._imageFailed = imageFailed\n self._imageEmpty = imageEmpty\n self._imageWaiting = imageWaiting\n self._imageWaitJob = imageWaitJob\n self._imageWaitMachine = imageWaitMachine\n\n def boundingRect(self):\n rect = self.textRect() # TODO...\n #return rect.united(self._iconRect)\n return self._iconRect\n\n def paint(self, painter, option, widget):\n if self._state is None:\n # TODO something is wrong, report error\n return\n\n pt = QPoint(0, 0)\n status = self._state.status()\n if status == State.Finished:\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Normal, state = QIcon.Off))\n 
painter.drawImage(70, 70, self._imageFinished)\n elif status == State.InProgress:\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Selected, state = QIcon.Off))\n painter.drawImage(70, 70, self._imageRunning)\n elif status == State.RunFailed:\n painter.drawImage(pt, self._imageFailed)\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Disabled, state = QIcon.Off))\n painter.drawImage(70, 70, self._imageFailed)\n elif status == State.WaitJob:\n painter.drawImage(pt, self._imageWaitJob)\n elif status == State.WaitMachine:\n painter.drawImage(pt, self._imageWaitMachine)\n elif status in [State.ReadyToRun, State.Ready]:\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Selected, state = QIcon.Off))\n else:\n # _imageEmpty\n painter.drawPixmap(pt, self._imageEmpty.pixmap(self.stateIconSize,\n self.stateIconSize, mode = QIcon.Disabled, state = QIcon.Off))\n\n label = self._state.wrapedLabel()\n painter.drawText(self.textRect(), Qt.AlignLeft, label)\n\n if status == State.InProgress:\n pen = QPen()\n pen.setStyle(Qt.SolidLine)\n pen.setMiterLimit(0)\n pen.setBrush(Qt.darkMagenta)\n pen.setCapStyle(Qt.FlatCap)\n pen.setJoinStyle(Qt.MiterJoin)\n\n pen.setWidth(2)\n painter.setPen(pen)\n painter.drawLine(QPoint(3, 88), QPoint( 80 * self._state.progress()/100 + 3 , 88))\n painter.drawText(QRectF(40, 73, 70, 79), Qt.AlignLeft, '%3.0f'%self._state.progress()+'%' )\n\n if self._state.isTargetState():\n size = self.stateIconSize\n path = QPainterPath()\n path.moveTo(size, size)\n path.lineTo(size, size/1.5)\n path.lineTo(size/1.5, size)\n path.lineTo(size, size)\n painter.fillPath(path, QBrush(Qt.blue))\n\n # If selected\n if self.isSelected():\n iconRectpen = QPen()\n iconRectpen.setWidth(2)\n iconRectpen.setColor(QColor(255, 0, 0, 127))\n iconRectpen.setStyle(Qt.DashLine)\n painter.setPen(iconRectpen)\n painter.drawRoundedRect(self._iconRect, 6.0, 6.0)\n\n def connectorPos(self, conn):\n if conn == StateGItem.ConnTop:\n return mapToScene(0.5 * StateGItem.stateIconSize, 0)\n else:\n return mapToScene(0.5 * StateGItem.stateIconSize,\n StateConnItem.stateIconSize)\n\n def state(self):\n return self._state\n\n def textRect(self):\n return QRectF(2, 2, 94, 15)\n\n\nclass StateScene(QGraphicsScene):\n def __init__(self, parent=None):\n super(StateScene, self).__init__(parent)\n\n def updateScene(self):\n super(StateScene, self).update(QRectF())\n\n\nclass StateView(QGraphicsView):\n def __init__(self, scene, parent=None):\n super(StateView, self).__init__(parent)\n if scene is not None:\n self.setScene(scene)\n self.setDragMode(self.NoDrag)\n self.setMouseTracking(True)\n self.setAcceptDrops(False)\n\n def mouseDoubleClickEvent(self, event):\n pos = self.mapToScene(event.pos())\n stateItem = self.scene().itemAt(pos)\n\n # if stateItem type is StateConnItem return\n if stateItem is None or (not isinstance(stateItem, StateGItem)):\n return\n\n state = stateItem.state()\n if state is not None:\n event.accept()\n self.emit(SIGNAL('mouseDoubleCliked'), state)\n\n def mousePressEvent(self, event):\n pos = self.mapToScene(event.pos())\n\n stateItem = self.scene().itemAt(pos)\n if stateItem is not None and isinstance(stateItem, StateGItem):\n # Record mouse is on which item\n self._latestItem = stateItem\n else:\n self._latestItem = None\n\n selectedPressed = False\n selectedList = self.scene().selectedItems()\n for item in selectedList:\n if 
(item.contains(item.mapFromScene(pos))):\n selectedPressed = True\n\n if True == selectedPressed:\n if Qt.RightButton == event.button():\n self.mouseReleaseEvent(event)\n else:\n super(StateView, self).mousePressEvent(event)\n\n def mouseReleaseEvent(self, event):\n pos = self.mapToScene(event.pos())\n stateItem = self.scene().itemAt(pos)\n\n if stateItem is None or (not isinstance(stateItem, StateGItem)):\n if Qt.RightButton == event.button():\n event.accept()\n self.emit(SIGNAL('mouseRightReleased'), None)\n else:\n state = stateItem.state()\n if state is not None:\n if Qt.RightButton == event.button():\n event.accept()\n self.emit(SIGNAL('mouseRightReleased'), state)\n elif Qt.LeftButton == event.button():\n event.accept()\n # TODO.. for drag command\n else:\n super(StateView, self).mouseReleaseEvent(event)\n\n def selectedState(self):\n selectedList = self.scene().selectedItems()\n\n # If not state is selected, return state which mouse is on\n if 0 == len(selectedList):\n return [self.latestSelectedState()]\n\n states = []\n for item in selectedList:\n states.append(item.state())\n return states\n\n def latestSelectedState(self):\n if self._latestItem is not None:\n return self._latestItem.state()\n else:\n return None\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n state = State()\n state.setStatus(State.InProgress)\n state.setLabel(QString(\"label\"))\n state.setTargetState(True)\n\n\n scene = StateScene()\n doc = DocExptEditor()\n doc.setStateScene(scene)\n\n l = []\n l.append(QPointF(100.0, -100.0))\n l.append(QPointF(-100.0, -100.0))\n item = StateConnItem(QPointF(0.0, 0.0), l)\n scene.addItem(item)\n\n item = StateGItem(state)\n item.setPos(-23, 0)\n item.setSelected(True)\n view = StateView(doc)\n scene.addItem(item)\n #view.setScene(scene)\n\n view.show()\n app.exec_()","sub_path":"lib/CRad/View/FCStep/State/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":14872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"459919321","text":"#!/usr/bin/env python\n'''\nCreated on Dec 3, 2010\n\n@author: ale\n'''\nimport checkpython\nimport optparse\nimport PrawnTools\nimport os\n\ndef main():\n usage = 'usage: %prog [options]'\n parser = optparse.OptionParser(usage)\n\n parser.add_option('--dbpath', dest='database', help='Database path', default=PrawnTools.jmDBPath())\n parser.add_option('-s', '--session', dest='sessionName', help='Name of the session')\n parser.add_option('-g', '--group', dest='sessionGroup', help='Comma separated list of groups')\n\n (opt, args) = parser.parse_args()\n \n if opt.sessionName is None and opt.sessionGroup is None:\n parser.error('The session name is undefined')\n \n dbPath = os.path.abspath(os.path.expanduser(opt.database))\n m = PrawnTools.Manager(dbPath)\n m.connect()\n\n sessions = m.getListOfSessions(opt.sessionName,opt.sessionGroup)\n \n for s in sessions:\n m.removeSession(s.name)\n m.disconnect()\n \n \nif __name__ == \"__main__\":\n main()","sub_path":"src/pwn_RemoveSession.py","file_name":"pwn_RemoveSession.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"341076332","text":"from PIL import Image\ndef sliceImage(im):\n width,height = im.size\n bound = 175#decide if this pixel is black or white\n xStartFlag,xEndFlag,yStartFlag,yEndFlag = False,False,False,False\n xStart,xEnd,yStart,yEnd = 0,0,0,0\n result = []\n\n for i in range(width):\n if(xStartFlag==False):\n #find the start x\n for j in range(height):\n if(im.getpixel((i,j))=2 and yEnd-yStart>=5):\n result.append((xStart,yStart,xEnd,yEnd))\n return result\n","sub_path":"lib/sliceImage.py","file_name":"sliceImage.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"454438082","text":"__author__ = 'andrei'\n\nfrom flask import Flask, jsonify\nfrom SQLengine import andrei_dict\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n return \"Hello World!\"\n\n\n@app.route(\"/user/\")\ndef show_user_profile(username):\n if username == 'chiffa' or username == 'andrei':\n return jsonify(andrei_dict)\n else:\n return jsonify({'Error':'No Such User!'})\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"source/MainServer.py","file_name":"MainServer.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"356566544","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport wx\r\nimport os\r\nfrom wrapper import Cipher\r\nfrom crypt_bat_bk import AES_Batch\r\n\r\nclass AES_GUI(wx.Frame):\r\n \r\n def __init__(self, parent, title):\r\n super(AES_GUI, self).__init__(parent, title=title,\r\n size=(480, 200))\r\n self.cipher = AES_Batch()\r\n self.InitUI()\r\n self.Centre()\r\n self.Show() \r\n \r\n def InitUI(self):\r\n \r\n panel = wx.Panel(self)\r\n vbox = wx.BoxSizer(wx.VERTICAL)\r\n\r\n hbox1 = wx.BoxSizer(wx.HORIZONTAL)\r\n input_lb = wx.StaticText(panel, label='Batch from:')\r\n hbox1.Add(input_lb, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)\r\n self.input = wx.TextCtrl(panel)\r\n hbox1.Add(self.input, proportion=1)\r\n load = wx.Button(panel, label='Load')\r\n load.Bind(wx.EVT_BUTTON, self.onOpenFile)\r\n hbox1.Add(load, flag=wx.ALIGN_RIGHT|wx.LEFT, border=10)\r\n vbox.Add(hbox1, flag=wx.EXPAND|wx.ALL, border=20)\r\n\r\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\r\n output_lb = wx.StaticText(panel, label='Batch to:')\r\n hbox2.Add(output_lb, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)\r\n self.output = wx.TextCtrl(panel)\r\n hbox2.Add(self.output, proportion=1)\r\n load2 = wx.Button(panel, label='Load')\r\n load2.Bind(wx.EVT_BUTTON, self.onOpenFile2)\r\n hbox2.Add(load2, flag=wx.ALIGN_RIGHT|wx.LEFT, border=10)\r\n vbox.Add(hbox2, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=20)\r\n\r\n hbox3 = wx.BoxSizer(wx.HORIZONTAL)\r\n process = wx.Button(panel, label='Process')\r\n process.Bind(wx.EVT_BUTTON, self.onProcess)\r\n hbox3.Add(process, flag=wx.ALIGN_CENTER|wx.EXPAND, proportion=1)\r\n vbox.Add(hbox3, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=20)\r\n\r\n panel.SetSizer(vbox)\r\n\r\n def onProcess(self, event):\r\n msg = self.cipher.parseBatch(self.input.GetValue(), self.output.GetValue())\r\n if not msg == '':\r\n wx.MessageBox(msg, 'Error', wx.OK | wx.ICON_ERROR)\r\n else:\r\n wx.MessageBox('Completed!', 'Completed', wx.OK | wx.ICON_INFORMATION)\r\n def onOpenFile(self, event):\r\n \"\"\"\r\n Create and show the Open FileDialog\r\n \"\"\"\r\n\r\n wildcard = \"All files (*.csv)|*.csv\"\r\n dlg = wx.FileDialog(\r\n self, message=\"Choose a file\",\r\n defaultDir= os.getcwd(),\r\n defaultFile=\"\",\r\n wildcard=wildcard,\r\n style=wx.OPEN | wx.CHANGE_DIR\r\n )\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.input.SetValue(dlg.GetPath())\r\n dlg.Destroy()\r\n\r\n def onOpenFile2(self, event):\r\n \"\"\"\r\n Create and show the Open FileDialog\r\n \"\"\"\r\n wildcard = \"All files (*.csv)|*.csv\"\r\n dlg = wx.FileDialog(\r\n self, message=\"Choose a file\",\r\n defaultDir= os.getcwd(),\r\n defaultFile=\"\",\r\n wildcard=wildcard,\r\n style=wx.OPEN | wx.CHANGE_DIR\r\n )\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.output.SetValue(dlg.GetPath())\r\n dlg.Destroy()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = wx.App()\r\n AES_GUI(None, title='AES Crypto Tool (Batch)')\r\n app.MainLoop()","sub_path":"crypt-bat-GUI.py","file_name":"crypt-bat-GUI.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"153193412","text":"\t#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n#\r\n# Copyright 2007 Google Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0(the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n#\t http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\r\nimport webapp2\r\nimport os\r\nimport re\r\nimport jinja2\r\nimport random\r\nimport string\r\nimport hmac\r\nimport logging\r\nimport datetime\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.api import memcache\r\n\r\nfrom pybcrypt import bcrypt\r\nimport pytils\r\nimport gmemsess\r\n\r\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\r\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir))#,\r\n\t\t\t\t\t\t\t #autoescape = True)\r\nSESSION_EXPIRES = 3 #сколько дней храним сессию\r\n\r\npermits = {#!!!!!!!!!!необходимо убрать хардкод и хранить разрешения в бд\r\n\t\t\t'admin'\t\t: {'blog_post':True, 'comment_post':True,'power_control':True},\r\n\t\t\t'blogger'\t: {'blog_post':True, 'comment_post':True,'power_control':False},\r\n\t\t\t'member'\t: {'blog_post':False, 'comment_post':True,'power_control':False},\r\n\t\t\t'guest'\t\t: {'blog_post':False, 'comment_post':False,'power_control':False}\r\n\t\r\n}\r\n\r\n\r\napp_path = {'main'\t: '/',\r\n\t\t\t'login'\t: '/login',\r\n\t\t\t'signup': '/signup',\r\n\t\t\t'logout': '/logout',\r\n\t\t\t'blog'\t: '/blog',\r\n\t\t\t'profile': '/profile',\r\n\t\t\t'comment': '/comment',\r\n\t\t\t'ajax'\t: '/ajx'\r\n\t\t\t}\r\nsecret = '_Long_123_Secret_456_String_789_' #следует сохранить отдельно\r\n\r\n##########################################################################\r\n#Вспомогательные функции #\r\n##########################################################################\r\n\r\ndef make_hash(*args): # создание хеша из полученных аргументов\r\n\tline_for_hashing = \"\"\r\n\tfor arg in args:\r\n\t\tline_for_hashing += str(arg)\r\n\treturn bcrypt.hashpw(line_for_hashing, bcrypt.gensalt())\r\n\r\ndef valid_hash(h, *args): # проверка хеша\r\n\tline_for_hashing = \"\"\r\n\tfor arg in args:\r\n\t\tline_for_hashing += str(arg)\r\n\tif bcrypt.hashpw(line_for_hashing, h) == h:\r\n\t\t\treturn True\r\n\r\ndef make_secure_val(val): #простое хеширование параметра(на выходе параметр|хеш)\r\n\treturn '%s|%s' %(val, hmac.new(secret, val).hexdigest())\r\n\r\ndef check_secure_val(secure_val): #проверка соответствия параметр-хеш\r\n\tval = secure_val.split('|')[0]\r\n\tif secure_val == make_secure_val(val):\r\n\t\treturn val\r\n\r\ndef render_str(template, **params): #подготовка шаблона\r\n\tt = jinja_env.get_template(template)\r\n\treturn t.render(params)\r\n\r\n\r\ndef clone_entity(e, **extra_args): #клон сущности\r\n\tklass = e.__class__ #получаем класс сущности который копируем\r\n\tprops = dict((k, v.__get__(e, klass)) for k, v in klass.properties().iteritems()) #копируем значения свойств из старой сущности в словарь\r\n\tprops.update(extra_args) #обновляем созданный словарь значениями из аргументов функции\r\n\treturn klass(**props) # создаем новую сущность и возвращаем её\r\n\r\nUSER_RE = 
re.compile(r\"^[\\w-]{3,20}$\")\r\ndef valid_username(username):\r\n\treturn username and USER_RE.match(username)\r\n\r\nPASS_RE = re.compile(r\"^.{3,20}$\")\r\ndef valid_password(password):\r\n\treturn password and PASS_RE.match(password)\r\n\r\nEMAIL_RE = re.compile(r'^[\\S]+@[\\S]+\\.[\\S]+$')\r\ndef valid_email(email):\r\n\treturn email and EMAIL_RE.match(email)\r\n\r\n#########################################################\r\n\r\nclass Nestedobject (object):\r\n\r\n\tdef __init__ (self, m, r, nest_level = 0):\r\n\t\tself.msg = m\r\n\t\tself.replies = r\r\n\t\tself.nest_level = nest_level # иерархический уровень комментария (нужно для определения какого уровня не делаем отступ в html-шаблоне)\r\n\t\t\t\r\ndef nest (flow, root_rep_id_list, deep = 0): #рекурсивное создание древовидной структуры из плоского списка предков и потомков\r\n\tmsglist = []\r\n\tnested_comments = []\r\n\tdeep += 1 #глубина рекурсии = иерархический уровень комментария\r\n\r\n\tfor rep_id in root_rep_id_list: #с помощью полученного списка ключей корневых ответов ветки составляем список объектов-ответов выбирая из плоского списка\r\n\t\tif rep_id in flow:\r\n\t\t\tmsglist.append(flow[rep_id])\t\t\r\n\r\n\tfor msg in msglist: # добавляем к массиву-результату сообщен��я. если у них есть ответы (replies), то вызываем рекурсивно функцию, со списком ключей ответов. если нет ответов то присваеваем значение None\r\n\t\tnested_comments.append(Nestedobject (msg, nest(flow, msg.replies, deep) if msg.replies else None, deep))\r\n\t\tlogging.error(msg.replies)\t\t\r\n\treturn nested_comments\r\n\r\n\r\n#########################################################\r\n\r\n##########################################################################\r\n#Модель пользователя\r\n##########################################################################\r\n\r\ndef users_key(group = 'default'): #задает путь к сущности(для разделения по группам)\r\n\treturn db.Key.from_path('users', group)\r\n\r\n\r\nclass Group (db.Model):\r\n\tname = db.StringProperty(required = True)\r\n\r\n\t@classmethod\r\n\tdef by_name(cls, name): # возвращает объект содержащий сущность из datastore с указанным именем\r\n\t\tu = Group.all().filter('name =', name).get()\r\n\t\treturn u\r\n\r\n\r\nclass User(db.Model):\r\n\t\"\"\"Класс для модели пользователя для сохранения и получения \r\n\t\t\t\t\tданных из datastore(и ни для чего другого)\"\"\"\r\n\r\n\tname = db.StringProperty(required = True)\r\n\tpw_hash = db.StringProperty(required = True)\r\n\tregister = db.DateTimeProperty(auto_now_add = True)\r\n\temail = db.StringProperty()\r\n\tpower = db.StringProperty()\r\n\tcomments = db.IntegerProperty()\r\n\tposts = db.IntegerProperty()\r\n\t\r\n\t@classmethod\r\n\tdef by_id(cls, uid): # возвращает объект содержащий сущность из datastore с указанным id\r\n\t\treturn User.get_by_id(uid, parent = users_key()) \r\n\r\n\t@classmethod\r\n\tdef by_name(cls, name): # возвращает объект содержащий сущность из datastore с указанным именем\r\n\t\tu = User.all().filter('name =', name).get()\r\n\t\treturn u\r\n\r\n\t@classmethod\r\n\tdef register(cls, name, pw, email = None):# создает объект-модель для записи в datastore\r\n\t\tpw_hash = make_hash(name, pw)\r\n\t\treturn User(parent = users_key(),\r\n\t\t\t\t\t name = name,\r\n\t\t\t\t\t pw_hash = pw_hash,\r\n\t\t\t\t\t email = email, \r\n\t\t\t\t\t power = 'member')\r\n\r\n\t@classmethod\r\n\tdef check_user(cls, name, pw): #проверка: 1) пользователь существует 2) пароль совпадает\r\n\t\tu = cls.by_name(name)\r\n\t\tif u and 
valid_hash(u.pw_hash, name, pw):\r\n\t\t\treturn u\r\n\r\n\t\r\n\tdef set_power (self, power):\r\n\t\t\r\n\t\tself.power = power\r\n\t\tif self.put(): return True\r\n\t\telse: return False\r\n\r\n\tdef check_power (self, action):\r\n\t\tif self.power == None: return False\r\n\t\treturn permits[self.power][action]#!!!!!!!!!!необходимо убрать хардкод и хранить разрешения в бд\r\n\r\n\r\n\t\t\t\r\n##########################################################################\r\n#Модель поста\r\n##########################################################################\r\n\r\nclass BlogEntry (db.Model):\r\n\r\n\tdef make_rudate(self, date_set = None):\t\t\t\r\n\r\n\t\tif not date_set: return pytils.dt.ru_strftime(u\"%d %b %Y %H\"+u\":\"+u\"%M\",inflected=True, date=self.created)\r\n\r\n\t\tday, month, year, hm = re.split(' ', pytils.dt.ru_strftime(u\"%d %b %Y %H\"+u\":\"+u\"%M\",inflected=True, date=self.created))\t\t\r\n\t\tif date_set == 'day' or 'month' or 'year': return vars()[date_set]\r\n\t\telse: return 'Date Error'\t\t\r\n\r\n\tdef make_rucomment(self, com_number):\r\n\t\treturn pytils.numeral.get_plural(0, u\"Комментарий, Комментария, Комментариев\", absence=u\"Комментариев пока нет\")\r\n\r\nclass Post (BlogEntry):\r\n\ttitle = db.StringProperty(required = True)\r\n\ttext = db.TextProperty(required = True)\r\n\tcomments = db.IntegerProperty(default = 0)\r\n\tauthor = db.StringProperty()\r\n\tcreated = db.DateTimeProperty(auto_now_add = True)\r\n\tedited = db.DateTimeProperty(auto_now_add = True)\r\n\treplies = db.ListProperty(int)\r\n\r\nclass Comment (BlogEntry):\r\n\ttext = db.TextProperty(required = True)\r\n\tauthor = db.StringProperty()\r\n\tcreated = db.DateTimeProperty(auto_now_add = True)\r\n\tedited = db.DateTimeProperty(auto_now_add = True)\r\n\treplies = db.ListProperty(int)\r\n##########################################################################\r\n#Модели сраниц\r\n##########################################################################\r\nclass MainHandler(webapp2.RequestHandler):\r\n\t\"\"\"Базовый класс для обработчиков запросов браузера\r\n\t\twrite() - отправляет аргументы на вывод браузеру\r\n\t\trender_str() - перегрузка технической функции(добавление параметра \"имя пользователя\")\r\n\t\trender() - отправляет шаблон на вывод браузера(предварительно вызывает рендер шаблона render_str)\r\n\t\"\"\"\r\n\t\r\n\tdef write(self, *a, **kw): #вывод текста на экран\r\n\t\tself.response.out.write(*a, **kw)\r\n\r\n\tdef render_str(self, template, **params): # добавление различных параметров в рендер шаблона \r\n\t\tparams['user'] = self.user\r\n\t\tparams.update(app_path)\t\t\r\n\t\treturn render_str(template, **params) # вызов технической функции с новым параметром\r\n\r\n\tdef render(self, template, **kw): # вывод шаблона на экран\r\n\t\tself.write(self.render_str(template, **kw))\r\n\r\n\tdef set_cookie(self, name, val, expires): # установка куки для сессии\t\t\t\r\n\t\texpires = (datetime.datetime.now() + datetime.timedelta(days=expires)).strftime('%a, %d %b %Y %H:%M:%S GMT')#Пока нет записи в датастор, а только в мемкэш больше трех дней не стоит делать.\r\n\t\tself.response.headers.add_header(\r\n\t\t\t'Set-Cookie',\r\n\t\t\t'%s=%s; expires=%s; Path=/' %(name, val, expires))\r\n\r\n\tdef read_secure_cookie(self, name): #чтение сессионной куки\r\n\t\tcookie_val = self.request.cookies.get(name)\t\t\r\n\t\treturn cookie_val and check_secure_val(cookie_val)\r\n\r\n\tdef check_session(self):\t\t\r\n\t\tif self.session.is_new():\t\t\t\r\n\t\t\treturn 
None\r\n\t\tcookie_val = self.request.cookies.get('ssid') #хеш из uid и ip\r\n\t\tif valid_hash(cookie_val, self.session['uid'], self.request.remote_addr):\t\t\t\r\n\t\t\treturn self.session['uid']\r\n\t\telse: \r\n\t\t\treturn None\r\n\t\r\n\tdef login(self, user): #логин пользователя (установка сессионной куки)\r\n\r\n\t\tssid = make_hash(user.key().id(), self.request.remote_addr)\r\n\t\tself.set_cookie('ssid', ssid, expires=SESSION_EXPIRES)\t\t\r\n\t\tself.session['uid'] = user.key().id()\r\n\r\n\t\tself.session['ssid'] = ssid\r\n\t\tself.session.save()\r\n\r\n\tdef logout(self): #логаут\r\n\t\t# self.response.headers.add_header('Set-Cookie', 'uid=; Path=/')\r\n\t\tself.session.invalidate()\r\n\r\n\tdef initialize(self, *a, **kw):\r\n\r\n\t\twebapp2.RequestHandler.initialize(self, *a, **kw)\r\n\t\t#uid = self.read_secure_cookie('user_id')\t\t\r\n\t\tself.session = gmemsess.Session(self)\r\n\t\tuid = self.check_session()\t\t\r\n\t\tself.user = uid and User.by_id(int(uid))\r\n\t\tif self.user is not None: #если пользователь существует сохраняем в объект его uid из датастора\r\n\t\t\tself.user.uid = int(uid)\r\n\t\t\t\r\n\t\r\nclass Blog(MainHandler):\t\r\n\tdef get(self, owner = \"Spinningmill\", page = 1):\t\t\t\t\r\n\t\ttext_flow = Post.all().ancestor(users_key(owner)).order('-created').fetch(10)\t\t\r\n\t\tif text_flow: \r\n\t\t\tfor msg in text_flow:\r\n\t\t\t\tif len(msg.text) > 1000:\r\n\t\t\t\t\tmsg.text = msg.text[0:1000] + \"...\"\r\n\t\t\tself.render(\"blog.html\", text_flow = text_flow, owner = owner)\r\n\t\telse: \r\n\t\t\ttext_flow = {'error':u'Пусто'}\r\n\t\t\tself.render(\"blog.html\", text_flow = text_flow)\r\n\r\n\tdef post(self, owner = \"Spinningmill\", page = 1):\r\n\t\ttitle = self.request.get(\"subject\")\r\n\t\ttext = self.request.get(\"content\")\r\n\t\tif self.user and self.user.check_power('blog_post'): #может ли юзер постить\r\n\t\t\tif title and text:\r\n\t\t\t\ta = Post(parent = users_key(self.user.name), title = title, text = text, author = self.user.name)\r\n\t\t\t\ta.put()\r\n\t\t\t\tmsg_id = str (a.key().id())\t\t\t\t\r\n\t\t\t\tself.redirect(app_path['main'])\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\terror = \"We need some text and it's title. 
Both.\"\r\n\t\t\t\tself.render_front(title = title, text = text, error = error)\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['login'])\r\n\r\nclass PostHandler (MainHandler):\r\n\r\n\tdef make_path(self, post_id, id_string):\r\n\t\tpath = ['Post', int(post_id)] #путь всегда начинается с поста к которому коментарии\r\n\t\tif id_string:#если строка с id пустая, значит родителем будет пост, если не пустая, то добавляем всех по очереди к пути\r\n\t\t\tid_list = re.split(',', id_string)\t\t\r\n\t\t\tfor comm_id in id_list:\r\n\t\t\t\tpath +=['Comment', int(comm_id)]\r\n\t\treturn path\r\n\r\n\tdef add_reply(self, owner, post_id, id_string = None, edit_mode = False):\r\n\t\ttext = self.request.get(\"content\")\t\t\r\n\t\tif text:\r\n\t\t\tparent = users_key(owner)\r\n\t\t\tp = Post.get_by_id(int(post_id), parent = parent)\r\n\r\n\t\t\tif id_string: # если передан id комментария (строка с ид предков и самого коммента), то операция с комментарием\r\n\t\t\t\tlogging.error(\"entry - comment\")\r\n\t\t\t\tentry_path = self.make_path(post_id, id_string)#собираем путь до сущности из id переданных из браузера\r\n\t\t\t\tentry_key = db.Key.from_path(*entry_path, parent = parent)\t#создаем из пути ключ\r\n\t\t\t\tentry = db.Model.get(parent_key) #получаем сущность из датастора\t\t\t\t\r\n\t\t\telse: # если нет строки с id то значит операция с постом\r\n\t\t\t\tlogging.error(\"entry - post\")\r\n\t\t\t\tentry = p\t\t\r\n\r\n\t\t\tif edit_mode: #редактируем сущность\r\n\r\n\t\t\t\tentry.text = new_text\r\n\t\t\t\tentry.edited = datetime.datetime.now()\r\n\t\t\t\tentry.put()\r\n\t\t\t\treturn entry\r\n\r\n\t\t\telse:#добавление комментария, entry - родитель нового комментария\r\n\t\t\t\tc = Comment (parent = entry, text = text, author = self.user.name)# сохраняем комментарий\r\n\t\t\t\tc.put()\t\t\t\r\n\t\t\t\tentry.replies.append(c.key().id())#добавляем id к списку id коментариев-потомков(ответов) родителя\r\n\t\t\t\tp.comments +=1\t\t#увеличиваем счетчик комментариев в посте\r\n\t\t\t\t#!!!сделать проверку успешной записи комментария и если ок, то увеличить счетчик комментариев.\r\n\t\t\t\t\r\n\t\t\t\tp.put()\t\t\t\r\n\t\t\t\tif p != entry: entry.put() #если родитель не пост, то тоже его сохраняем\r\n\t\t\t\treturn c\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['main'])#!!!!!!обработка ошибки пустого текста\r\n\r\n\tdef get (self, owner, post_id, com_id): #выводим пост с комментариями\r\n\t\tp=Post.get_by_id(int(post_id), parent = users_key(owner))\t\t\r\n\t\tif p:\r\n\t\t\tcom_flow = Comment.all().ancestor(p)\r\n\r\n\t\t\tcom_index = {}\r\n\t\t\troot_com_list = []\r\n\t\t\tfor com in com_flow:\r\n\t\t\t\tcom_index[com.key().id()] = com #создаем хеш ключ:объект (индекс по ид)\t\t\t\t\t\r\n\r\n\t\t\tnested_comments = nest (com_index, p.replies)\t\t\t\r\n\t\t\tself.render(\"post.html\", msg = p, com_flow = nested_comments, owner = owner)\r\n\t\t\r\n\r\n\r\n\tdef post (self, owner, post_id, comment_id): #добавляем комментарий\r\n\t\tif self.user and self.user.check_power('comment_post'):\t\t\t\r\n\t\t\tself.add_reply (post_id, comment_id, owner)\t\t\t\t\t\r\n\t\t\tself.redirect('/'+owner+app_path['blog']+'/'+post_id)\r\n\t\telse:\r\n\t\t\tself.redirect(app_path['login'])\r\n\r\nclass Signup(MainHandler):\r\n\t\"\"\"Модель для страницы регистрации\"\"\"\r\n\tdef get(self):\r\n\t\tself.render(\"signup.html\")\r\n\r\n\tdef post(self):\r\n\t\thave_error = False\r\n\t\tself.username = self.request.get('username')\r\n\t\tself.password = self.request.get('password')\r\n\t\tself.verify = 
self.request.get('verify')\r\n\t\tself.email = self.request.get('email')\r\n\r\n\t\tparams = dict(username = self.username,\r\n\t\t\t\t\t email = self.email) #cохраняем параметры для передачи обратно в форму в случае ошибки\r\n\r\n\t\tif not valid_username(self.username):\r\n\t\t\tparams['error_username'] = True\r\n\t\t\thave_error = True\r\n\r\n\t\tif not valid_password(self.password):\r\n\t\t\tparams['error_password'] = True\r\n\t\t\thave_error = True\r\n\r\n\t\telif self.password != self.verify:\r\n\t\t\tparams['error_verify'] = True\r\n\t\t\thave_error = True\r\n\r\n\t\tif not valid_email(self.email):\r\n\t\t\tparams['error_email'] = True\t\t\t\r\n\t\t\thave_error = True\r\n\r\n\t\tif have_error:\r\n\t\t\tself.render('signup.html', **params)\r\n\t\telse:\r\n\t\t\tself.done()\r\n\r\n\tdef done(self):\r\n\t\t#проверяем что такой пользователь не существует\r\n\t\tu = User.by_name(self.username)\r\n\t\tif u:\r\n\t\t\tmsg = u\"Пользователь с таким именем уже есть.\"\r\n\t\t\tself.render('signup.html', error_username = msg)\r\n\t\telse:\t\t\t\r\n\t\t\tu = User.register(self.username, self.password, self.email)\r\n\t\t\tu.put()\r\n\r\n\t\t\tself.login(u)\r\n\t\t\tself.redirect(app_path['main'])\r\n\r\nclass Login(MainHandler):\r\n\t\"\"\"Модель для страницы входа\"\"\"\r\n\tdef get(self):\r\n\t\tself.render('login.html')\r\n\r\n\tdef post(self):\r\n\t\tusername = self.request.get('username')\r\n\t\tpassword = self.request.get('password')\r\n\r\n\t\tu = User.check_user(username, password)\r\n\t\tif u:\r\n\t\t\tself.login(u)\r\n\t\t\tself.redirect(app_path['main'])\r\n\t\telse:\r\n\t\t\terror = u\"Имя пользователя или пароль введены не верно.\"\r\n\t\t\tself.render ('login.html', error = error)\r\n\r\n\r\nclass Logout(MainHandler):\r\n\t\"\"\"Модель для страницы выхода\"\"\"\r\n\tdef get(self):\r\n\r\n\t\tself.logout()\r\n\t\tself.redirect(app_path['main'])\r\n\r\n\r\nclass AjaxHandler(PostHandler):\r\n\r\n\tdef post(self, case, owner, post_id):\r\n\r\n\t\tif self.user:\r\n\t\t\tif case == 'addreply':\t\t\t\r\n\t\t\t\ttribe_id = self.request.get('ancestors') #список id предков\t\t\t\r\n\t\t\t\tif self.user.check_power('comment_post'):\r\n\t\t\t\t\ttext = self.request.get(\"content\")\t\t\t\t\r\n\t\t\t\t\tc = self.add_reply (owner, post_id, tribe_id)\r\n\t\t\t\t\tself.render('reply.html', com = c, nest_level = len(re.split(',',tribe_id)))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.render('reply.html', com = \"Error\", nest_level = len(re.split(',',tribe_id)))\r\n\t\t\t\t#self.write('Hello from server! We get and save: '+text)\r\n\t\t\telse:\r\n\t\t\t\t\tself.render('reply.html', com = \"Error\", nest_level = len(re.split(',',tribe_id)))\r\n\r\n\r\nclass Profile(MainHandler):\r\n\r\n\tdef get(self):\r\n\t\t#вывод странички с данными пользователя\r\n\t\tpass\r\n\r\nclass Maintance (MainHandler):\r\n\r\n\tdef get(self):\r\n\t\t\r\n\t\tif self.user and self.user.power == 'admin':\r\n\t\t\tusername = self.request.get('username')\r\n\t\t\tpower = self.request.get('power')\r\n\t\t\tif power and username:\r\n\t\t\t\tuser = User.by_name(username)\r\n\t\t\t\tuser.set_power(power)\t\t\t\r\n\t\t\t\toutput = u\"