diff --git "a/1383.jsonl" "b/1383.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1383.jsonl"
@@ -0,0 +1,1125 @@
+{"seq_id":"37077142134","text":"from ANPR import ANPR\nimport cv2 as cv\nimport argparse\nimport os\nfrom imutils import paths\nimport imutils\ndir_path = 'D:/Car_Parking/Images/self_cap_Data/test_Data/data/images/train'\n\n\ndef cleanup_text(text):\n # strip out non ASCII text so we can draw the text on the image\n # using openCV\n return \"\".join([c if ord(c) < 128 else \"\" for c in text]).strip()\n\n\ndef get_license_plate_text():\n # construct the argument parser and parse the arguments\n ap = argparse.ArgumentParser()\n\n # ap.add_argument(\"-i\", \"--input\", required=True,\n # \thelp=\"path to input directory of images\")\n ap.add_argument(\"-c\", \"--clear-border\", type=int, default=True,\n help=\"whether or to clear border pixels before OCR'ing\")\n ap.add_argument(\"-p\", \"--psm\", type=int, default=7,\n help=\"default PSM mode for OCR'ing license plates\")\n ap.add_argument(\"-d\", \"--debug\", type=int, default=-1,\n help=\"whether or not to show additional visualizations\")\n args = vars(ap.parse_args())\n # initialize our ANPR class\n anpr = ANPR(debug=args[\"debug\"] > 0)\n # grab all image paths in the input directory\n # imagePaths = sorted(list(paths.list_images(args[\"input\"])))\n # loop over all image paths in the input directory\n for imagePath in os.listdir(dir_path):\n # load the input image from disk and resize it\n print(os.path.join(dir_path, imagePath))\n image = cv.imread(os.path.join(dir_path, imagePath))\n image = imutils.resize(image, width=600)\n # apply automatic license plate recognition\n (lpText, lpCnt) = anpr.find_and_ocr(image, psm=args[\"psm\"],\n clearBorder=args[\"clear_border\"] > 0)\n # only continue if the license plate was successfully OCR'd\n if lpText is not None and lpCnt is not None:\n # fit a rotated bounding box to the license plate contour and\n # draw the bounding box on the license plate\n box = cv.boxPoints(cv.minAreaRect(lpCnt))\n box = box.astype(\"int\")\n cv.drawContours(image, [box], -1, (0, 255, 0), 2)\n # compute a normal (unrotated) bounding box for the license\n # plate and then draw the OCR'd license plate text on the\n # image\n (x, y, w, h) = cv.boundingRect(lpCnt)\n cv.putText(image, cleanup_text(lpText), (x, y-15),\n cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\n # show the output ANPR image\n print(\"[INFO] {}\".format(lpText))\n cv.imshow(\"Output ANPR\", image)\n return lpText\n # cv.waitKey(0)\n else:\n print(\"License Plate not found\")\n","repo_name":"TranDuyNghia1402/Car-Parking","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31941797626","text":"import sys\nimport os\nimport glob\nimport subprocess\nimport shutil\nimport argparse\nimport tempfile\n\nfrom typing import Optional, NamedTuple\n\nimport logging\nimport re\n\n\"\"\"\nCPA-witness2test module for validating witness files by using a generate-and-validate approach.\nCreates a test harness based on the violation witness given for an input file,\ncompiles the file with the created harness and checks whether the created program\nreaches the target location specified by the violation witness.\n\nCurrently, reachability, overflow and memory safety properties are supported.\n\"\"\"\n\n__version__ = \"0.1\"\n\n\nCOMPILE_ARGS_FIXED = [\"-D__alias__(x)=\"]\n\"\"\"List of compiler arguments that are always passed to the compiler.\"\"\"\n\n# Strings used to match expected error messages\nEXPECTED_ERRMSG_REACH = \"CPAchecker test harness: property violation reached\"\nEXPECTED_ERRMSG_OVERFLOW = \"runtime error:\"\nEXPECTED_ERRMSG_MEM_FREE = \"ERROR: AddressSanitizer: attempting free\"\nEXPECTED_ERRMSG_MEM_DEREF = \"ERROR: AddressSanitizer:\"\nEXPECTED_ERRMSG_MEM_MEMTRACK = \"ERROR: AddressSanitizer:\"\n\n# Used machine models\nMACHINE_MODEL_32 = \"32bit\"\nMACHINE_MODEL_64 = \"64bit\"\n\n# Possible results of CPAchecker for C harness generation\nRESULT_ACCEPT = \"FALSE\"\nRESULT_REJECT = \"TRUE\"\nRESULT_UNK = \"UNKNOWN\"\n\n# Regular expressions used to match given specification properties\nREGEX_REACH = re.compile(r\"G\\s*!\\s*call\\(\\s*([a-zA-Z0-9_]+)\\s*\\(\\)\\s*\\)\")\nREGEX_OVERFLOW = re.compile(r\"G\\s*!\\s*overflow\")\n_REGEX_MEM_TEMPLATE = r\"G\\s*valid-%s\"\nREGEX_MEM_FREE = re.compile(_REGEX_MEM_TEMPLATE % \"free\")\nREGEX_MEM_DEREF = re.compile(_REGEX_MEM_TEMPLATE % \"deref\")\nREGEX_MEM_MEMTRACK = re.compile(_REGEX_MEM_TEMPLATE % \"memtrack\")\n\nSPEC_REACH = \"unreach-call\"\nSPEC_OVERFLOW = \"no-overflow\"\nSPEC_MEM_FREE = \"valid-free\"\nSPEC_MEM_DEREF = \"valid-deref\"\nSPEC_MEM_MEMTRACK = \"valid-memtrack\"\n\n\nclass ValidationResult(NamedTuple):\n verdict: str\n violated_property: Optional[str] = None\n successful_harness: Optional[str] = None\n\n\nclass Specification(NamedTuple):\n no_overflow: bool\n mem_free: bool\n mem_deref: bool\n mem_memtrack: bool\n reach_method_call: Optional[str]\n\n def is_reach_call(self):\n return self.reach_method_call is not None\n\n @property\n def mem(self):\n return any((self.mem_free, self.mem_deref, self.mem_memtrack))\n\n def invalid(self):\n return not (\n self.no_overflow\n or self.mem_free\n or self.mem_deref\n or self.mem_memtrack\n or self.reach_method_call\n )\n\n\nclass ValidationError(Exception):\n \"\"\"Exception representing a validation error.\"\"\"\n\n def __init__(self, msg):\n self._msg = msg\n\n @property\n def msg(self):\n return self._msg\n\n\ndef get_cpachecker_version():\n \"\"\"Return the CPAchecker version used.\"\"\"\n\n executable = get_cpachecker_executable()\n result = execute([executable, \"-help\"], quiet=True)\n for line in result.stdout.split(os.linesep):\n if line.startswith(\"CPAchecker\"):\n return line.replace(\"CPAchecker\", \"\").strip()\n return None\n\n\ndef create_parser():\n descr = \"Validate a given violation witness for an input file.\"\n if sys.version_info >= (3, 5):\n parser = argparse.ArgumentParser(\n description=descr, add_help=False, allow_abbrev=False\n )\n else:\n parser = argparse.ArgumentParser(description=descr, add_help=False)\n\n parser.add_argument(\"-help\", action=\"help\")\n\n parser.add_argument(\n \"-version\", action=\"version\", 
version=\"{}\".format(get_cpachecker_version())\n )\n\n machine_model_args = parser.add_mutually_exclusive_group(required=False)\n machine_model_args.add_argument(\n \"-32\",\n dest=\"machine_model\",\n action=\"store_const\",\n const=MACHINE_MODEL_32,\n help=\"use 32 bit machine model\",\n )\n machine_model_args.add_argument(\n \"-64\",\n dest=\"machine_model\",\n action=\"store_const\",\n const=MACHINE_MODEL_64,\n help=\"use 64 bit machine model\",\n )\n machine_model_args.set_defaults(machine_model=MACHINE_MODEL_32)\n\n parser.add_argument(\n \"-outputpath\",\n dest=\"output_path\",\n type=str,\n action=\"store\",\n default=\"output\",\n help=\"path where output should be stored\",\n )\n\n parser.add_argument(\"-stats\", action=\"store_true\", help=\"show statistics\")\n\n parser.add_argument(\n \"-gcc-args\",\n dest=\"compile_args\",\n type=str,\n action=\"store\",\n nargs=argparse.REMAINDER,\n default=[],\n help=\"list of arguments to use when compiling the counterexample test\",\n )\n\n parser.add_argument(\n \"-spec\",\n dest=\"specification_file\",\n type=str,\n action=\"store\",\n required=True,\n help=\"specification file\",\n )\n\n parser.add_argument(\n \"-witness\",\n dest=\"witness_file\",\n required=True,\n type=str,\n action=\"store\",\n help=\"witness file\",\n )\n\n parser.add_argument(\"file\", help=\"file to validate witness for\")\n\n return parser\n\n\ndef _determine_file_args(argv):\n parameter_prefix = \"-\"\n files = []\n logging.debug(\"Determining file args from %s\", argv)\n for fst, snd in zip(argv[:-1], argv[1:]):\n if not fst.startswith(parameter_prefix) and not snd.startswith(\n parameter_prefix\n ):\n files.append(snd)\n logging.debug(\"Determined file args: %s\", files)\n return files\n\n\ndef _parse_args(argv):\n parser = create_parser()\n args, remainder = parser.parse_known_args(argv)\n args.file = _determine_file_args(argv)\n if not args.file:\n raise ValueError(\"The following argument is required: program file\")\n if len(args.file) > 1:\n raise ValueError(\n \"Too many values for argument: Only one program file supported\"\n )\n args.file = args.file[0]\n\n return args\n\n\ndef _create_compile_basic_args(args):\n compile_args = COMPILE_ARGS_FIXED + [x for x in args.compile_args if x is not None]\n if args.machine_model == MACHINE_MODEL_64:\n compile_args.append(\"-m64\")\n elif args.machine_model == MACHINE_MODEL_32:\n compile_args.append(\"-m32\")\n else:\n raise ValidationError(\"Neither 32 nor 64 bit machine model specified\")\n\n return compile_args\n\n\ndef _create_compiler_cmd_tail(harness, file, target):\n return [\"-o\", target, \"-include\", file, harness]\n\n\ndef create_compile_cmd(\n harness, program, target, args, specification, c_version=\"gnu11\"\n):\n \"\"\"Create the compile command.\n\n :param str harness: path to harness file\n :param str target: path to program under test\n :param args: arguments as parsed by argparse\n :param Specification specification: specification to compile for\n :param str c_version: C standard to use for compilation\n :return: list of command-line keywords that can be given to method `execute`\n \"\"\"\n\n if shutil.which(\"clang\"):\n compiler = \"clang\"\n else:\n compiler = \"gcc\"\n\n compile_cmd = [compiler] + _create_compile_basic_args(args)\n compile_cmd.append(\"-std={}\".format(c_version))\n\n sanitizer_in_use = False\n if specification.no_overflow:\n sanitizer_in_use = True\n compile_cmd += [\n \"-fsanitize=signed-integer-overflow\",\n \"-fsanitize=float-cast-overflow\",\n ]\n if 
specification.mem:\n sanitizer_in_use = True\n compile_cmd += [\"-fsanitize=address\", \"-fsanitize=leak\"]\n\n if sanitizer_in_use:\n # Do not continue execution after a sanitize error\n compile_cmd.append(\"-fno-sanitize-recover\")\n compile_cmd += _create_compiler_cmd_tail(harness, program, target)\n return compile_cmd\n\n\ndef _create_cpachecker_args(args, harness_output_dir):\n # It's important that we work with a copy of sys.argv here,\n # because we may modify cpachecker_args later,\n # but do not want to have sys.argv modified.\n # Slicing creates a copy in python, so no explicit copy necessary.\n cpachecker_args = sys.argv[1:]\n\n for compile_arg in [\"-gcc-args\"] + args.compile_args:\n if compile_arg in cpachecker_args:\n cpachecker_args.remove(compile_arg)\n\n cpachecker_args.append(\"-witness2test\")\n try:\n index_of_outputpath_param = cpachecker_args.index(\"-outputpath\")\n except ValueError:\n cpachecker_args += [\"-outputpath\", harness_output_dir]\n else:\n assert (index_of_outputpath_param + 1) < len(cpachecker_args), (\n \"Parameter -outputpath is missing an argument: \" + cpachecker_args\n )\n # Replace existing argument of outputpath with harness_output_dir,\n # so that the harness is written there for compilation.\n cpachecker_args[index_of_outputpath_param + 1] = harness_output_dir\n\n return cpachecker_args\n\n\ndef get_cpachecker_executable():\n \"\"\"Return the path to the CPAchecker executable 'cpa.sh'.\n If the executable is available in the systeme PATH, this executable is\n used. Otherwise, it is checked whether an executable 'cpa.sh' is\n available in the current directory './' or the './scripts' directory.\n\n :return str: the path to the executable.\n :raise ValidationError: if no CPAchecker executable found.\n \"\"\"\n executable_name = \"cpa.sh\"\n\n def is_exe(exe_path):\n return os.path.isfile(exe_path) and os.access(exe_path, os.X_OK)\n\n # Directories the CPAchecker executable may ly in.\n # It's important to put '.' 
and './scripts' last, because we\n # want to look at the \"real\" PATH directories first\n script_dir = os.path.dirname(os.path.realpath(__file__))\n path_candidates = os.environ[\"PATH\"].split(os.pathsep) + [\n script_dir,\n \".\",\n \".\" + os.sep + \"scripts\",\n ]\n for path in path_candidates:\n path = path.strip('\"')\n exe_file = os.path.join(path, executable_name)\n if is_exe(exe_file):\n return exe_file\n\n raise ValidationError(\"CPAchecker executable not found or not executable!\")\n\n\ndef create_harness_gen_cmd(args, harness_output_dir):\n cpa_executable = get_cpachecker_executable()\n harness_gen_args = _create_cpachecker_args(args, harness_output_dir)\n return [cpa_executable] + harness_gen_args\n\n\ndef find_harnesses(output_path):\n \"\"\"Returns a list of all harness files found in the given directory.\"\"\"\n return glob.glob(output_path + \"/*harness.c\")\n\n\ndef get_target_name(harness_name):\n \"\"\"Returns a name for the given harness file name.\"\"\"\n harness_number = re.search(r\"(\\d+)\\.harness\\.c\", harness_name).group(1)\n\n return \"test_cex\" + harness_number\n\n\ndef execute(command, quiet=False):\n \"\"\"Execute the given command.\n\n :param List[str] command: list of words that describe the command line.\n :param Bool quiet: whether to log the executed command line as INFO.\n :return subprocess.CompletedProcess: information about the execution.\n \"\"\"\n if not quiet:\n logging.info(\" \".join(command))\n return subprocess.run(\n command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True\n )\n\n\ndef analyze_result(test_result, harness, specification):\n \"\"\"Analyze the given test result and return its verdict.\n\n :param CompletedProcess test_result: result of test execution\n :param str harness: path to harness file\n :param Specification specification: specification to check result against\n :return: tuple of the verdict of the test execution and the violated property, if any.\n The verdict is one of RESULT_ACCEPT, RESULT_REJECT and RESULT_UNK.\n The violated property is one element of the given specification.\n \"\"\"\n results_and_violated_props = []\n\n def check(code, err_msg, spec_property):\n results_and_violated_props.append(\n _analyze_result_values(test_result, harness, code, err_msg, spec_property)\n )\n\n # For each specification property, check whether an error message\n # showing its violation was printed\n # TODO: Turn into dict() with loop to be more flexible and remove magic numbers\n if specification.reach_method_call:\n check(107, EXPECTED_ERRMSG_REACH, SPEC_REACH)\n if specification.no_overflow:\n check(1, EXPECTED_ERRMSG_OVERFLOW, SPEC_OVERFLOW)\n if specification.mem_free:\n check(1, EXPECTED_ERRMSG_MEM_FREE, SPEC_MEM_FREE)\n if specification.mem_deref:\n check(1, EXPECTED_ERRMSG_MEM_DEREF, SPEC_MEM_DEREF)\n if specification.mem_memtrack:\n check(1, EXPECTED_ERRMSG_MEM_MEMTRACK, SPEC_MEM_MEMTRACK)\n\n results = [r[0] for r in results_and_violated_props]\n if RESULT_ACCEPT in results:\n violated_prop = results_and_violated_props[results.index(RESULT_ACCEPT)][1]\n return RESULT_ACCEPT, violated_prop\n elif RESULT_UNK in results:\n return RESULT_UNK, None\n else:\n return RESULT_REJECT, None\n\n\ndef _analyze_result_values(\n test_result, harness, expected_returncode, expected_errmsg, spec_prop\n):\n if (\n test_result.returncode == expected_returncode\n and test_result.stderr\n and expected_errmsg in test_result.stderr\n ):\n logging.info(\n \"Harness %s reached expected property violation (%s).\", 
harness, spec_prop\n )\n return RESULT_ACCEPT, spec_prop\n elif test_result.returncode == 0:\n logging.info(\"Harness %s did not encounter _any_ error\", harness)\n return RESULT_REJECT, None\n else:\n logging.info(\"Run with harness %s was not successful\", harness)\n return RESULT_UNK, None\n\n\ndef _log_multiline(msg, level=logging.INFO):\n if type(msg) is list:\n msg_lines = msg\n else:\n msg_lines = msg.split(\"\\n\")\n for line in msg_lines:\n logging.log(level, line)\n\n\ndef get_spec(specification_file):\n \"\"\"Return the specification defined by the given specification file.\n\n :param str specification_file: specification file to read.\n :return Specification: specification described by file\n :raise ValidationError: if no specification file given, invalid or doesn't exist\n \"\"\"\n\n if not specification_file:\n raise ValidationError(\"No specification file given.\")\n if not os.path.isfile(specification_file):\n raise ValidationError(\n \"Specification file does not exist: %s\" % specification_file\n )\n\n with open(specification_file, \"r\") as inp:\n content = inp.read().strip()\n\n spec_matches = re.match(r\"CHECK\\(\\s*init\\(.*\\),\\s*LTL\\(\\s*(.+)\\s*\\)\", content)\n spec = None\n if spec_matches:\n no_overflow = REGEX_OVERFLOW.search(content)\n mem_free = REGEX_MEM_FREE.search(content)\n mem_deref = REGEX_MEM_DEREF.search(content)\n mem_memtrack = REGEX_MEM_MEMTRACK.search(content)\n try:\n method_call = REGEX_REACH.search(content).group(1)\n except AttributeError:\n # search returned None\n method_call = None\n\n spec = Specification(\n no_overflow=no_overflow,\n mem_free=mem_free,\n mem_deref=mem_deref,\n mem_memtrack=mem_memtrack,\n reach_method_call=method_call,\n )\n\n if spec is None or spec.invalid():\n raise ValidationError(\"No SV-COMP specification found in \" + specification_file)\n return spec\n\n\ndef _preprocess(program: str, spec: Specification, target: str):\n with open(program, \"r\") as inp:\n content = inp.read()\n\n # IMPORTANT: This assumes that any target function is not defined or defined on a single line of code\n if spec.reach_method_call:\n method_def_to_rename = spec.reach_method_call\n new_content = re.sub(\n method_def_to_rename + r\"(\\s*\\(.*\\) ){.*}\",\n method_def_to_rename + r\"\\1;\",\n content,\n )\n else:\n new_content = content\n\n with open(target, \"w\") as outp:\n outp.write(new_content)\n\n\ndef _execute_harnesses(\n created_harnesses, program_file, specification, output_dir, args\n):\n final_result = None\n violated_property = None\n successful_harness = None\n iter_count = 0 # Count how many harnesses were tested\n compile_success_count = 0 # Count how often compilation overall was successful\n c11_success_count = 0 # Count how often compilation with C11 standard was sucessful\n reject_count = 0\n for harness in created_harnesses:\n iter_count += 1\n logging.info(\"Looking at %s\", harness)\n exe_target = output_dir + os.sep + get_target_name(harness)\n compile_cmd = create_compile_cmd(\n harness, program_file, exe_target, args, specification\n )\n compile_result = execute(compile_cmd)\n\n _log_multiline(compile_result.stderr, level=logging.INFO)\n _log_multiline(compile_result.stdout, level=logging.DEBUG)\n\n if compile_result.returncode != 0:\n compile_cmd = create_compile_cmd(\n harness, program_file, exe_target, args, specification, \"gnu90\"\n )\n compile_result = execute(compile_cmd)\n _log_multiline(compile_result.stderr, level=logging.INFO)\n _log_multiline(compile_result.stdout, level=logging.DEBUG)\n\n if 
compile_result.returncode != 0:\n logging.warning(\"Compilation failed for harness %s\", harness)\n continue\n\n else:\n c11_success_count += 1\n compile_success_count += 1\n\n test_result = execute([exe_target])\n test_stdout_file = output_dir + os.sep + \"stdout.txt\"\n test_stderr_file = output_dir + os.sep + \"stderr.txt\"\n if test_result.stdout:\n with open(test_stdout_file, \"w+\") as output:\n output.write(test_result.stdout)\n logging.info(\"Wrote stdout of test execution to %s\", test_stdout_file)\n if test_result.stderr:\n with open(test_stderr_file, \"w+\") as error_output:\n error_output.write(test_result.stderr)\n logging.info(\"Wrote stderr of test execution to %s\", test_stderr_file)\n\n result, new_violated_property = analyze_result(\n test_result, harness, specification\n )\n if result == RESULT_ACCEPT:\n successful_harness = harness\n final_result = RESULT_ACCEPT\n if not violated_property: # Use first violated property\n violated_property = new_violated_property\n break\n elif result == RESULT_REJECT:\n reject_count += 1\n if not final_result:\n # Only set final result to 'reject' if no harness produces any error\n final_result = RESULT_REJECT\n else:\n final_result = RESULT_UNK\n\n if compile_success_count == 0:\n raise ValidationError(\"Compilation failed for every harness/file pair.\")\n\n statistics.append((\"Harnesses tested\", iter_count))\n statistics.append((\"C11 compatible\", c11_success_count))\n statistics.append((\"Harnesses rejected\", reject_count))\n\n return ValidationResult(\n verdict=final_result,\n violated_property=violated_property,\n successful_harness=successful_harness,\n )\n\n\nstatistics = []\n\n\ndef run(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n args = _parse_args(argv)\n output_dir = args.output_path\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n specification = get_spec(args.specification_file)\n\n with tempfile.TemporaryDirectory(suffix=\"cpa_witness2test_\") as harness_output_dir:\n try:\n harness_gen_cmd = create_harness_gen_cmd(args, harness_output_dir)\n harness_gen_result = execute(harness_gen_cmd)\n print(harness_gen_result.stderr)\n _log_multiline(harness_gen_result.stdout, level=logging.DEBUG)\n\n created_harnesses = find_harnesses(harness_output_dir)\n statistics.append((\"Harnesses produced\", len(created_harnesses)))\n\n if created_harnesses:\n with tempfile.NamedTemporaryFile(suffix=\".c\") as preprocessed_program:\n _preprocess(args.file, specification, preprocessed_program.name)\n result = _execute_harnesses(\n created_harnesses,\n preprocessed_program.name,\n specification,\n harness_output_dir,\n args,\n )\n else:\n result = ValidationResult(RESULT_UNK)\n finally:\n for i in os.listdir(harness_output_dir):\n source = os.path.join(harness_output_dir, i)\n target = os.path.join(output_dir, i)\n try:\n shutil.copytree(\n source,\n target,\n dirs_exist_ok=True,\n )\n except NotADirectoryError:\n shutil.move(source, target)\n\n if args.stats:\n print(os.linesep + \"Statistics:\")\n for prop, value in statistics:\n print(\"\\t\" + str(prop) + \": \" + str(value))\n print()\n\n if result.successful_harness:\n print(\"Harness %s was successful.\" % result.successful_harness)\n\n result_str = \"Verification result: %s\" % result.verdict\n if result.violated_property:\n result_str += (\n \". 
Property violation (%s) found by chosen configuration.\"\n % result.violated_property\n )\n print(result_str)\n\n\nlogging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\n\nif __name__ == \"__main__\":\n try:\n run()\n except ValidationError as e:\n logging.error(e.msg)\n print(\"Verification result: ERROR.\")\n sys.exit(1)\n","repo_name":"sosy-lab/cpachecker","sub_path":"scripts/cpa_witness2test.py","file_name":"cpa_witness2test.py","file_ext":"py","file_size_in_byte":21751,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"3"}
+{"seq_id":"4365114684","text":"from django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\nfrom conf import DEBUG\n\nif DEBUG:\n permission_classes = (permissions.AllowAny,)\nelse:\n permission_classes = (permissions.IsAdminUser,)\n\nopenapi_info = openapi.Info(\n title='{{ cookiecutter.project_name|capitalize }} API',\n default_version='v1',\n description='Server API for data store',\n)\nschema_view = get_schema_view(\n openapi_info,\n public=True,\n permission_classes=permission_classes,\n)\n\nurlpatterns = [\n path('api/', include('api.urls')),\n path('admin/', admin.site.urls),\n path('auth/', include('rest_framework.urls')),\n path('accounts/', include('django.contrib.auth.urls')),\n re_path(\n r'^api/swagger(?P\\.json|\\.yaml)$',\n schema_view.without_ui(cache_timeout=0),\n name='schema-json',\n ),\n re_path(\n r'^api/swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'\n ),\n]\n","repo_name":"ProjectTemplates/django-webpack-app","sub_path":"{{cookiecutter.project_name}}/core/server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"21313293062","text":"#!/python3\nimport chess_lib\nimport chess\nimport time\n# import numpy as np\n\nstr_seq = \"\"\"e2e4\ne7e5\nd1h5\nb8c6\nf1c4\ng8f6\nh5f7\"\"\"\n\nseq = [x.strip() for x in str_seq.split(\"\\n\")]\n\ndef main():\n a = chess_lib.Board()\n a.start_game()\n i = 0\n while not a.game_has_ended():\n display_bard(a)\n moves = a.get_legal_moves(False)\n print(\"Legal Moves: \")\n for index, move in enumerate(moves):\n print(\"\\t\", index, move)\n\n if i >= len(seq):\n move_index = int(input(\"which move? \"))\n else:\n for index, move in enumerate(moves):\n if repr(move) == seq[i]:\n move_index = index\n break\n i += 1\n a.put_move(moves[move_index])\n\n print(\"winner: \", a.winner())\n \ndef display_bard(board):\n board_arr = board.get_board()\n print(\"+ - - - - - - - - +\")\n for i in range(len(board_arr)-1, -1, -1):\n print(\"| \",end=\"\")\n for j in range(len(board_arr[i])):\n id = board_arr[i][j]\n piece = board.get_piece(id)\n if piece == None:\n print(\" \", end=\"\")\n else:\n print(piece.get_char(0 if id < 0 else 1), end=\"\")\n print(\" \", end=\"\")\n print(\"|\")\n print(\"+ - - - - - - - - +\")\n\n\ndef volume_test():\n moves = 10000\n i = 0\n prev_time = time.time()\n while i < moves:\n a = chess_lib.Board()\n a.start_game()\n while not a.game_has_ended():\n if i >= moves:\n break\n\n move = a.get_legal_moves(False)[0]\n a.put_move(move)\n i += 1\n\n delta = time.time() - prev_time\n print(\"total time =\", delta)\n print(\"speed:\", moves / delta)\n\ndef chess_test():\n moves = 10000\n i = 0\n prev_time = time.time()\n while i < moves:\n a = chess.Board()\n while not a.is_game_over():\n if i >= moves:\n break\n\n move = list(a.legal_moves)[0]\n a.push(move)\n i += 1\n\n delta = time.time() - prev_time\n print(\"total time =\", delta)\n print(\"speed:\", moves / delta)\n\n\nif __name__ == \"__main__\":\n # volume_test()\n chess_test()\n # main()\n\n\n","repo_name":"danielkopp4/ChessBot_old","sub_path":"ChessLibary/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"32761819991","text":"# Задача №5\nq = 0\ntime = 0\n\nspeed = [] # скорости\nbridge_length = int(input(\"Введите длину моста: \"))\nnumber_people = int(input(\"Введите количество человек, переходящих мост: \"))\n\nfor i in range(number_people):\n s = float(input(\"Скорость \" + str(q) + \"-го \" + \"=\"));\n q += 1\n speed.append(s)\nfaster = max(speed)\n\n\ndef bridge(n, faster, time):\n if not speed:\n return\n if n == 2:\n fast = max(speed)\n time += bridge_length / fast\n\n speed.remove(fast)\n fast = max(speed)\n\n print(speed)\n time += bridge_length / fast\n print(\"Общее время: \" + str(time))\n else:\n time += bridge_length / faster\n speed.remove(max(speed))\n\n fast = max(speed)\n\n time += bridge_length / fast\n\n n -= 1\n bridge(n, faster, time)\n\n\nbridge(number_people, faster, time)\n","repo_name":"VenikDev/python_graph_and_recursion","sub_path":"bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34999843596","text":"import os\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.staticfiles import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^users/', include('users.urls')),\n url(r'', include('quizzes.urls')),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += [\n url(r'^static/(?P.*)$', views.serve, {\n 'document_root': os.path.join(\n settings.BASE_DIR, 'core/static'\n )\n }),\n url(r'^__debug__/', include(debug_toolbar.urls))\n ]\n","repo_name":"srgypetrov/test.testing_service","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34104719101","text":"#Frog River One\n# Find the Earliest time when a frog can jump to the other side of a river\n# Easy\n\n'''\nYou are given an array A consisting of N integers representing the falling leaves.\nA[K] represents the position where one leaf falls at time K, measured in seconds.\n\nThe goal is to find the earliest time when the frog can jump to the other side of the river. \nThe frog can cross only when leaves appear at every position across the river from 1 to X \n(that is, we want to find the earliest moment when all the positions from 1 to X are covered by leaves).\n\nWrite an efficient algorithm for the following assumptions:\n\nN and X are integers within the range [1..100,000];\neach element of array A is an integer within the range [1..X].\n'''\n\ndef solution(X, A):\n times = [-1] * X\n \n for i in range(len(A)):\n if times[A[i] - 1] == -1:\n times[A[i] - 1] = i\n X = X - 1\n else:\n continue\n \n if X == 0:\n return i\n \n return -1\n","repo_name":"wookiekim/CodingPractice","sub_path":"codility/frogriverone.py","file_name":"frogriverone.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42146568180","text":"from setuptools import setup\n\npackage_name = 'voice'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='r1',\n maintainer_email='r1@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'voice_sub = voice.detection_subscriber:main',\n # 'main = voice.continuous_listener:main',\n ],\n },\n)\n \n","repo_name":"Shivansh2703/r-1","sub_path":"src/voice/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42794387505","text":"\"\"\"empty message\n\nRevision ID: af553fa94b1f\nRevises: \nCreate Date: 2018-06-12 13:41:49.968147\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'af553fa94b1f'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('organizations',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('organization_name', sa.String(length=10), nullable=False),\n sa.Column('organization_code', sa.String(length=10), nullable=True),\n sa.Column('create_time', sa.TIMESTAMP(), nullable=True),\n sa.Column('update_time', sa.TIMESTAMP(), nullable=True),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_organizations')),\n sa.UniqueConstraint('organization_code', name=op.f('uq_organizations_organization_code')),\n sa.UniqueConstraint('organization_name', name=op.f('uq_organizations_organization_name'))\n )\n op.create_table('roles',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('role_name', sa.String(length=50), nullable=True),\n sa.Column('role_permissions', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.TIMESTAMP(), nullable=True),\n sa.Column('update_time', sa.TIMESTAMP(), nullable=True),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_roles')),\n sa.UniqueConstraint('role_name', name=op.f('uq_roles_role_name'))\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_name', sa.String(length=50), nullable=False),\n sa.Column('password_md5', sa.String(length=50), nullable=False),\n sa.Column('real_name', sa.String(length=50), nullable=False),\n sa.Column('email', sa.String(length=50), nullable=True),\n sa.Column('organization_id', sa.Integer(), nullable=True),\n sa.Column('admin_flag', sa.Boolean(), nullable=True),\n sa.Column('create_time', sa.TIMESTAMP(), nullable=True),\n sa.Column('update_time', sa.TIMESTAMP(), nullable=True),\n sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], name=op.f('fk_users_organization_id_organizations')),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_users')),\n sa.UniqueConstraint('user_name', name=op.f('uq_users_user_name'))\n )\n op.create_table('users_roles',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('role_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['role_id'], ['roles.id'], name=op.f('fk_users_roles_role_id_roles')),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_users_roles_user_id_users')),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_users_roles'))\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('users_roles')\n op.drop_table('users')\n op.drop_table('roles')\n op.drop_table('organizations')\n # ### end Alembic commands ###\n","repo_name":"shenbing/QCM","sub_path":"migrations/versions/af553fa94b1f_.py","file_name":"af553fa94b1f_.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"677151651","text":"import json\nfrom itertools import islice\n\n# 461. Hamming Distance - Easy\n# The Hamming distance between two integers is the number of positions\n# at which the corresponding bits are different.\n# Given two integers x and y, return the Hamming distance between them.\n#\n# https://leetcode.com/problems/hamming-distance/\n\nclass Solution:\n def hammingDistance(self, x: int, y: int) -> int:\n x = x ^ y # total XOR\n res = 0\n while x: # go through all bits and count 1\n if x & 1:\n res += 1 \n x = x >> 1\n\n return res\n \n\nif __name__ == '__main__': \n with open('OUTPUT/IN', 'r') as f_in, open('OUTPUT/OUT', \"w\") as f_out:\n while True:\n n_args = 2\n args_raw = [x.rstrip() for x in islice(f_in, n_args)]\n if not args_raw:\n break\n\n exec = Solution()\n res = exec.hammingDistance(int(args_raw[0]), int(args_raw[1])) \n\n f_out.write(json.dumps(res) + '\\n')\n","repo_name":"yuvSid/interviewPrepare","sub_path":"python/hamming_distance.py","file_name":"hamming_distance.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"30643267450","text":"from Configuration import Configuration\r\nfrom predictors import Predictor\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Activation\r\nfrom keras.layers.recurrent import LSTM\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom Service.Utilities import Tools\r\nimport numpy\r\nimport pandas\r\nimport traceback\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport test\r\n#https://www.datacamp.com/community/tutorials/deep-learning-python#predict\r\n# pip install tensorflow keras\r\n\r\nclass dlPredictor(Predictor.Predictor):\r\n \r\n def __init__(self, dataManager,hist=-1,context=None):\r\n super().__init__(\"dl\",dataManager,hist)\r\n self.model = None\r\n self.features = 7\r\n \r\n self.scaler = MinMaxScaler(feature_range=(-1, 1))\r\n \r\n \r\n @staticmethod\r\n def timeseries_to_supervised(data, lag=1):\r\n df = pandas.DataFrame(data)\r\n columns = [df.shift(i) for i in range(1, lag+1)]\r\n columns.append(df)\r\n df = pandas.concat(columns, axis=1)\r\n df.fillna(0, inplace=True)\r\n return df\r\n \r\n @staticmethod\r\n def difference(dataset, interval=1):\r\n diff = list()\r\n for i in range(interval, len(dataset)):\r\n value = dataset[i] - dataset[i - interval]\r\n diff.append(value)\r\n return pandas.Series(diff)\r\n \r\n @staticmethod\r\n def inverse_difference(history, yhat, interval=1):\r\n return yhat + history[-interval]\r\n \r\n def scale(self,train, test):\r\n \r\n self.scaler = self.scaler.fit(train)\r\n \r\n train = train.reshape(train.shape[0], train.shape[1])\r\n train_scaled = self.scaler.transform(train)\r\n \r\n test = test.reshape(test.shape[0], test.shape[1])\r\n test_scaled = self.scaler.transform(test)\r\n return train_scaled, test_scaled\r\n \r\n def invert_scale(self,X, value):\r\n new_row = [x for x in X] + [value]\r\n array = numpy.array(new_row)\r\n array = array.reshape(1, len(array))\r\n inverted = self.scaler.inverse_transform(array)\r\n return inverted[0, -1]\r\n \r\n def fit_lstm(self,train, batch_size, nb_epoch, neurons):\r\n X, y = train[:, 0:-1], train[:, -1]\r\n X = X.reshape(X.shape[0], 1, X.shape[1])\r\n if self.model is None:\r\n \r\n self.model = Sequential()\r\n self.model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))\r\n self.model.add(Dense(1))\r\n self.model.compile(loss='mean_squared_error', optimizer='adam')\r\n \r\n for i in range(nb_epoch):\r\n self.model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\r\n self.model.reset_states()\r\n \r\n def forecast_lstm(self,batch_size, X):\r\n X = X.reshape(1, 1, len(X))\r\n yhat = self.model.predict(X, batch_size=batch_size)\r\n return yhat[0,0]\r\n \r\n def runAll(self):\r\n self.preprocess()\r\n \r\n def predict(self,sticker,timestamp,context):\r\n highhist = context[0]\r\n lowhist = context[1]\r\n closehist = context[2]\r\n volumehist = context[3]\r\n openhist = context[4]\r\n \r\n raw_values = closehist\r\n diff_values = dlPredictor.difference(raw_values, 1)\r\n supervised = dlPredictor.timeseries_to_supervised(diff_values, 1)\r\n supervised_values = supervised.values\r\n train, test = supervised_values[0:-1], supervised_values[-1:]\r\n train_scaled, test_scaled = self.scale(train, test)\r\n self.fit_lstm(train_scaled, 1, 50, 5)\r\n \r\n train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)\r\n self.model.predict(train_reshaped, batch_size=1)\r\n \r\n X, y = test_scaled[0, 0:-1], test_scaled[0, -1]\r\n yhat = self.forecast_lstm(1, X)\r\n 
\r\n yhat = self.invert_scale(X, yhat)\r\n \r\n yhat = self.inverse_difference(raw_values, yhat, len(test_scaled)+1)\r\n \r\n prediction = int(numpy.sign(yhat))\r\n \r\n confidence=1.0\r\n skip = False # if True, then not confident\r\n \r\n del X\r\n del y\r\n del diff_values\r\n del train\r\n del test\r\n del train_scaled\r\n del test_scaled\r\n \r\n del highhist\r\n del lowhist\r\n del closehist\r\n del volumehist\r\n del openhist\r\n \r\n \r\n return (prediction,confidence,skip)","repo_name":"binun/predict","sub_path":"predictors/dlPredictor.py","file_name":"dlPredictor.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71618006482","text":"#-------------------------------------------------------------------------------\n# 先頭が # で始まっている行はコメントである.\n\n# 当面,次の行は必ず書く.\nimport flask\nfrom flask import Flask, redirect, url_for, request, render_template, \\\n flash, abort, make_response\napp = Flask(__name__)\n\n# 次のように書くことによって,ブラウザからの要求を処理することができる:\n#\n# @app.route('相対URL')\n# def 名前():\n# .... 関数本体 ....\n#\n# 関数本体にはさまざまなことを書くことができる.少しずつ紹介する.\n\n# 下の関数は,http://localhost:8088/hello というアクセスを処理する.\n@app.route('/hello')\ndef func_hello():\n return 'こんにちは.'\n\n# @route('/hello')\n# 「http://localhost:8088」を除いた部分 (/hello) が相対URLとして\n# 指定されている.\n#\n# def func_hello():\n# def の後ろに書く名前は,何でも良い.ただし,\n# - 半角英字で始まる半角英数字\n# - 1つのファイル中で複数回同じ名前を使ってはいけない.\n# 最後のコロンを忘れやすいので注意する.\n#\n# 関数本体は,半角スペース4つを行頭に置く.\n# もっとも簡単なものは,return '文字列'\n# return 'こんにちは.'\n#\n\n# 下の関数は,http://localhost:8088/bye というアクセスを処理する.\n@app.route('/bye')\ndef func_bye():\n return 'さようなら.'\n\n# 下の関数は,http://localhost:8088/Tsurumi/University/LAIS\n# というアクセスを処理する.\n@app.route('/Tsurumi/University/LAIS')\ndef func_tu():\n return '鶴見大学ドキュメンテーション学科'\n\n# 下のように,HTML文書を返すこともできる.\n# ただし,普通はこのような書き方はせず,templateというものを使う.後述.\n# ここでは,HTML文書も返せるということを示すために書いている.\n@app.route('/rich_hello')\ndef func_rich_hello():\n html_page = '''\n\n \n \n \n \n
ようこそ
\n \n
こんにちは.どうぞよろしく
\n \n \n\n'''\n return flask.render_template_string(html_page)\n\n# ファイル末尾には,当面,必ず下の行を書く.\napp.run(host='localhost', port=8088, debug=True)\n","repo_name":"dbe2-2023/dbe2-2023.github.io","sub_path":"content/docs/flask/020basic/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29239786857","text":"import unittest\nfrom mock import patch, Mock\n\nfrom maps.infra.monitoring.sla_calculator.core.graphite import graphite_statuses\n\n\nclass GraphiteStatusesTest(unittest.TestCase):\n @patch('requests.get')\n def test(self, get_method):\n SAMPLING_RATE = 15\n response = Mock()\n response.json.return_value = [{\n \"target\": \"test_target\",\n \"datapoints\": [\n # Spend 2 + 1 intervals under the treshold\n [\n # value\n 1.0,\n # timestamp\n 10 * SAMPLING_RATE\n ],\n [\n # This is still not more than 2. So this is counted as bad request.\n 2.0,\n (10 + 2) * SAMPLING_RATE\n ],\n # Following two intervals are good\n [\n 3.0,\n (10 + 2 + 1) * SAMPLING_RATE\n ],\n [\n 4.0,\n (10 + 2 + 1 + 1) * SAMPLING_RATE\n ]]}]\n get_method.return_value = response\n\n statuses = graphite_statuses('test_target', '2017-01-01', more_than=2)\n statuses.set_index('status', inplace=True)\n\n self.assertAlmostEqual(statuses['amount'][200], 2)\n self.assertAlmostEqual(statuses['amount'][500], 3)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/graphite_test.py","file_name":"graphite_test.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22359877824","text":"from ontology.builtins import concat\nfrom ontology.interop.Ontology.Runtime import Base58ToAddress\nfrom ontology.interop.System.App import RegisterAppCall, DynamicAppCall\nfrom ontology.interop.System.ExecutionEngine import GetExecutingScriptHash\nfrom ontology.interop.System.Runtime import CheckWitness, Notify\nfrom ontology.interop.System.Storage import GetContext, Put, Get\nfrom ontology.libont import bytearray_reverse\n\nCURRENT_PRICE = 'CurrentPrice'\n\nOWNER = Base58ToAddress('AbG3ZgFrMK6fqwXWR1WkQ1d1EYVunCwknu')\n\nChainlinkCall = RegisterAppCall('ed6bb0abbe24e5603a7f2a5c44e056f3eaeb5949', 'operation', 'args')\nChainlinkClientCall = RegisterAppCall('fb11d3b30a54ae147e86f57d9e554578f68a0041', 'operation', 'args')\nLink = RegisterAppCall('bfb52e4b8a5b49099e1ac0ef55789053f2ea347d', 'operation', 'args')\nOracleCall = RegisterAppCall('04dc7f8a0ff88de0784ef742650a1d79495565ae', 'operation', 'args')\nCBORCall = RegisterAppCall('3f75e2814021abed8a616da8d408d1347cac988f', 'operation', 'args')\n\nContractAddress = GetExecutingScriptHash()\n\n\ndef Main(operation, args):\n if operation == 'requestEthereumPrice':\n assert (len(args) == 3)\n oracle = args[0]\n jobId = args[1]\n payment = args[2]\n return requestEthereumPrice(oracle, jobId, payment)\n\n return False\n\n\ndef requestEthereumPrice(oracle, jobId, payment):\n # assert (CheckWitness(OWNER))\n req = ChainlinkClientCall('buildChainlinkRequest', [jobId, ContractAddress, 'fullfill'])\n req = ChainlinkCall('add', [req, \"url\", \"https://etherprice.com/api\"])\n req = ChainlinkCall('addStringArray', [req, \"path\", [\"recent\", \"usd\"]])\n # Notify([OWNER, oracle, req, payment])\n assert (ChainlinkClientCall('sendChainlinkRequestTo', [OWNER, oracle, req, payment]))\n return [OWNER, oracle, req, payment]\n\n\ndef addStringArray(request, key, values):\n request = CBORCall('encodeString', [request, key])\n request = CBORCall('startArray', request)\n for value in range(values):\n request = CBORCall('encodeString', [request, value])\n request = CBORCall('endSequence', request)\n return request\n\n\ndef DynamicCallFunction(callAddress, callbackFunctionId, params):\n res = DynamicAppCall(callAddress, callbackFunctionId, params)\n if res and res == b'\\x01':\n return True\n else:\n return False\n\n\ndef DynamicCallFunctionResult(callAddress, callbackFunctionId, params):\n return DynamicAppCall(callAddress, callbackFunctionId, params)","repo_name":"ontio/oracle-chainlink-smartcontract","sub_path":"ont-contracts/demo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"25014272985","text":"\"\"\"\nModule containing numba compiled filtering functions for usage in symbolic\ndynamics implementations.\n\nAuthors:\\n\n- Philipp Schuette\\n\n\"\"\"\n\nfrom typing import Tuple, no_type_check\n\nimport numba as nb # type: ignore\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom pyzeta.core.pyzeta_types.general import tBoolMat, tMat, tWordVec\nfrom pyzeta.core.pyzeta_types.special import tLetter, tMask, tWord\n\n##############################################\n# Numba Versions of Symbolic Dynamic Methods #\n##############################################\n\n\n@nb.jit(\n nb.uint32[:, :](nb.uint32[:, :], nb.bool_[:, :]),\n nopython=True,\n fastmath=True,\n cache=True,\n)\n@no_type_check\ndef matMul(left: tMat, right: tBoolMat) -> tMat:\n \"\"\"\n Multiply two matrices `left` and `right` fast. Performs no checks.\n\n :param left: First matrix to multiply\n :param right: Second matrix to multiply\n :return: Matrix product\n \"\"\"\n dim: np.uint8 = left.shape[0]\n result: NDArray[np.uint32] = np.zeros((dim, dim), dtype=np.uint32)\n for i in range(dim):\n for j in range(dim):\n for k in range(dim):\n result[i][j] += left[i][k] * right[k][j]\n return result\n\n\n@nb.jit(\n nb.uint8[:, :](nb.uint8, nb.bool_[:, :]),\n nopython=True,\n fastmath=True,\n cache=True,\n)\n@no_type_check\ndef getWordsFast(n: int, adj: tBoolMat) -> tMat:\n \"\"\"\n Generate all words of length `n` for alphabet with adjacency matrix `adj`.\n\n :param n: Length of words to be generated\n :param adj: Adjacency matrix over some alphabet\n :return: Array containing all words of length `n`\n \"\"\"\n alphabetSize: np.uint8 = adj.shape[0]\n adjPower: NDArray[np.uint32] = np.eye(alphabetSize, dtype=np.uint32)\n for i in range(1, n):\n adjPower = matMul(adjPower, adj)\n wordNum: nb.uint8 = np.sum(adjPower)\n adjNonNull = np.nonzero(adj)\n # array for storing generated words; initialise with all entries set to -1\n result: tWordVec = np.full((wordNum, n), -1, dtype=tLetter)\n tmp: tWordVec\n\n # initialize words of length 1:\n for i in range(alphabetSize):\n result[i][0] = i\n adjPower = np.eye(alphabetSize, dtype=np.uint32)\n\n for length in range(1, n):\n wordNum = np.sum(adjPower)\n adjPower = matMul(adjPower, adj)\n tmp = np.copy(result)\n pos: np.uint8 = 0\n for i in range(wordNum):\n word: tWord = tmp[i]\n adjRow: tWord = adjNonNull[1][adjNonNull[0] == word[length - 1]]\n for letter in adjRow:\n for k in range(length):\n result[pos][k] = word[k]\n result[pos][k + 1] = letter\n pos += 1\n\n return result\n\n\n@nb.jit(\n nb.uint8[:, :](nb.uint8, nb.bool_[:, :], nb.uint8[:, :]),\n nopython=True,\n fastmath=True,\n cache=True,\n)\n@no_type_check\ndef appendWordsFast(n: int, adj: tBoolMat, words: tWordVec) -> tWordVec:\n \"\"\"\n Append to given words according to a given adjacency matrix to obtain words\n of a given length\n\n :param n: Length of words to be generated\n :param adj: Adjacency matrix determining valid words\n :param words: Array containing (all) words of some length <`n`\n :return: Array containing all words of length `n`\n \"\"\"\n alphabetSize: np.uint8 = adj.shape[0]\n givenWordNum: np.uint8 = words.shape[0]\n givenWordLen: np.uint8 = words.shape[1]\n adjPower: NDArray[np.uint32] = np.eye(alphabetSize, dtype=np.uint32)\n for i in range(1, n):\n adjPower = matMul(adjPower, adj)\n wordNum: nb.uint8 = np.sum(adjPower)\n adjNonNull = np.nonzero(adj)\n\n # array for storing generated words; initialise with all entries set to -1\n result: tWordVec = np.full((wordNum, 
n), -1, dtype=tLetter)\n tmp: tWordVec\n\n result[:givenWordNum, :givenWordLen] = words\n adjPower = np.eye(alphabetSize, dtype=np.uint32)\n for length in range(1, givenWordLen):\n adjPower = matMul(adjPower, adj)\n\n for length in range(givenWordLen, n):\n wordNum = np.sum(adjPower)\n adjPower = matMul(adjPower, adj)\n tmp = np.copy(result)\n pos: nb.uint8 = 0\n for i in range(wordNum):\n word: tWord = tmp[i]\n adjRow: tWord = adjNonNull[1][adjNonNull[0] == word[length - 1]]\n for letter in adjRow:\n for k in range(length):\n result[pos][k] = word[k]\n result[pos][k + 1] = letter\n pos += 1\n\n return result\n\n\n@nb.guvectorize(\n [(nb.uint8[:], nb.bool_[:])], \"(n)->()\", nopython=True, cache=True\n)\n@no_type_check\ndef isPrimeFast(word: tWord, res: bool):\n \"\"\"\n Check if given `word` is prime. Accepts arrays of symbolic words.\n\n :param word: Word (or array of words) of letters from a given alphabet\n :return: `True` iff word is prime\n \"\"\"\n n: np.uint8 = len(word)\n kPerm: bool = True\n res[0] = True\n\n for k in range(1, n // 2 + 1):\n if n % k == 0:\n for m in range(1, n // k):\n if np.any(word[:k] != word[m * k : (m + 1) * k]):\n kPerm = False\n break\n if kPerm:\n res[0] = False\n break\n kPerm = True\n\n\n@nb.jit(\n nb.bool_(nb.uint8[:], nb.uint8[:, :]),\n fastmath=True,\n nopython=True,\n cache=True,\n)\n@no_type_check\ndef containsPermFast(word: tWord, wordsToCheck: tWordVec) -> bool:\n \"\"\"\n Return `True` if `wordList` contains a permutation of `word`.\n\n :param word: Word over a given alphabet\n :param wordsToCheck: array of words over the same alphabet\n :return: `True` iff `wordsToCheck` contain permutations of `word`\n \"\"\"\n n: np.uint8 = len(word)\n for _ in range(n):\n word = np.roll(word, 1)\n for checkWord in wordsToCheck:\n if np.all(word == checkWord):\n return True\n return False\n\n\n@nb.jit(nb.bool_[:](nb.uint8[:, :]), fastmath=True, nopython=True, cache=True)\n@no_type_check\ndef filterPermsFast(words: tWordVec) -> tMask:\n \"\"\"\n Filter out all permutations from a list of `words` after the first\n occurrence.\n\n :param words: array of words over a given alphabet\n :return: Mask that implements this filter on the list\n \"\"\"\n size: np.int8 = len(words)\n mask: tMask = np.zeros(size, dtype=np.bool_)\n mask[0] = True\n for i in range(1, size):\n mask[i] = not containsPermFast(words[i], words[:i])\n return mask\n\n\n@nb.guvectorize(\n [(nb.uint8[:], nb.bool_[:, :], nb.bool_[:])],\n \"(n),(m,m)->()\",\n nopython=True,\n cache=True,\n)\n@no_type_check\ndef isCyclRedFast(word: tWord, adj: tBoolMat, res: Tuple[bool]) -> None:\n \"\"\"\n Return `True` if last to first letter of `word` defines a valid transtion\n (i.e. word is cyclically reduced). Accepts arrays of symbolic words.\n\n :param word: Word (or array of words) over a given alphabet\n :return: `True` if word is cyclically reduced, `False` otherwise\n \"\"\"\n n: np.uint8 = len(word)\n if adj[word[n - 1]][word[0]] == 0:\n res[0] = False\n else:\n res[0] = True\n\n\n@nb.guvectorize(\n [(nb.uint8[:], nb.bool_[:])], \"(n)->()\", nopython=True, cache=True\n)\n@no_type_check\ndef isPeriodicFast(word: tWord, res: Tuple[bool]) -> None:\n \"\"\"\n Check if given `word` is periodic. 
Accepts arrays of symbolic words.\n\n :param word: Word (or array of words) over a given alphabet\n :return: `True` if word is periodic, `False` otherwise\n \"\"\"\n n: np.uint8 = len(word)\n if word[0] == word[n - 1]:\n res[0] = True\n else:\n res[0] = False\n","repo_name":"Spectral-Analysis-UPB/PyZeta","sub_path":"pyzeta/core/dynamics/symbolic_dynamics/helpers/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":7584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"42570816150","text":"#!/usr/bin/python\n# coding=utf-8\n# class\n\n\nclass MyClass:\n \"\"\"a simple example class\"\"\"\n # 类变量,实例所共享的变量\n myFavoriteFruits = []\n\n # 构造函数\n def __init__(self, name):\n self.name = name # 实例变量\n\n def addMyFavoriteFruits(self, fruit):\n self.myFavoriteFruits.append(fruit)\n\n# define class end\n\n# 实例化MyClass\ninstance1 = MyClass(\"zhaoyingnan\")\ninstance2 = MyClass(\"mengdi\")\nprint(instance1.name) # zhaoyingnan\nprint(instance2.name) # mengdi\n\n# 可见类变量是实例所共享的\ninstance1.addMyFavoriteFruits(\"apple\")\nprint(instance1.myFavoriteFruits) # ['apple']\ninstance2.addMyFavoriteFruits(\"banana\")\nprint(instance1.myFavoriteFruits) # ['apple', 'banana']\n","repo_name":"ljj038/wuye.python","sub_path":"class/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13041892763","text":"import numpy\nfrom numpy import testing as nptest\nfrom scipy.signal import gausspulse\n\nfrom .. import qtransform\nfrom ...table import EventTable\nfrom ...segments import Segment\nfrom ...timeseries import TimeSeries\n\n__author__ = 'Alex Urban '\n\n\n# -- global variables ---------------------------------------------------------\n\n# create noise and a glitch template at 1000 Hz\nNOISE = TimeSeries(\n numpy.random.normal(size=4096 * 10), sample_rate=4096, epoch=-5)\nGLITCH = TimeSeries(\n gausspulse(NOISE.times.value, fc=500)*10, sample_rate=4096)\nDATA = NOISE + GLITCH\n\n# global test objects\nSEARCH = Segment(-0.25, 0.25)\nQGRAM, FAR = qtransform.q_scan(DATA, search=SEARCH)\nQSPECGRAM = QGRAM.interpolate()\n\n\n# -- test utilities -----------------------------------------------------------\n\ndef test_far():\n # test that FAR is better than 1 / Hubble time\n assert FAR < 1 / (1.37e10 * 365 * 86400)\n\n\ndef test_monotonicity():\n # test that Q-plane frequencies are strictly increasing\n freq = QGRAM.plane.frequencies\n assert (freq[1:] > freq[:-1]).all()\n\n\ndef test_q_scan():\n # scan with the TimeSeries method\n ts_qspecgram = DATA.q_transform(whiten=False)\n\n # test spectrogram output\n assert ts_qspecgram.q == QSPECGRAM.q\n assert ts_qspecgram.shape == QSPECGRAM.shape\n assert ts_qspecgram.dtype == numpy.dtype('float32')\n nptest.assert_allclose(ts_qspecgram.value, QSPECGRAM.value)\n\n\ndef test_unnormalised_q_scan():\n # scan with norm=False\n ts_qspecgram = DATA.q_transform(whiten=False, norm=False)\n\n # test spectrogram output\n assert ts_qspecgram.q == QSPECGRAM.q\n assert ts_qspecgram.dtype == numpy.dtype('float64')\n\n\ndef test_q_scan_fd():\n # create test object from frequency-domain input\n fdata = DATA.fft()\n fs_qgram, far = qtransform.q_scan(\n fdata, duration=abs(DATA.span), sampling=DATA.sample_rate.value,\n search=SEARCH, epoch=fdata.epoch.value)\n fs_qspecgram = fs_qgram.interpolate()\n\n # test that the output is the same\n assert far == FAR\n assert fs_qspecgram.q == QSPECGRAM.q\n assert fs_qspecgram.dtype == numpy.dtype('float32')\n assert fs_qspecgram.shape == QSPECGRAM.shape\n nptest.assert_allclose(fs_qspecgram.value, QSPECGRAM.value, rtol=3e-2)\n\n\ndef test_qtable():\n # test EventTable output\n qtable = QGRAM.table()\n imax = qtable['energy'].argmax()\n assert isinstance(qtable, EventTable)\n assert qtable.meta['q'] == QGRAM.plane.q\n nptest.assert_almost_equal(qtable['time'][imax], QGRAM.peak['time'])\n nptest.assert_almost_equal(qtable['duration'][imax], 1/1638.4)\n nptest.assert_almost_equal(qtable['frequency'][imax],\n QGRAM.peak['frequency'])\n nptest.assert_almost_equal(\n qtable['bandwidth'][imax],\n 2 * numpy.pi ** (1/2.) * qtable['frequency'][imax] / QGRAM.plane.q)\n nptest.assert_almost_equal(qtable['energy'][imax], QGRAM.peak['energy'])\n\n # it's enough to check consistency between the shape of time and\n # frequency columns, because of the way they're calculated\n assert qtable['time'].shape == qtable['frequency'].shape\n\n # test that too high an SNR threshold returns an empty table\n assert len(QGRAM.table(snrthresh=1e9)) == 0\n","repo_name":"gwpy/gwpy","sub_path":"gwpy/signal/tests/test_qtransform.py","file_name":"test_qtransform.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"3"}
+{"seq_id":"71050961363","text":"# Rebar Table CNS 560\nrebar_table = {\"D10\": {\"area\": 0.7133, \"db\": 0.953, \"weight\": 0.560, \"l\": 3.00},\n \"D13\": {\"area\": 1.2670, \"db\": 1.270, \"weight\": 0.994, \"l\": 4.00},\n \"D16\": {\"area\": 1.9860, \"db\": 1.590, \"weight\": 1.560, \"l\": 5.00},\n \"D19\": {\"area\": 2.8650, \"db\": 1.910, \"weight\": 2.250, \"l\": 6.00},\n \"D22\": {\"area\": 3.8710, \"db\": 2.220, \"weight\": 3.040, \"l\": 7.00},\n \"D25\": {\"area\": 5.0670, \"db\": 2.540, \"weight\": 3.980, \"l\": 8.00},\n \"D29\": {\"area\": 6.4690, \"db\": 2.870, \"weight\": 5.080, \"l\": 9.00},\n \"D32\": {\"area\": 8.1430, \"db\": 3.220, \"weight\": 6.390, \"l\": 10.1},\n \"D36\": {\"area\": 10.070, \"db\": 3.580, \"weight\": 7.900, \"l\": 11.3},\n \"D43\": {\"area\": 14.520, \"db\": 4.300, \"weight\": 11.40, \"l\": 13.5}}\n\n\nclass Rebar:\n def __init__(self, size):\n self.__size = size\n\n @property\n def area(self) -> float:\n return rebar_table[self.__size][\"area\"]\n\n @property\n def weight(self) -> float:\n return rebar_table[self.__size][\"weight\"]\n\n\nclass TopBar(Rebar):\n def __init__(self, qty, size, dT):\n super().__init__(size)\n self.n = qty\n self.dT = dT\n self.areas = self.n * self.area\n\n\nclass BotBar(Rebar):\n def __init__(self, qty, size, dB):\n super().__init__(size)\n self.n = qty\n self.dB = dB\n self.areas = self.n * self.area\n\n\nclass Stirrup(Rebar):\n def __init__(self, n_leg, size, spacing):\n super().__init__(size)\n self.n_leg = n_leg\n self.spacing = spacing\n self.areas = self.n_leg * self.area\n\n\nif __name__ == \"__main__\":\n top_bar = [TopBar(3, \"D22\", 6.0),\n TopBar(2, \"D22\", 11.0)]\n print(top_bar[0].areas)\n","repo_name":"tonysum33/pyConcrete","sub_path":"rebar.py","file_name":"rebar.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4989302443","text":"import matplotlib.pyplot as plt\nfrom read_file import select_original_breakpoints\nfrom collections import defaultdict\n\n\nif __name__ == '__main__':\n N = 5\n slopes, intervals = select_original_breakpoints(N, 'segm/segmented_curves_filtered.txt')\n\n slopes_hist = defaultdict(lambda: [])\n intervals_hist = defaultdict(lambda: [])\n\n for sample in slopes:\n for i in range(N):\n slopes_hist[i].append(sample[i])\n\n for sample in intervals:\n for i in range(N):\n intervals_hist[i].append(sample[i])\n\n fig, axs = plt.subplots(5, 2, figsize=(6.5, 10), sharex='col')\n for i in range(N):\n axs[i][0].hist(slopes_hist[i])\n axs[i][0].set_xlabel('$\\\\alpha_%d$' % (i + 1))\n axs[i][0].set_ylabel('Occurrences', fontsize=12)\n for i in range(N):\n axs[i][1].hist(intervals_hist[i])\n axs[i][1].set_xlabel('$l_%d$' % (i + 1), fontsize=12)\n\n plt.tight_layout()\n plt.savefig('histogram_by_var.pdf')\n","repo_name":"carolmb/viewing-profiles-of-scientific-articles","sub_path":"plots/histogram_by_vars.py","file_name":"histogram_by_vars.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20458034459","text":"class Card:\n \"\"\" This class is for each individual card.\"\"\"\n\n def __init__(self, suit, value, buffer):\n self.suit = suit\n self._value = value\n self.points = self.getPoints(buffer)\n self.name = self.getName(buffer)\n\n def getName(self, buffer):\n if self._value < 10:\n return str(self._value)\n elif self._value == 2+buffer:\n return \"Queen\"\n elif self._value == 3+buffer:\n return \"Jack\"\n elif self._value == 4+buffer:\n return \"King\"\n elif self._value == 10+buffer:\n return str(7)\n elif self._value == 11+buffer:\n return \"Ace\"\n\n def getPoints(self, buffer):\n points = self._value-buffer\n if points < 0:\n return 0\n else:\n return points\n","repo_name":"nunes-pedro/sueca","sub_path":"src/Card.py","file_name":"Card.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"11577292671","text":"\"\"\"Handles performing evaluations on results.\"\"\"\n\nfrom typing import Dict\n\nimport lark as _lark\nfrom pavilion import utils\nfrom pavilion.parsers import (check_expression, get_expr_parser,\n EvaluationExprTransformer,\n VarRefVisitor, match_examples,\n BAD_EXAMPLES)\nfrom ..errors import ParserValueError, StringParserError, ResultError\nfrom .base import BASE_RESULTS\n\n\ndef check_evaluations(evaluations: Dict[str, str]):\n \"\"\"Check all evaluations for basic errors.\n\n :raises ResultError: For detected problems.\n \"\"\"\n\n for key, expr in evaluations.items():\n if key in BASE_RESULTS:\n raise ResultError(\n \"Key '{}' in result.evaluate section is reserved.\"\n .format(key))\n\n try:\n check_expression(expr)\n except StringParserError as err:\n raise ResultError(\n \"Error parsing evaluate expression for key '{}':\\n{}\\n{}\"\n .format(key, err.message, err.context)\n )\n\ndef evaluate_results(results: dict, evaluations: Dict[str, str],\n base_log: utils.IndentedLog = None):\n \"\"\"Perform result evaluations using an expression parser. The variables\n in such expressions are pulled from the results data structure, and the\n results are stored there too.\n :param results: The result dict. Will be modified in place.\n :param evaluations: A dictionary of evals to perform.\n :param base_log: The optional logger function from (result.get_result_logger)\n :return:\n \"\"\"\n\n base_log = base_log or utils.IndentedLog()\n base_log(\"Evaluating result evaluations.\")\n\n log = utils.IndentedLog()\n\n if 'result' not in results and 'result' not in evaluations:\n evaluations['result'] = 'return_value == 0'\n\n try:\n parse_evaluation_dict(evaluations, results, log)\n except StringParserError as err:\n raise ResultError(\"\\n\".join([err.message, err.context]))\n except ValueError as err:\n # There was a reference loop.\n raise ResultError(err.args[0])\n finally:\n base_log.indent(log)\n\n\ndef parse_evaluation_dict(eval_dict: Dict[str, str], results: dict,\n log: utils.IndentedLog) -> None:\n \"\"\"Parse the dictionary of evaluation expressions, given that some of them\n may contain references to each other. Each evaluated value will be stored\n under its corresponding key in the results dict.\n\n :raises StringParserError: When there's an error parsing or resolving\n one of the expressions. 
The error will already contain key information.\n :raises ValueError: When there's a reference loop.\n \"\"\"\n\n parser = get_expr_parser()\n transformer = EvaluationExprTransformer(results)\n var_ref_visitor = VarRefVisitor()\n\n unresolved = {}\n\n for key, expr in eval_dict.items():\n log(\"Parsing the evaluate expression '{}'\".format(expr))\n try:\n tree = parser.parse(expr)\n except (_lark.UnexpectedCharacters, _lark.UnexpectedToken) as err:\n # Try to figure out why the error happened based on examples.\n err_type = match_examples(err, parser.parse, BAD_EXAMPLES, expr)\n log(\"Error parsing expression, failing.\")\n log(err_type)\n log(err.get_context(expr))\n raise StringParserError(\n \"Error evaluating expression '{}' for key '{}':\\n{}\"\n .format(expr, key, err_type), err.get_context(expr))\n\n var_refs = var_ref_visitor.visit(tree)\n\n unresolved[key] = (tree, var_refs, expr)\n\n log(\"Resolving evaluations.\")\n\n while unresolved:\n resolved = []\n for key, (tree, var_refs, expr) in unresolved.items():\n for var in var_refs:\n if var in unresolved:\n break\n else:\n log(\"Resolving evaluation '{}': '{}'\".format(key, expr))\n try:\n results[key] = transformer.transform(tree)\n except ParserValueError as err:\n log(\"Error resolving evaluation: {}\".format(err.args[0]))\n log(err.get_context(expr))\n\n # Any value errors should be converted to this error type.\n raise StringParserError(err.args[0], err.get_context(expr))\n resolved.append(key)\n log(\"Value resolved to: '{}'\".format(results[key]))\n\n if not resolved:\n # Pass up the unresolved\n raise ValueError(\"Reference loops found amongst evaluation keys \"\n \"{}.\".format(tuple(unresolved.keys())))\n\n for key in resolved:\n del unresolved[key]\n\n log(\"Finished resolving expressions\")\n","repo_name":"hpc/pavilion2","sub_path":"lib/pavilion/result/evaluations.py","file_name":"evaluations.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"3"}
+{"seq_id":"34885445874","text":"import calendar\nimport webbrowser\nimport tempfile\nfrom datetime import date, timedelta\nfrom jinja2 import Template\nimport pandas as pd\n\nweekly_agenda_template = \"\"\"\n\n\n\n Daily Agenda for {{ from_date }} to {{ to_date }}\n\n\n\n
\n    {% for key, value in tasks.iterrows() %}\n    <div>\n        <div>Date: {{ value['due'] }}</div>\n        <div>{{ value['task'] }}</div>\n    </div>\n    {% endfor %}\n
\n\n {# a comment #}\n\n\n\"\"\"\n\n\ndef weekly_agenda(tasks, get_config=lambda x, d: d, today=None):\n weekdays = list(calendar.day_name)\n if today is None:\n today = date.today()\n\n df = pd.DataFrame([x.as_dict() for x in tasks.ls])\n print(df)\n df['month'] = pd.DatetimeIndex(df['due']).month\n df['year'] = pd.DatetimeIndex(df['due']).year\n df['day'] = pd.DatetimeIndex(df['due']).day\n df['dayofweek'] = pd.DatetimeIndex(df['due']).dayofweek\n # df['weekofyear'] = pd.DatetimeIndex(df['due']).isocalendar().week\n\n from_date = min(df[\"due\"])\n to_date = max(df[\"due\"])\n template = Template(weekly_agenda_template)\n x = template.render(tasks=df,\n from_date=from_date,\n to_date=to_date,\n today=today)\n #print(x)\n html_report_file = None\n with tempfile.NamedTemporaryFile(\"w\", suffix=\".html\", delete=False) as tf:\n tf.write(x)\n html_report_file = 'file://' + tf.name\n webbrowser.open_new(html_report_file)\n\n\nif __name__ == '__main__':\n test_tasks = []\n t = date.today()\n delta = timedelta(days=1)\n for i in range(3):\n td = t + (delta * i)\n test_tasks.append({ \"task\": \"blah\" + str(i), \"due\": td})\n test_tasks.append({ \"task\": \"bluh\" + str(i), \"due\": td})\n weekly_agenda(test_tasks)\n","repo_name":"abhishekmishra/idetodo","sub_path":"view_calendar.py","file_name":"view_calendar.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71559460563","text":"from django.core.exceptions import ValidationError\n\nfrom accounts.models import SimpleUserProfile\n\n\ndef validate_user_is_service(user_profile: SimpleUserProfile):\n if not user_profile.is_service:\n raise ValidationError(\n \"This user profile is not a service\"\n )\n","repo_name":"IldarSaygafarov2/api_hisay","sub_path":"api/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20044702512","text":"from pathlib import Path\n\nimport bisect\nfrom PIL import Image\nfrom tqdm import tqdm\nimport pandas as pd\nimport imageio\n\nimport torch\nimport torchvision.transforms as tfms\nfrom torch.utils.data import Dataset, ConcatDataset, Subset\n\nfrom utils import logger\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nDATA_BASE = Path('./data').resolve()\n## doyun' dev computer\n#MGH_DATA_BASE = DATA_BASE.joinpath('cxr/mgh/covid19_v4').resolve()\n#MGH_DATA_BASE = DATA_BASE.joinpath('cxr/mgh/v4').resolve()\n#MGH_DATA_BASE = DATA_BASE.joinpath('cxr/mgh/v4_crop').resolve()\n#LOF_DATA_BASE = DATA_BASE.joinpath('cxr/mgh/outlier_cat329').resolve()\n#ATLAS_DATA_BASE = DATA_BASE.joinpath('cxr/mgh/v4_crop').resolve()\n## LMIC devbox computer\n#MGH_DATA_BASE = DATA_BASE.joinpath('CheXpert-v1.0/external_data').resolve()\n#MGH_DATA_BASE = DATA_BASE.joinpath('NIH/external_data_pa').resolve()\n#MGH_DATA_BASE = DATA_BASE.joinpath('MIMIC_v1/external_data_pa').resolve()\n#MGH_DATA_BASE = DATA_BASE.joinpath('covid19_v4').resolve()\n#MGH_DATA_BASE = DATA_BASE.joinpath('v4').resolve()\nMGH_DATA_BASE = DATA_BASE.joinpath('v4_crop').resolve()\nLOF_DATA_BASE = DATA_BASE.joinpath('outlier').resolve()\nATLAS_DATA_BASE = DATA_BASE.joinpath('v4_crop').resolve()\n\nlabel_name = ['Bone>Fracture>.', 'Bone>Non-fracture>.', 'Diaphragm>Diaphragm>.',\n 'Foreign body>.>.', 'Hilar/mediastinum>Aorta>.',\n 'Hilar/mediastinum>Cardiomegaly>.', 'Hilar/mediastinum>Hilar area>.',\n 'Hilar/mediastinum>Mediastinum>.',\n 'Lung density>Decreased density (Lucency)>Cavity/Cyst',\n 'Lung density>Decreased density (Lucency)>Emphysema',\n 'Lung density>Increased lung density>Atelectasis',\n 'Lung density>Increased lung density>Nodule/mass',\n 'Lung density>Increased lung density>Other interstitial opacity',\n 'Lung density>Increased lung density>Pulmonary edema',\n 'Lung density>Increased lung density>pneumonia',\n 'Lung volume>Decreased lung volume>.',\n 'Lung volume>Increased lung volume>.', 'Pleura>Other pleural lesions>.',\n 'Pleura>Pleural effusion>.', 'Pleura>Pneumothorax>.']\n\nfolder_name = ['b_f', 'b_nf', 'd_d', 'fb', 'hm_a',\n 'hm_c', 'hm_ha', 'hm_m', 'ld_dd_cc', 'ld_dd_e',\n 'ld_ild_a', 'ld_ild_nm', 'ld_ild_oio', 'ld_ild_pe', 'ld_ild_p',\n 'lv_dlv', 'lv_ilv', 'p_opl', 'p_pe', 'p_p']\n\nClean_Neg = False\nClean_Neg_list = [('Bone>Fracture>.','clean_negative3.2.1_Bone_Bone_Fracture_BLANK.csv'),\n ('Bone>Non-fracture>.','clean_negative3.2.1_Bone_Bone_Non-fracture_BLANK.csv'),\n ('Diaphragm>Diaphragm>.','clean_negative3.2.1_Below diaphragm _Diaphragm_Diaphragm_BLANK.csv'),\n ('Foreign body>.>.','clean_negative3.2.1_Whole CXR_Foreign body_BLANK_BLANK.csv'),\n ('Hilar/mediastinum>Aorta>.','clean_negative3.2.1_Hilar mediastinum_Hilar mediastinum_Aorta_BLANK.csv'),\n ('Hilar/mediastinum>Cardiomegaly>.','clean_negative3.2.1_Hilar mediastinum_Hilar mediastinum_Cardiomegaly_BLANK.csv'),\n ('Hilar/mediastinum>Hilar area>.','clean_negative3.2.1_Hilar mediastinum_Hilar mediastinum_Hilar area_BLANK.csv'),\n ('Hilar/mediastinum>Mediastinum>.','clean_negative3.2.1_Hilar mediastinum_Hilar mediastinum_Mediastinum_BLANK.csv'),\n ('Lung density>Decreased density (Lucency)>Cavity/Cyst','clean_negative3.2.1_Lung_Lung density_Decreased density (Lucency)_Cavity Cyst.csv'),\n ('Lung density>Decreased density (Lucency)>Emphysema','clean_negative3.2.1_Lung_Lung density_Decreased density (Lucency)_Emphysema.csv'),\n ('Lung density>Increased lung 
density>Atelectasis','clean_negative3.2.1_Lung_Lung density_Increased lung density_Atelectasis.csv'),\n ('Lung density>Increased lung density>Nodule/mass','clean_negative3.2.1_Lung_Lung density_Increased lung density_Nodule mass.csv'),\n ('Lung density>Increased lung density>Other interstitial opacity','clean_negative3.2.1_Lung_Lung density_Increased lung density_Other interstitial opacity.csv'),\n ('Lung density>Increased lung density>Pulmonary edema','clean_negative3.2.1_Lung_Lung density_Increased lung density_Pulmonary edema.csv'),\n ('Lung density>Increased lung density>pneumonia','clean_negative3.2.1_Lung_Lung density_Increased lung density_pneumonia.csv'),\n ('Lung volume>Decreased lung volume>.','clean_negative3.2.1_Lung_Lung volume_Decreased lung volume_BLANK.csv'),\n ('Lung volume>Increased lung volume>.','clean_negative3.2.1_Lung_Lung volume_Increased lung volume_BLANK.csv'),\n ('Pleura>Other pleural lesions>.','clean_negative3.2.1_Pleura_Pleura_Other pleural lesions_BLANK.csv'),\n ('Pleura>Pleural effusion>.','clean_negative3.2.1_Pleura_Pleura_Pleural effusion_BLANK.csv'),\n ('Pleura>Pneumothorax>.','clean_negative3.2.1_Pleura_Pleura_Pneumothorax_BLANK.csv')]\n\ndef _tb_load_manifest(file_path, num_labels=31, name_labels=None, name_paths=None, mode='single', ext_data=False, fl_balance=False, r_seed=-1):\n if not file_path.exists():\n logger.error(f\"manifest file {file_path} not found.\")\n raise RuntimeError\n\n logger.debug(f\"loading dataset manifest {file_path} ...\")\n df = pd.read_csv(str(file_path)).fillna(0)\n\n if (not ext_data) and (True): # using the clean-set\n # cleanset\n if True:\n ## MGH validation set\n df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)]\n if r_seed != -1:\n df = df.sample(n=1000, replace=True, random_state=r_seed)\n\n if (False):\n df = df.loc[(df['Hilar/mediastinum>Cardiomegaly>.']==1) \n | (df['Lung density>Increased lung density>Atelectasis'] == 1)\n | (df['Lung density>Increased lung density>Pulmonary edema'] == 1)\n | (df['Lung density>Increased lung density>pneumonia'] == 1)\n | (df['Pleura>Pleural effusion>.'] == 1)]\n df.reset_index(drop=True, inplace=True)\n\n ## MGH testset\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][0:250]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][250:500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][500:750]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][750:]\n \n ## CheXpert trainset\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][0:250]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][250:500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][500:750]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][750:1000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1000:1250]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1250:1500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1500:1750]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1750:2000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2000:2250]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2250:2500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2500:2750]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2750:3000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][3000:3250]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][3250:3500]\n #df = 
df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][3500:3750]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][3750:4000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][4000:4250]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][4250:4500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][4500:]\n\n ## NIH trainset\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][0:500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][500:1000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1000:1500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1500:2000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2000:2500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2500:3000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][3000:3500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][3500:4000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][4000:]\n\n ## MIMIC trainset\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][0:500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][500:1000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1000:1500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][1500:2000]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2000:2500]\n #df = df.loc[(df['bad_age'] == 0) & (df['bad_quality'] == 0)][2500:]\n\n if (Clean_Neg):\n #hilar area special care\n for cl_feature, cl_file in Clean_Neg_list:\n df_case = pd.read_csv(MGH_DATA_BASE.joinpath('clean_nagative_data_v5_deblank/'+cl_file), names=['ACC'])\n #df_case = pd.read_csv(MGH_DATA_BASE.joinpath('clean_nagative_data_v5/'+cl_file))\n #df_case = pd.read_csv(MGH_DATA_BASE.joinpath('clean_negative_data_v5_321/'+cl_file), names=['ACC'])\n df[f'{cl_feature}'] = df[f'{cl_feature}'].replace(0, -2)\n #df.loc[df.AccessionNumber.isin(df_case.ACC), f'{cl_feature}'] = 0\n df.loc[(df.AccessionNumber.isin(df_case.ACC))&(df[f'{cl_feature}']==-2), f'{cl_feature}'] = 0\n\n if (fl_balance):\n for k, feature in enumerate(label_name):\n num_p = df.loc[(df[f'{feature}'] == 1)].shape[0]\n num_n = df.loc[(df[f'{feature}'] == 0)].shape[0]\n ratio_pn = num_p / num_n\n ratio_th = 5\n if (ratio_pn < (1.0/ratio_th)):\n df[f'{feature}'] = df[f'{feature}'].replace(0, -1)\n df_n = df.loc[(df[f'{feature}'] == -1)].sample(n=(num_p*ratio_th), random_state=2020)\n df[f'{feature}'].loc[df['AccessionNumber'].isin(df_n['AccessionNumber'])] = 0\n\n pos = df[f'{feature}'].loc[df[f'{feature}']==1].shape[0]\n neg = df[f'{feature}'].loc[df[f'{feature}']==0].shape[0]\n dontcare = df[f'{feature}'].loc[df[f'{feature}']==-1].shape[0]\n\n logger.info(f'[{k:02d}-{feature}] pos: {pos}, neg: {neg}, dont-care: {dontcare}')\n\n if name_labels == None:\n df = df[~(df.iloc[:, -(num_labels+1):-1] == -1).all(1)]\n else:\n df = df[~(df[name_labels] == -1).all(1)]\n df.reset_index(drop=True, inplace=True)\n\n if (Clean_Neg):\n for cl_feature, cl_file in Clean_Neg_list:\n df[f'{cl_feature}'] = df[f'{cl_feature}'].replace(-2, -1)\n\n if False:\n for k, feature in enumerate(label_name):\n num_p = df.loc[(df[f'{feature}'] == 1)].shape[0]\n num_n = df.loc[(df[f'{feature}'] == 0)].shape[0]\n num_i = df.loc[(df[f'{feature}'] == -1)].shape[0]\n\n print(f'{feature}-{num_p}-{num_p/df.shape[0]}-{num_i}-{num_i/df.shape[0]}-{num_n}-{num_n/df.shape[0]}')\n exit(-1)\n\n if (True): # in order to add clinical information to 
network\n df['ScaledSex'] = df.sex.replace(0, -1)\n weight_gender = 10\n weight_age = 100\n min_age = 11.0\n max_age = 100.0\n #df.PatientAge = (df.PatientAge-min(df.PatientAge))/(max(df.PatientAge)-min(df.PatientAge))\n df['ScaledAge'] = (df.PatientAge-min_age)/(max_age-min_age)\n df.ScaledAge = weight_age * (df.ScaledAge - 0.5)\n df['ScaledSex'] = weight_gender * df.ScaledSex\n\n df.reset_index(drop=True, inplace=True)\n\n else:\n try:\n df['ScaledSex'] = df.sex.replace(0, -1)\n weight_gender = 10\n weight_age = 100\n min_age = 11.0\n max_age = 117.0\n #df.PatientAge = (df.PatientAge-min(df.PatientAge))/(max(df.PatientAge)-min(df.PatientAge))\n df['ScaledAge'] = (df.PatientAge-min_age)/(max_age-min_age)\n df.ScaledAge = weight_age * (df.ScaledAge - 0.5)\n df['ScaledSex'] = weight_gender * df.ScaledSex\n except:\n df['ScaledAge'] = 0\n df['ScaledSex'] = 0\n\n\n if (mode == 'single') | (mode == 'extd'):\n LABELS = df.columns[-(num_labels+1):-1] if name_labels == None else name_labels\n labels = df[LABELS].astype(int)\n paths = df['PATH'] if name_paths == None else df[name_paths]\n ages = df['ScaledAge'].astype(float)\n genders = df['ScaledSex'].astype(float)\n df_tmp = pd.concat([paths, ages, genders, labels], axis=1)\n elif mode == 'double':\n LABELS = df.columns[-(num_labels+2):-2] if name_labels == None else name_labels\n labels = df[LABELS].astype(int)\n paths = df[df.columns[-2:]] if name_paths == None else df[name_paths]\n df_tmp = pd.concat([paths, labels], axis=1)\n else:\n raise RuntimeError\n\n entries = df_tmp\n\n logger.debug(f\"{len(entries)} entries are loaded.\")\n return entries\n\n# data augmentation - 512\ntrain_transforms = tfms.Compose([\n tfms.ToPILImage(),\n tfms.Resize(562, Image.LANCZOS),\n tfms.RandomRotation((-10, 10)),\n tfms.RandomCrop((512, 512)),\n tfms.RandomHorizontalFlip(p=0.01), #with 1% horizontal flip\n tfms.ToTensor(),\n])\n\ntest_transforms = tfms.Compose([\n tfms.ToPILImage(),\n tfms.Resize((512, 512), Image.LANCZOS),\n tfms.ToTensor(),\n])\n\ndef get_image(img_path, transforms):\n image = imageio.imread(img_path)\n image_tensor = transforms(image)\n image_tensor = image_tensor[:1, :, :]\n #print(f'{img_path}-{image_tensor.shape}')\n return image_tensor\n\n\nclass CxrDataset(Dataset):\n transforms = train_transforms\n\n def __init__(self, base_path, manifest_file, num_labels=31, name_labels=None, name_paths=None, mode='single', ext_data=False, csv_path=None, fl_balance=False, r_seed=-1, *args, **kwargs):\n super().__init__(*args, **kwargs)\n manifest_path = base_path.joinpath(manifest_file).resolve() if csv_path == None else csv_path.joinpath(manifest_file).resolve()\n self.entries = _tb_load_manifest(manifest_path, num_labels=num_labels, name_labels=name_labels, name_paths=name_paths, mode=mode, ext_data=ext_data, fl_balance=fl_balance, r_seed = r_seed)\n self.base_path = base_path\n self.mode = mode\n self.name_labels = name_labels\n\n def __getitem__(self, index):\n # need to debug\n def get_entries(index):\n df = self.entries.loc[index]\n if (self.mode == 'single') | (self.mode == 'extd'):\n paths = self.base_path.joinpath(df[0]).resolve()\n label = df[3:].tolist() if self.name_labels == None else df[self.name_labels].tolist()\n age = df[1]\n gender = df[2]\n return paths, label, age, gender\n else:\n paths = [self.base_path.joinpath(df[0]).resolve(), self.base_path.joinpath(df[1]).resolve()]\n label = df[2:].tolist() if self.name_labels == None else df[self.name_labels].tolist()\n return paths, label\n\n if (self.mode == 'single') | (self.mode == 
'extd'):\n img_path, label, age, gender = get_entries(index)\n image_tensor = get_image(img_path, CxrDataset.transforms)\n target_tensor = torch.FloatTensor(label)\n clinic_tensor = torch.FloatTensor([age, gender])\n #clinic_tensor = torch.FloatTensor([age])\n return image_tensor, target_tensor, clinic_tensor\n elif self.mode == 'double':\n img_paths, label = get_entries(index)\n image_tensor0 = get_image(img_paths[0], CxrDataset.transforms)\n image_tensor1 = get_image(img_paths[1], CxrDataset.transforms)\n target_tensor = torch.FloatTensor(label)\n return image_tensor0, image_tensor1, target_tensor\n else:\n raise RuntimeError\n\n\n def __len__(self):\n return len(self.entries)\n\n def get_label_counts(self, indices=None):\n df = self.entries if indices is None else self.entries.loc[indices]\n counts = [df[x].value_counts() for x in self.labels]\n new_df = pd.concat(counts, axis=1).fillna(0).astype(int)\n return new_df\n\n @property\n def labels(self):\n #if self.mode == 'single':\n # return self.entries.columns[1:].values.tolist()\n #elif self.mode == 'extd':\n if (self.mode == 'single') | (self.mode == 'extd'):\n return self.entries.columns[3:].values.tolist()\n else:\n return self.entries.columns[2:].values.tolist()\n\n @staticmethod\n def train():\n CxrDataset.transforms = train_transforms\n\n @staticmethod\n def eval():\n CxrDataset.transforms = test_transforms\n\nclass CxrConcatDataset(ConcatDataset):\n\n #def __init__(self, *args, **kwargs):\n # super().__init__(*args, **kwargs)\n # self.get_label_counts()\n\n def get_label_counts(self, indices=None):\n if indices is None:\n indices = list(range(self.__len__()))\n dataset_indices = [bisect.bisect_right(self.cumulative_sizes, idx) for idx in indices]\n sample_indices = [(i if d == 0 else i - self.cumulative_sizes[d - 1]) for i, d in zip(indices, dataset_indices)]\n nested_indices = [[] for d in self.datasets]\n for d, s in zip(dataset_indices, sample_indices):\n nested_indices[d].append(s)\n dfs = []\n for d, dataset in enumerate(self.datasets):\n dfs.append(dataset.get_label_counts(nested_indices[d]))\n df = pd.concat(dfs, sort=False).groupby(level=0).sum().astype(int)\n for dataset in self.datasets:\n assert len(df.columns) == len(dataset.labels), \"label names should be matched!\"\n return df\n\n @property\n def labels(self):\n return self.datasets[0].labels\n\n\nclass CxrSubset(Subset):\n\n #def __init__(self, *args, **kwargs):\n # super().__init__(*args, **kwargs)\n # self.get_label_counts()\n\n def get_label_counts(self, indices=None):\n if indices is None:\n indices = list(range(self.__len__()))\n\n df = self.dataset.get_label_counts([self.indices[x] for x in indices])\n return df\n\n @property\n def labels(self):\n return self.dataset.labels\n\n\ndef CxrRandomSplit(dataset, lengths):\n from torch._utils import _accumulate\n if sum(lengths) > len(dataset):\n raise ValueError(\"Sum of input lengths must less or equal to the length of the input dataset!\")\n indices = torch.randperm(sum(lengths)).tolist()\n return [CxrSubset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]\n\n\n\n# Initiating dataset\ndef copy_mgh_dataset(src_path, csv_path, csv_file, t_view='AP', t_path='PATH', cont_op=False):\n if (cont_op):\n csvs = MGH_DATA_BASE.joinpath(csv_file).resolve()\n else:\n csvs = csv_path.joinpath(csv_file)\n\n for m in [csvs.resolve()]:\n print(f'>>> processing {m}')\n\n df = pd.read_csv(str(m))\n failures = []\n failed_files = []\n for i in tqdm(range(len(df)), 
total=len(df)):\n fs = [df.iloc[i][str(t_path)]]\n\n for k, f in enumerate(fs):\n r, df = anonymization(df, i, t_view, t_path)\n t = MGH_DATA_BASE.joinpath(r).resolve()\n\n if Path(t).is_file():\n print(f'skip the existed file: {t}')\n else:\n try:\n resized = resize_image(f)\n Path.mkdir(t.parent, parents=True, exist_ok=True)\n resized.save(t, 'PNG')\n except:\n failures.append(i)\n failed_files.append(r)\n #breakpoint()\n\n df = df if cont_op else gen_labels(df)\n print(f'before failures: {df.shape}')\n df = df.drop(failures)\n print(f'after failures: {df.shape}')\n #breakpoint()\n t = MGH_DATA_BASE.joinpath(csv_file).resolve()\n #t = MGH_DATA_BASE.joinpath('post2015_mgh_cxr_all_dataset_v3').resolve()\n df.to_csv(t, float_format='%.0f', index=False)\n\n # 1. file make for two inputs is okay?\n # 2. make csv files for one or two inputs are okay?\n # 3. implementation of Data batch + augmentation\n\ndef resize_image(f_name):\n fp = src_path.joinpath(f_name).resolve()\n img = Image.open(fp)\n w, h = img.size\n rs = (512, int(h/w*512)) if w < h else (int(w/h*512), 512)\n resized = img.resize(rs, Image.LANCZOS)\n\n return resized\n\n# anonymizing a file name\ndef anonymization(df, i, t_view, t_path):\n r = f'mgh_{t_view}_{df.iloc[i][0]:08d}.png'\n df.loc[i, t_path] = r\n\n return r, df\n\ndef gen_labels(df):\n # view positions\n df.insert(7, 'ap', 0)\n df['ap'].loc[df['ViewPosition'] == 'AP'] = 1\n df.insert(8, 'pa', 0)\n df['pa'].loc[df['ViewPosition'] == 'PA'] = 1\n df.insert(9, 'll', 0)\n df['ll'].loc[df['ViewPosition'] == 'LL'] = 1\n # sex\n df.insert(11, 'sex', 0)\n df['sex'].loc[df['PatientSex'] == 'M'] = 1\n # manufacturer\n df.insert(14, 'varian', 0)\n df['varian'].loc[df['Manufacturer'] == 'Varian'] = 1\n df.insert(15, 'agfa', 0)\n df['agfa'].loc[df['Manufacturer'] == 'Agfa'] = 1\n df.insert(16, 'ge', 0)\n df['ge'].loc[(df['Manufacturer'] == 'GE Healthcare') | (df['Manufacturer'] == '\"GE Healthcare\"') | (df['Manufacturer'] == 'GE MEDICAL SYSTEMS')] = 1\n df.insert(17, 'others', 0)\n df['others'].loc[(df['varian'] + df['agfa'] + df['ge']) == 0] = 1\n\n return df\n\nif __name__ == \"__main__\":\n if (False):\n src_path = Path('/mnt/hdd/data_storage/mgh_cxr_img').resolve()\n csv_path = Path('/mnt/hdd/data_storage/mgh_cxr_list/clean-lists/20200316-dataset-mgh-v4').resolve()\n if src_path.exists():\n # for AP\n #csv_file = 'example-10-ap.csv'\n csv_file = 'post2015_mgh_cxr_ap_dataset_v4.csv'\n copy_mgh_dataset(src_path, csv_path, csv_file, t_view='AP', t_path='PATH')\n # for PA LL\n #csv_file = 'example-10-pa-ll.csv'\n csv_file = 'post2015_mgh_cxr_pa_ll_dataset_v4.csv'\n copy_mgh_dataset(src_path, csv_path, csv_file, t_view='PA', t_path='PATH1')\n copy_mgh_dataset(src_path, csv_path, csv_file, t_view='LL', t_path='PATH2', cont_op=True)\n #csv_file = 'example-10-pa.csv'\n csv_file = 'post2015_mgh_cxr_pa_dataset_v4.csv'\n copy_mgh_dataset(src_path, csv_path, csv_file, t_view='PA', t_path='PATH')\n #csv_file = 'example-10-ll.csv'\n csv_file = 'post2015_mgh_cxr_ll_dataset_v4.csv'\n copy_mgh_dataset(src_path, csv_path, csv_file, t_view='LL', t_path='PATH')\n else:\n assert False, (f'{src_path} is not existed.')\n else:\n inc_labels = [1, 2, 3, 6, 7, 8, 11, 13, 14, 15, 18, 19, 21, 22, 28]\n inc_rate = [2, 8, 4, 2, 4, 2, 2, 8, 16, 8, 8, 4, 8, 8, 8]\n for k, feature in enumerate(label_name):\n num_p = df.loc[(df[f'{feature}'] == 
1)].shape[0]\n","repo_name":"MGH-LMIC/First-Aid-CXR-AI","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":24062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"15343806464","text":"from itertools import permutations\nfrom random import choice\n\ndef Check_AB(X, Y):\n A = len([1 for i in range(len(Y)) if Y[i]==X[i]])\n B = len(set(Y)&set(X))-A\n return A,B\n\nData = list(permutations([ _ for _ in range(10)],4))\nguessTimes=0\nwhile True:\n comGuess = list(choice(Data)) #電腦猜的數\n guessTimes+=1\n print('第{}次猜題:{}'.format(guessTimes,comGuess))\n a, b =map(int,input('請輸入A、B值(空格分隔):').split())\n Data = [data for data in Data if (Check_AB(data,comGuess)==(a,b))]\n print(len(Data))\n if len(Data)==1:\n break\nprint('你的答案是:{}'.format(Data[0]))\n\n\n\n\n\n\n","repo_name":"JianDa0127/GitHub_JianDa","sub_path":"Guess Num/1-1_User出題_AI表內隨機猜.py","file_name":"1-1_User出題_AI表內隨機猜.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42312078501","text":"from PortMap import *\nimport umath\n\ntimer = StopWatch()\ntimeToBal = []\nkP = .5 #proportional constant \nkD = 1 #dirivitive constant\n\ndef driveForward(distance,speed):\n motorLeft.run_angle(speed,distance,Stop.HOLD,wait=False)\n motorRight.run_angle(speed,distance,Stop.HOLD)\n \ndef autoBalance(kP,kD): \n angleTreshholdMin=2\n angleTreshholdMax=6\n autoBalanceMode = False\n balanceCount = 0\n lastError = 0\n while True:\n angleX = hub.imu.tilt()[1]\n if (not autoBalanceMode and abs(angleX)>angleTreshholdMax):\n autoBalanceMode = True\n balanceCount = 0\n if (autoBalanceMode and abs(angleX)<=angleTreshholdMin):\n autoBalanceMode = False\n if (balanceCount > 3000):\n break\n if (autoBalanceMode):\n hub.light.on(Color.RED)\n error = umath.sin(umath.radians(angleX))\n derivative = lastError - error\n lastError = error\n speed = kP*error + kD*derivative \n balanceCount = 0\n if (not autoBalanceMode):\n balanceCount+=1\n hub.light.on(Color.GREEN)\n speed = 0\n #print(\"error \" + str(angleX) + \"; correction \" + str(speed) +\"; kP \" + str(error) +\"; kD \" + str(lastError) ) \n motorLeft.run(speed*1000)\n motorRight.run(speed*1000)\n\nkDList = []\naverage = []\nkD= 0.125\nfor i in range(0,10):\n kD = -kD if kD>0 else kD*-2\n timeToBal.clear()\n average.append(0)\n for n in range(0,10):\n driveForward(100,1000)\n timer.reset()\n autoBalance(kP,kD)\n timeToBal.append(timer.time())\n average[i] = sum(timeToBal)/len(timeToBal)\n kDList.append(average)\n print(str(kD) + \": \" + str(average[i]) + \" -----Battery:\"+ str(hub.battery.voltage()))\n","repo_name":"Snowbotics39131/FLL-Pre-Season-Code","sub_path":"NotCurrentlyUsed/autobalancePID.py","file_name":"autobalancePID.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"}
+{"seq_id":"13349311341","text":"#!/usr/bin/env python3\n\nimport datetime\nimport os\nimport subprocess\nimport uuid\nimport json\nimport logging\nimport sys\nimport time\nimport traceback\n\nimport requests\nimport yaml\nimport datadog\n\nimport libs.acme_tiny as acme_tiny\n\n\n# Rancher env variables:\n# - CATTLE_URL\n# - CATTLE_ACCESS_KEY\n# - CATTLE_SECRET_KEY\ndef rancher_get_certs():\n r = requests.get(os.environ['CATTLE_URL'] + \"/certificates\",\n auth=(os.environ['CATTLE_ACCESS_KEY'], os.environ['CATTLE_SECRET_KEY']))\n if r.status_code != 200:\n raise Exception('Rancher returned non-200 code: ' + str(r.status_code) + ' - ' + r.text)\n return r.json()[\"data\"]\n\n\ndef rancher_save_cert(name, private_key, cert, link=None):\n\n payload = {'key': private_key, 'cert': cert}\n\n if link is None: # New certificate\n payload[\"name\"] = name\n r = requests.post(os.environ['CATTLE_URL'] + \"/certificates\", data=json.dumps(payload),\n headers={'Content-Type': 'application/json'},\n auth=(os.environ['CATTLE_ACCESS_KEY'], os.environ['CATTLE_SECRET_KEY']))\n\n else: # Update existing certificate\n r = requests.put(link, data=json.dumps(payload),\n headers={'Content-Type': 'application/json'},\n auth=(os.environ['CATTLE_ACCESS_KEY'], os.environ['CATTLE_SECRET_KEY']))\n\n if r.status_code not in [200, 201]:\n raise Exception('Rancher returned non-200 code: ' + str(r.status_code) + ' - ' + r.text)\n\n\ndef openssl(args, input=None):\n proc = subprocess.Popen([\"openssl\"] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate(input)\n if proc.returncode != 0:\n raise IOError(\"OpenSSL Error: {0}\".format(stderr.decode(\"utf-8\")))\n return stdout\n\n\ndef make_cert(config, logger, name, domains, link=None):\n logger.info(\"Creating certificate {0} for domains: {1}\".format(name, ', '.join(domains)))\n\n if len(domains) < 1:\n raise Exception(\"No domains for certificate\")\n\n private_key_file = \"/tmp/\" + uuid.uuid4().hex\n csr_file = \"/tmp/\" + uuid.uuid4().hex\n\n logger.debug(\"Generating private key to \" + private_key_file + \"...\")\n openssl([\"genrsa\", \"-out\", private_key_file, str(config[\"key_length\"])])\n\n with open(private_key_file, 'r') as f:\n private_key = f.read()\n\n logger.debug(\"Generating CSR to \" + csr_file + \"...\")\n if len(domains) == 1:\n openssl([\"req\", \"-new\", \"-sha256\", \"-key\", private_key_file, \"-out\", csr_file, \"-subj\", \"/CN=\" + domains[0]])\n else:\n csr_config_file = \"/tmp/\" + uuid.uuid4().hex\n logger.debug(\"Generating CSR config to \" + csr_config_file + \"...\")\n with open(\"/etc/ssl/openssl.cnf\", \"r\") as f:\n openssl_config = f.read()\n openssl_config += \"\\n[SAN]\\nsubjectAltName=DNS:\" + ',DNS:'.join(domains) + \"\\n\"\n with open(csr_config_file, \"w\") as f:\n f.write(openssl_config)\n openssl([\"req\", \"-new\", \"-sha256\", \"-key\", private_key_file, \"-out\", csr_file, \"-subj\", \"/\", \"-reqexts\", \"SAN\", \"-config\", csr_config_file])\n logger.debug(\"Deleting CSR config file...\")\n os.remove(csr_config_file)\n\n logger.debug(\"Deleting private key file...\")\n os.remove(private_key_file)\n\n logger.info(\"Signing CSR using acme_tiny...\")\n tiny_kwargs = {}\n if \"ca\" in config:\n tiny_kwargs[\"CA\"] = config[\"ca\"]\n else:\n tiny_kwargs[\"directory_url\"] = config[\"ca_directory\"]\n cert = acme_tiny.get_crt(config[\"account_key\"], csr_file, config[\"acme_dir\"], log=logger, **tiny_kwargs)\n\n logger.debug(\"Deleting CSR file...\")\n 
os.remove(csr_file)\n\n # TODO: Backup certificate & key ?\n\n logger.info(\"Saving cert in Rancher...\")\n rancher_save_cert(name, private_key, cert, link)\n\n\ndef load_config(logger):\n\n with open(\"config/config.yml\", \"r\") as f:\n config = yaml.safe_load(f)\n\n # Validation\n if \"ca\" in config and \"ca_directory\" in config:\n raise Exception(\"The config should have either ca_directory or ca (deprecated) but not both.\")\n if \"ca\" in config:\n logger.warning(\"The config 'ca' is deprecated, please use 'ca_directory' instead.\")\n if \"chain\" in config:\n logger.warning(\"The config 'chain' is not used anymore.\")\n\n # Strip cert names and domains\n for cert in config[\"certs\"]:\n cert[\"name\"] = cert[\"name\"].strip()\n for i in range(len(cert[\"domains\"])):\n cert[\"domains\"][i] = cert[\"domains\"][i].strip()\n\n return config\n\n\ndef contains_sublist(lst, sublst):\n for e in sublst:\n if e not in lst:\n return False\n return True\n\n\ndef check_certs(config, logger):\n now = datetime.datetime.now()\n\n logger.info(\"Getting certificates from Rancher...\")\n rancher_certs = rancher_get_certs()\n\n rancher_certs_by_name = {}\n for cert in rancher_certs:\n rancher_certs_by_name[cert[\"name\"].strip()] = cert\n\n # Log which certs are in Rancher and in the config\n logger.debug(\"Found certs from Rancher:\")\n for cert in rancher_certs:\n logger.debug(\"- \" + cert[\"name\"] + \": \" + ', '.join(cert[\"subjectAlternativeNames\"]))\n logger.debug(\"Found certs from config:\")\n for cert in config[\"certs\"]:\n logger.debug(\"- \" + cert[\"name\"] + \": \" + ', '.join(cert[\"domains\"]))\n\n to_do = [] # List of (remaining_days, name, domains, link) for certs to make\n\n logger.info(\"Checking certs:\")\n for cert_config in config[\"certs\"]:\n name = cert_config[\"name\"]\n domains = cert_config[\"domains\"]\n if name not in rancher_certs_by_name:\n logger.info(\"- Cert \" + name + \" does not exists\")\n to_do.append((0, name, domains, None))\n else:\n rancher_cert = rancher_certs_by_name[name]\n link = rancher_cert[\"links\"][\"self\"]\n if contains_sublist(rancher_cert[\"subjectAlternativeNames\"], domains):\n cert_exp = datetime.datetime.strptime(rancher_cert[\"expiresAt\"], \"%a %b %d %H:%M:%S %Z %Y\")\n remaining_days = (cert_exp - now).days\n logger.info(\"- Cert {0} expires in {1} days\".format(name, remaining_days))\n if remaining_days < 30:\n to_do.append((remaining_days, name, domains, link))\n else:\n logger.info(\"- Cert \" + name + \" is missing domains\")\n to_do.append((0, name, domains, link))\n\n # Renew certs in the order they expire\n to_do.sort()\n for (_, name, domains, link) in to_do:\n make_cert(config, logger, name, domains, link)\n\n return len(to_do)\n\n\ndef setup_logging():\n # Configure the logger to send <= info messages to stdout and >= warning messages to stderr\n class InfoFilter(logging.Filter):\n def filter(self, rec):\n return rec.levelno in (logging.DEBUG, logging.INFO)\n logger = logging.getLogger(__name__)\n h1 = logging.StreamHandler(sys.stdout)\n h1.setLevel(logging.DEBUG)\n h1.addFilter(InfoFilter())\n h2 = logging.StreamHandler()\n h2.setLevel(logging.WARNING)\n logger.addHandler(h1)\n logger.addHandler(h2)\n # Configure logger level\n logger.setLevel(logging.DEBUG if (\"LOG_DEBUG\" in os.environ) else logging.INFO)\n\n\ndef single_run():\n logger = logging.getLogger(__name__)\n start_time = datetime.datetime.now()\n\n logger.info(\"*** Rancher Auto Certs started \" + start_time.strftime(\"%Y-%m-%d %H:%M\") + \" ***\")\n\n config 
= load_config(logger)\n logger.debug(\"Using CA %s and directory %s\", config.get(\"ca\"), config.get(\"ca_directory\"))\n logger.debug(\"Using account key: \" + config[\"account_key\"])\n\n nb_certs = check_certs(config, logger)\n\n logger.info(\"*** {0} cert(s) created in {1} ***\".format(nb_certs, datetime.datetime.now() - start_time))\n\n return nb_certs\n\n\ndef daemon():\n datadog.initialize(\n statsd_host=os.getenv(\"DOGSTATSD_HOST\", \"127.0.0.1\"),\n statsd_port=int(os.getenv(\"DOGSTATSD_PORT\", \"8125\")),\n )\n\n while True:\n try:\n nb_certs = single_run()\n datadog.statsd.event(\n \"Rancher Auto Certs executed successfully\",\n \"{} certificate(s) created or renewed\".format(nb_certs),\n alert_type='success',\n )\n datadog.statsd.service_check('rancher_auto_certs.status', datadog.statsd.OK)\n except Exception as e:\n traceback.print_exc()\n datadog.statsd.event(\n \"Rancher Auto Certs encountered an error\",\n \"Please check container logs.\\n{}: {}\".format(type(e).__name__, str(e)),\n alert_type='error',\n )\n datadog.statsd.service_check('rancher_auto_certs.status', datadog.statsd.CRITICAL)\n time.sleep(24 * 60 * 60)\n\n\ndef main():\n setup_logging()\n if \"--daemon\" in sys.argv:\n daemon()\n else:\n single_run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jonremy/rancher-auto-certs","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8985,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"}
+{"seq_id":"72596581842","text":"from copy import deepcopy\nimport logging\nfrom typing import Any, Optional, Union\n\nfrom pydicom.dataset import Dataset\nfrom pydicom.sr.coding import Code\n\nlogger = logging.getLogger(__name__)\n\n\nclass CodedConcept(Dataset):\n\n \"\"\"Coded concept of a DICOM SR document content module attribute.\"\"\"\n\n def __init__(\n self,\n value: str,\n scheme_designator: str,\n meaning: str,\n scheme_version: Optional[str] = None\n ) -> None:\n \"\"\"\n Parameters\n ----------\n value: str\n code\n scheme_designator: str\n designator of coding scheme\n meaning: str\n meaning of the code\n scheme_version: Union[str, None], optional\n version of coding scheme\n\n \"\"\"\n super(CodedConcept, self).__init__()\n if len(value) > 16:\n if value.startswith('urn') or '://' in value:\n self.URNCodeValue = str(value)\n else:\n self.LongCodeValue = str(value)\n else:\n self.CodeValue = str(value)\n if len(meaning) > 64:\n raise ValueError('Code meaning can have maximally 64 characters.')\n self.CodeMeaning = str(meaning)\n self.CodingSchemeDesignator = str(scheme_designator)\n if scheme_version is not None:\n self.CodingSchemeVersion = str(scheme_version)\n # TODO: Enhanced Code Sequence Macro Attributes\n\n def __hash__(self) -> int:\n return hash(self.scheme_designator + self.value)\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Compares `self` and `other` for equality.\n\n Parameters\n ----------\n other: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]\n code\n\n Returns\n -------\n bool\n whether `self` and `other` are considered equal\n\n \"\"\"\n if isinstance(other, (Code, CodedConcept)):\n this = Code(\n self.value,\n self.scheme_designator,\n self.meaning,\n self.scheme_version\n )\n return Code.__eq__(this, other)\n return super().__eq__(other)\n\n def __ne__(self, other: Any) -> bool:\n \"\"\"Compares `self` and `other` for inequality.\n\n Parameters\n ----------\n other: Union[CodedConcept, pydicom.sr.coding.Code]\n code\n\n Returns\n -------\n bool\n whether `self` and `other` are not considered equal\n\n \"\"\"\n return not (self == other)\n\n @classmethod\n def from_dataset(\n cls,\n dataset: Dataset,\n copy: bool = True\n ) -> 'CodedConcept':\n \"\"\"Construct a CodedConcept from an existing dataset.\n\n Parameters\n ----------\n dataset: pydicom.dataset.Dataset\n Dataset representing a coded concept.\n copy: bool\n If True, the underlying dataset is deep-copied such that the\n original dataset remains intact. 
If False, this operation will\n alter the original dataset in place.\n\n Returns\n -------\n highdicom.sr.CodedConcept:\n Coded concept representation of the dataset.\n\n Raises\n ------\n TypeError:\n If the passed dataset is not a pydicom dataset.\n AttributeError:\n If the dataset does not contain the required elements for a\n coded concept.\n\n \"\"\"\n if not isinstance(dataset, Dataset):\n raise TypeError(\n 'Dataset must be a pydicom.dataset.Dataset.'\n )\n code_value_kws = ['CodeValue', 'LongCodeValue', 'URNCodeValue']\n num_code_values = sum(hasattr(dataset, kw) for kw in code_value_kws)\n if num_code_values != 1:\n raise AttributeError(\n 'Dataset should have exactly one of the following attributes: '\n f'{\", \".join(code_value_kws)}.'\n )\n for kw in ['CodeMeaning', 'CodingSchemeDesignator']:\n if not hasattr(dataset, kw):\n raise AttributeError(\n 'Dataset does not contain the following attribute '\n f'required for coded concepts: {kw}.'\n )\n if copy:\n concept = deepcopy(dataset)\n else:\n concept = dataset\n concept.__class__ = cls\n return concept\n\n @classmethod\n def from_code(cls, code: Union[Code, 'CodedConcept']) -> 'CodedConcept':\n \"\"\"Construct a CodedConcept for a pydicom Code.\n\n Parameters\n ----------\n code: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]\n Code.\n\n Returns\n -------\n highdicom.sr.CodedConcept:\n CodedConcept dataset for the code.\n\n \"\"\"\n if isinstance(code, cls):\n return code\n return cls(*code)\n\n @property\n def value(self) -> str:\n \"\"\"str: value of either `CodeValue`, `LongCodeValue` or `URNCodeValue`\n attribute\"\"\"\n return getattr(\n self, 'CodeValue',\n getattr(\n self, 'LongCodeValue',\n getattr(\n self, 'URNCodeValue',\n None\n )\n )\n )\n\n @property\n def meaning(self) -> str:\n \"\"\"str: meaning of the code\"\"\"\n return self.CodeMeaning\n\n @property\n def scheme_designator(self) -> str:\n \"\"\"str: designator of the coding scheme (e.g. ``\"DCM\"``)\"\"\"\n\n return self.CodingSchemeDesignator\n\n @property\n def scheme_version(self) -> Optional[str]:\n \"\"\"Union[str, None]: version of the coding scheme (if specified)\"\"\"\n return getattr(self, 'CodingSchemeVersion', None)\n","repo_name":"ImagingDataCommons/highdicom","sub_path":"src/highdicom/sr/coding.py","file_name":"coding.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"3"}
+{"seq_id":"72041805841","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport csv\nimport sys\nimport os\nimport errno\n\nproxy = sys.argv[1]\ntftp = proxy\ncsv_file = open(sys.argv[2], 'rt')\n\ntry:\n #detect delimiter\n dialect = csv.Sniffer().sniff(csv_file.read(1024))\n csv_file.seek(0)\n #detect header\n header = csv.Sniffer().has_header(csv_file.read(1024))\n csv_file.seek(0)\n reader = csv.reader(csv_file, dialect)\n #skip the first line if header found\n if header:\n next(reader, None)\n #phonebook XML start\n phonebook=\"\\nAdresář\"\n\n #iterate CSV\n for row in reader:\n filedata = None\n #map columns\n sn = row[0]\n num = row[1]\n pswd = row[2]\n name = row[3]\n template = row[4]\n\n template = \"{}.xml\".format(template)\n\n outfile = \"./output/spa{}.xml\".format(sn)\n #create output directory if not exists\n if not os.path.exists(os.path.dirname(outfile)):\n try:\n os.makedirs(os.path.dirname(outfile))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n #phonebook record\n phonebook += \"\\n{}{}\".format(name,num)\n\n #parse template\n with open(template, 'r') as file :\n\n filedata = file.read()\n #replace fields with values\n filedata = filedata.replace('##NUM##', num)\n filedata = filedata.replace('##PASS##', pswd)\n filedata = filedata.replace('##NAME##', name)\n filedata = filedata.replace('##PROXY##', proxy)\n filedata = filedata.replace('##TFTP##', tftp)\n\n #write output config file for device\n with open(outfile, 'w') as file:\n file.write(filedata)\n print(\"{}: {} [{}]\").format(sn, num, name)\n\n #phonebook XML end + write to file\n phonebook += \"\\n\"\n with open(\"./output/directory.xml\", 'w') as file:\n file.write(phonebook)\n\n #generate provisioning models configs\n models = ['spa301','spa501G','spa502G','spa504G','spa508G','spa509G','spa512G','spa514G']\n for model in models:\n outfile = \"./output/{}.xml\".format(model)\n with open(outfile, 'w') as file:\n conf = \"\"\"\n \n Yes\n 10\n tftp://{}/spa$MA.xml\n \"\"\".format(tftp)\n file.write(conf)\n\nfinally:\n csv_file.close()\n\n\n","repo_name":"lynt-smitka/Cisco-SPA-CZ-provisioning","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"70301674003","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.forms import ModelForm\n\nfrom build_world.fields import EntityCharField, EntityTextField\nimport merge_in_memory as mim_module\n\nclass Entity(models.Model):\n choices = (('world', 'World'), ('story', 'Story'), ('section', 'Section'))\n etype = models.CharField('Entity type', max_length='32', choices=choices, db_column='type',\n db_index=True, help_text='Specify the type of entity (eg. World, Story) you are creating.', default='world')\n parent = models.ForeignKey('self', null=True, blank=True,\n help_text='Indicate to which entity (eg. World, Story, Section) this belongs.')\n founder = models.ForeignKey(User, related_name='founders')\n owner = models.ForeignKey(User, null=True, blank=True)\n name = EntityCharField(max_length=100)\n body = EntityTextField(blank=True)\n description = EntityTextField(max_length=1000, blank=True,\n help_text='Describe your project. This description will be visible to everyone.')\n notes = EntityTextField(blank=True, help_text='This is a place for storing any notes you might want to keep about the project.')\n private = models.BooleanField(default=False)\n active_version = models.ForeignKey('EntityVersion', related_name='active_version', null=True, blank=True, default=None)\n\n def __unicode__(self):\n return self.etype + \": \" + self.name\n\n @models.permalink\n def get_absolute_url(self, attr=None):\n if attr and self.hasattr(attr):\n return ('entity_attr', (), {\n 'etype':self.etype, \n 'pk':self.id, \n 'attr':attr })\n else:\n return ('entity', (), {\n 'etype':self.etype, \n 'pk':self.id })\n\n def has_same_data(self, other, reverse=False, **kwargs):\n \"\"\"Takes in another entity and possibly some diffs to apply. 
Applies the diffs to self temporarily and checks if the primary attributes of the two entities are identical.\n \"\"\"\n if type(self) != type(other):\n return False\n\n merger = mim_module.Merger()\n body = self.body\n description = self.description\n notes = self.notes\n\n if 'body_diff' in kwargs:\n body = merger.diff_apply(body, kwargs['body_diff'], reverse)\n if 'descr_diff' in kwargs:\n description = merger.diff_apply(description, kwargs['descr_diff'], reverse)\n if 'notes_diff' in kwargs:\n notes = merger.diff_apply(notes, kwargs['notes_diff'], reverse)\n\n # TEST OUTPUT\n return self.body + \" \" + body + \" \" + other.body + \" \" + kwargs['body_diff']\n return body\n return other.body\n\n if body == other.body and description == other.description and notes == other.notes:\n return True\n\n return False\n\n def get_to_version(self, version):\n \"\"\"Takes a version and tracks the current entity forward or back to\n that version by applying diffs.\n \"\"\"\n try:\n curr_version_num = self.active_version.version_num\n except AttributeError:\n curr_version_num = 0\n\n try:\n target_version_num = version.version_num\n except AttributeError:\n target_version_num = 0\n\n # Get a list of versions that are in between this entity and the target\n # version.\n if curr_version_num <= target_version_num:\n reverse = False\n versions = EntityVersion.objects.filter(\n entity=self, \n version_num__gt=curr_version_num, \n version_num__lte=target_version_num)\n else:\n reverse = True\n versions = EntityVersion.objects.filter(\n entity=self, \n version_num__lte=curr_version_num, \n version_num__gt=target_version_num)\n for ver in versions:\n self.apply_version(ver, reverse=reverse)\n\n def apply_version(self, version, reverse=False):\n \"\"\"Takes a version and applies its diffs to the current entity.\"\"\"\n self.apply_diff_strings(reverse, body_diff=version.body,\n descr_diff=version.description, notes_diff=version.notes)\n\n\n def apply_diff_strings(self, reverse=False, **kwargs):\n \"\"\"Takes a set of diffs and applies them to the relevant attributes.\"\"\"\n merger = mim_module.Merger()\n if 'body_diff' in kwargs:\n self.body = merger.diff_apply(self.body, kwargs['body_diff'], reverse)\n if 'descr_diff' in kwargs:\n self.description = merger.diff_apply(self.description, kwargs['descr_diff'], reverse)\n if 'notes_diff' in kwargs:\n self.notes = merger.diff_apply(self.notes, kwargs['notes_diff'], reverse)\n\n def make_version_with_diffs(self, other, version=None):\n \"\"\"Take in another entity and possibly a version. 
Return a version\n containing the diff of self and the other entity (in that order).\n \"\"\"\n merger = mim_module.Merger()\n if type(self) != type(other):\n return False\n if not version:\n version = EntityVersion()\n version.body = merger.diff_make(self.body, other.body)\n version.description = merger.diff_make(self.description, other.description)\n version.notes = merger.diff_make(self.notes, other.notes)\n return version\n \nclass MemberRelation(models.Model):\n \"\"\"Stores information about which user can contribute to which entities, and at what permission levels, and so on.\n \"\"\"\n entity = models.ForeignKey(Entity)\n user = models.ForeignKey(User)\n ranks = (('chief_contrib', 'Chief Contributor'), ('chief_editor', 'Chief Editor'), ('editor', 'Editor'),\n ('contributor', 'Contributor'), ('artist', 'Artist'))\n relation = models.CharField(max_length=64, choices=ranks)\n\n def __unicode__(self):\n return self.user.username + \", \" + self.relation + \" of \" + str(self.entity)\n\nclass Version(models.Model):\n \"\"\"Stores the changes a person has made to an entity, or the data for a new\n entity that has yet to be accepted.\n NOTE: This relies on the merge_in_memory package, found at: \n https://github.com/danielmoniz/merge_in_memory\n \"\"\"\n version_num = models.IntegerField(null=True, blank=True)\n\n class Meta:\n abstract = True\n\n def __unicode__(self):\n return self.version_num\n\nclass EntityVersion(Version):\n \"\"\"Stores entity data in a version.\"\"\"\n entity = models.ForeignKey(Entity, null=True)\n modifies = models.ForeignKey('self', null=True, blank=True)\n active = models.BooleanField(default=False)\n body = EntityTextField(null=True, blank=True)\n description = EntityTextField(null=True, blank=True)\n notes = EntityTextField(null=True, blank=True)\n edited = models.BooleanField(default=False)\n accepted = models.BooleanField(default=False)\n user = models.ForeignKey(User)\n\n def __unicode__(self):\n output_list = [\n str(self.entity), \n \"modified by \" + str(self.user),\n str(self.version_num)]\n if self.active:\n output_list.append('ACTIVE')\n else:\n output_list.append('ACTIVE')\n\n return ', '.join(output_list)\n\n def make_version_from_entities(self, entity1, entity2):\n \"\"\"Take in two entities and modify this version's relevant fields to\n reflect the changes in the entities.\n NOTE: This is primarily used with a real entity and a dummy entity.\n Therefore only the text fields need modifying.\n \"\"\"\n # Generate the diffs and call entity1.make_version_with_diffs\n pass\n\n\n\n# FORMS ================================================================\nclass MemberRelationForm(ModelForm):\n \"\"\"Allow users to create/modify/modify the permissions of others.\"\"\"\n class Meta:\n model = MemberRelation\n exclude = ('entity')\n \nclass EntityForm(ModelForm):\n \"\"\"A class for modifying an Entity.\"\"\"\n class Meta:\n model = Entity\n \nclass EntityCreateForm(EntityForm):\n \"\"\"A class for creating a new Entity. 
Leaves out both Founder and Owner.\n \"\"\"\n class Meta(EntityForm.Meta):\n exclude = ('founder', 'owner', 'active_version',)\n\nclass WorldForm(EntityForm):\n \"\"\"A class describing a form for modifying a World.\"\"\"\n class Meta(EntityForm.Meta):\n exclude = ('parent', 'founder', 'body', 'active_version')\n\nclass WorldNonOwnerForm(WorldForm):\n \"\"\"A class describing a form for modifying a World as a non-owner.\"\"\"\n class Meta(WorldForm.Meta):\n exclude = ('parent', 'founder', 'body', 'owner', 'active_version')\n\nclass StoryForm(EntityForm):\n \"\"\"A class describing a form for modifying a Story.\"\"\"\n class Meta(EntityForm.Meta):\n exclude = ('founder', 'active_version')\n\nclass StoryNonOwnerForm(StoryForm):\n \"\"\"A class describing a form for modifying a Story as a non-owner.\"\"\"\n class Meta(StoryForm.Meta):\n exclude = ('founder', 'owner', 'active_version')\n\nclass SectionForm(EntityForm):\n \"\"\"A class describing a form for modifying a Section.\"\"\"\n class Meta(EntityForm.Meta):\n exclude = ('founder', 'active_version')\n\nclass SectionNonOwnerForm(SectionForm):\n \"\"\"A class describing a form for modifying a Section as a non-owner.\"\"\"\n class Meta(SectionForm.Meta):\n exclude = ('founder', 'owner', 'active_version')\n","repo_name":"danielmoniz/Rainbow","sub_path":"build_world/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"31702734635","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n\npath = \"cleaned/train.csv\"\ndf = pd.read_csv(path)\n\ndf.describe()\n\n# Remove Columns that will not be used for classification\nd_col = [\"ID\", \"Customer_ID\", \"Month\", \"Name\", \"SSN\", \"Monthly_Inhand_Salary\"]\n\nfor _ in d_col:\n if _ in df.columns:\n df = df.drop(_, axis=1)\n\ndf.info()\n\n\n# See Nominal values\nfor col in df:\n if df[col].dtypes == object:\n print(col)\n print(\"**\" * 20)\n print(df[col].value_counts(dropna=False))\n print(\"**\" * 20)\n\n\ndf[\"Credit_Score\"]\n\n\n# Conversion of Nominal data into Numeric\ny_, label = pd.factorize(df[\"Credit_Score\"])\ndf[df.select_dtypes([\"object\"]).columns] = df[\n df.select_dtypes([\"object\"]).columns\n].apply(lambda x: pd.factorize(x)[0])\n\ndf.describe()\n\n# finding Columns with Outliers using IQR method\ndef find_outliers(df, threshold=1.5):\n cols = []\n\n for _ in df.columns:\n q1 = np.percentile(df[_], 25)\n q3 = np.percentile(df[_], 75)\n iqr = q3 - q1\n lower_limit = q1 - threshold * iqr\n upper_limit = q3 + threshold * iqr\n\n if any((df[_] < lower_limit) | (df[_] > upper_limit)):\n cols.append(_)\n return cols\n\n\noutlier_columns = find_outliers(df)\nprint(outlier_columns)\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Generate a color palette with a unique color for each box plot\nnum_plots = len(outlier_columns)\npalette = sns.color_palette(\"PiYG\", num_plots)\n\nfig, axes = plt.subplots(nrows=num_plots, ncols=1, figsize=(10, 2 * num_plots))\n\nfor i, column in enumerate(outlier_columns):\n ax = axes[i]\n sns.boxplot(x=df[column], ax=ax, color=palette[i])\n ax.set_title(f\"Box plot of {column}\", fontsize=12)\n ax.set_ylabel(\"\")\n ax.grid(True, axis=\"y\")\nplt.text(\n 0.9,\n 0.1,\n \"Roll: 18, 25\",\n ha=\"right\",\n va=\"bottom\",\n transform=plt.gca().transAxes,\n color=\"red\",\n fontsize=24,\n)\nplt.tight_layout()\nplt.savefig(\"outlier_box.png\", dpi=300)\nplt.show()\n\n\n# Limit the Outliers to Upper limit and Lower Limit\nthreshold = 1.5\ndf2 = df.copy()\nfor col in outlier_columns:\n q1 = np.percentile(df[col], 25)\n q3 = np.percentile(df[col], 75)\n iqr = q3 - q1\n lower_limit = q1 - threshold * iqr\n upper_limit = q3 + threshold * iqr\n\n df2[col] = np.where(\n df[col] > upper_limit,\n upper_limit,\n np.where(df[col] < lower_limit, lower_limit, df[col]),\n )\n\n\"\"\"for _ in outlier_columns:\n Q1 = df[_].quantile(0.25)\n Q3 = df[_].quantile(0.75)\n IQR = Q3 - Q1\n df = df.drop(df.loc[df[_] > (Q3 + 1.5 * IQR)].index)\n df = df.drop(df.loc[df[_] < (Q1 - 1.5 * IQR)].index)\ndf.info()\"\"\"\n\ndf[\"Annual_Income\"]\n\n# Box plot after handeling outliers\nfig, axes = plt.subplots(\n nrows=len(outlier_columns), ncols=1, figsize=(10, 2.5 * len(outlier_columns))\n)\n\nfor i, column in enumerate(outlier_columns):\n ax = axes[i]\n sns.boxplot(x=df2[column], ax=ax)\n ax.set_xlabel(\"Index\", fontsize=12)\n ax.set_ylabel(column, fontsize=12)\n ax.set_title(f\"Box plot of {column}\", fontsize=14)\n\nplt.tight_layout()\nplt.show()\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Set the color palette\npalette = sns.color_palette(\"PiYG\", 14)\n\nfig, axes = plt.subplots(2, 1, figsize=(6, 10))\n\n# Plot \"Before\" distribution\nsns.histplot(df[\"Annual_Income\"], kde=True, ax=axes[0], color=palette[0], alpha=0.5)\naxes[0].set_title(\"Before\")\n\n# Plot \"After\" distribution\nsns.histplot(df2[\"Annual_Income\"], kde=True, ax=axes[1], 
color=palette[0], alpha=0.5)\naxes[1].set_title(\"After\")\n\n# Adjust alpha value for plot elements\nfor ax in axes:\n ax.set_facecolor((1, 1, 1, 1)) # Set background alpha value\n ax.grid(alpha=0.2) # Adjust gridlines alpha value\nplt.text(\n 0.9,\n 0.1,\n \"Roll: 18, 25\",\n ha=\"right\",\n va=\"bottom\",\n transform=plt.gca().transAxes,\n color=\"red\",\n fontsize=14,\n)\nplt.tight_layout()\nplt.savefig(\"limit.png\", dpi=300)\nplt.show()\n\ncorr = df.corr()\n\nplt.figure(figsize=(20, 20))\nmatrix = np.triu(corr)\nsns.heatmap(corr, cmap=\"PiYG\", annot=True, mask=matrix)\nplt.tight_layout()\nplt.text(\n 0.9,\n 0.9,\n \"Roll: 18, 25\",\n ha=\"right\",\n va=\"top\",\n transform=plt.gca().transAxes,\n color=\"red\",\n fontsize=34,\n)\nplt.savefig(\"matrix.png\", dpi=300)\nplt.show()\n\n\n# Training Data\ny = df[\"Credit_Score\"]\nX = df.drop(\"Credit_Score\", axis=1)\n\nfrom sklearn import tree\n\nclf = tree.DecisionTreeClassifier(criterion=\"entropy\")\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.33, random_state=100\n)\n\n\nclf = clf.fit(X_train, y_train)\n\npredicted = clf.predict(X_test)\npred_label = label[predicted]\ny_label = label[y_test]\n\nprint(pred_label)\n\n\nprint(y_label)\n\nfrom sklearn.metrics import (\n accuracy_score,\n confusion_matrix,\n ConfusionMatrixDisplay,\n f1_score,\n classification_report,\n)\n\nconf_mat = confusion_matrix(y_label, pred_label)\nC = conf_mat / conf_mat.astype(np.float).sum(axis=1)\ndisp = ConfusionMatrixDisplay(confusion_matrix=C, display_labels=label)\nfig, ax = plt.subplots(figsize=(8, 6))\n\n# Use only the green color from the \"PiYG\" palette\ncmap = plt.cm.get_cmap(\"PiYG\")\ncmap = cmap(np.linspace(0.5, 1, cmap.N))\ncmap = cmap[:, 1:2]\ncmap = plt.cm.colors.ListedColormap(cmap)\n\ndisp.plot(ax=ax, cmap=\"Greens\", xticks_rotation=\"vertical\")\n\nplt.title(\"Confusion Matrix\")\nplt.tight_layout()\nplt.text(\n 0.9,\n 0.1,\n \"Roll: 18, 25\",\n ha=\"right\",\n va=\"bottom\",\n transform=plt.gca().transAxes,\n color=\"red\",\n fontsize=18,\n)\nplt.savefig(\"entropy.png\", dpi=300)\n\n\nprint(classification_report(y_test, predicted))\n\n\nclf_gini = tree.DecisionTreeClassifier(criterion=\"gini\", random_state=0)\n\n\n# fit the model\nclf_gini.fit(X_train, y_train)\n\n\ny_pred_gini = clf_gini.predict(X_test)\n\n\n\nconf_mat = confusion_matrix(y_test, y_pred_gini)\nC = conf_mat / conf_mat.astype(np.float).sum(axis=1)\ndisp = ConfusionMatrixDisplay(confusion_matrix=C, display_labels=label)\nfig, ax = plt.subplots(figsize=(8, 6))\ndisp.plot(ax=ax, cmap=\"Greens\", xticks_rotation=\"vertical\")\n\nplt.title(\"Confusion Matrix\")\nplt.tight_layout()\nplt.text(\n 0.9,\n 0.1,\n \"Roll: 18, 25\",\n ha=\"right\",\n va=\"bottom\",\n transform=plt.gca().transAxes,\n color=\"red\",\n fontsize=18,\n)\nplt.savefig(\"entropy2.png\", dpi=300)\n\n\nprint(classification_report(y_test, y_pred_gini))\n","repo_name":"Pilot-Khadka/Machine_Learning_Projects","sub_path":"Decision Tree -Credit Score classigicaiton/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30006138262","text":"def function(arr):\r\n res = []\r\n for i in arr:\r\n if (type(i) == list or type(i) == tuple):\r\n function(i)\r\n else:\r\n res.append(i)\r\n return res\r\n\r\nprint(function(arr = ['a', ['c', 1, 3], ['f', 7, [4, '4']], [{'lalala': 111}]]))","repo_name":"Enphonn/Python","sub_path":"Lab1/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39931810554","text":"#!/usr/bin/env python3\n\nimport streamlink\nimport time\nfrom facenet_pytorch import MTCNN\nimport io\nimport requests\nimport concurrent.futures\nimport requests\nimport time\nimport torch\nimport sys\nimport logging\nimport sys\nimport streamlink\nimport os.path\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom methods.constants import IMG_HEIGHT, IMG_WIDTH\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\ntry:\n import cv2\nexcept ImportError:\n sys.stderr.write(\"This example requires opencv-python is installed\")\n raise\n\nlog = logging.getLogger(__name__)\nGREEN = (0, 255, 0)\nEKS_IP = \"http://ac1079231337f47aabdc6aa6e7a2be07-233993352.us-east-2.elb.amazonaws.com\" \n# EKS_IP = \"http://127.0.0.1\" # \n\ndef stream_to_url(url, quality='best'):\n if \"twitch\" in url:\n streams = streamlink.streams(url)\n if streams:\n return streams[quality].to_url()\n else:\n raise ValueError(\"No streams were available\")\n else:\n return url\n\n\ndef add_rect2frame(frame, boxes):\n for box in boxes:\n box = [int(i) for i in box]\n x1, y1, x2, y2 = box\n cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 3)\n\n\ndef augment(frame, boxes, asset):\n for box in boxes:\n x0, y0, x1, y1 = [int(i) for i in box]\n if x1 - x0 <= 0:\n continue\n if y1 - y0 <= 0:\n continue\n asset_patch = cv2.resize(asset, (x1 - x0, y1 - y0))\n a = (asset_patch[:, :, 3] / 255.)[:, :, None]\n frame[y0: y1, x0: x1] = (1-a) * frame[y0: y1, x0: x1] + a * asset_patch[:, :, :3]\n\n\ndef detect_faces(frame, mtcnn):\n boxes, _ = mtcnn.detect(frame)\n add_rect2frame(frame, boxes)\n return frame\n\n\ndef numpy_to_binary(arr):\n is_success, buffer = cv2.imencode(\".png\", arr)\n io_buf = io.BytesIO(buffer)\n return io_buf.read()\n\n\ndef detect_faces_online(frame, add2frame=False, timeout=5):\n X_sz, Y_sz = frame.shape[:2]\n W_new, H_new = IMG_WIDTH, IMG_HEIGHT\n resized = cv2.resize(frame, (W_new, H_new), interpolation=cv2.INTER_LINEAR)\n r = requests.put(\n f\"{EKS_IP}:9001/predictions/all_det\",\n numpy_to_binary(resized), \n timeout=timeout\n ).content\n\n scale_X = Y_sz / W_new\n scale_Y = X_sz / H_new\n\n boxes = json.loads(r.decode())\n #assert 0, boxes\n if isinstance(boxes, dict):\n print(boxes)\n exit(1)\n boxes = []\n boxes = [[x1 * scale_X, y1 * scale_Y , x2* scale_X, y2 * scale_Y] for x1, y1, x2, y2 in boxes]\n if add2frame:\n add_rect2frame(frame, boxes)\n return boxes\n\n\ndef write_on_line(text):\n sys.stdout.write(f'\\r{text}')\n sys.stdout.flush()\n\n\ndef main(url, fpath_asset=None, x0=None, y0=None, x1=None, y1=None, quality='best', fps=300.0):\n stream_url = stream_to_url(url)\n log.info(\"Loading stream {0}\".format(stream_url))\n cap = cv2.VideoCapture(stream_url)\n w, h = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n print(\"shape=\", (h, w))\n if x0 is None:\n x0 = 0.0\n if x1 is None:\n x1 = 1.0\n if y0 is None:\n y0 = 0.0\n if y1 is None:\n y1 = 1.0\n x0, x1 = int(round(x0 * w)), int(round(x1 * w))\n y0, y1 = int(round(y0 * h)), int(round(y1 * h))\n print(y0, y1, x0, x1)\n if fpath_asset is None:\n fpath_asset = \"img.png\"\n img_data = requests.get(\"http://assets.stickpng.com/images/58e8ff52eb97430e819064cf.png\").content\n with open(fpath_asset, 'wb') as handler:\n handler.write(img_data)\n\n asset = cv2.imread(fpath_asset, cv2.IMREAD_UNCHANGED)\n\n frame_time = int((1.0 / fps) * 1000.0)\n CONNECTIONS = 200\n multithreader = concurrent.futures.ThreadPoolExecutor(max_workers=CONNECTIONS)\n tic = None \n cnt = 0\n 
futures = []\n frames_queue = []\n beginning = True\n boxes = InertialBoxes(b=0.90)\n lst_image = np.ones((IMG_WIDTH, IMG_HEIGHT, 3))\n while True:\n try:\n tic_ = time.time()\n ret, frame = cap.read()\n print(f\"Getting frame {time.time() - tic_}\")\n if ret is None:\n break\n frame = frame[y0:y1, x0:x1]\n assert len(frame.ravel()) > 0\n frames_queue.append(frame)\n # frame = detect_faces_online(frame)\n futures.append(multithreader.submit(detect_faces_online, frame))\n print(\"A\", time.time() -tic_)\n try:\n print(\"Futures# = \", len(futures))\n if len(futures) < 100 and beginning:\n print('=================')\n continue\n beginning = False\n if tic is None:\n tic = time.time()\n future = next(concurrent.futures.as_completed(futures[0:1]))\n print(\"B\", time.time() -tic_)\n boxes_new = future.result()\n print(\"B.1\")\n boxes.tick()\n for box in boxes_new:\n boxes.handle_box(box)\n futures.pop(0)\n frame = frames_queue.pop(0)\n print(\"C\", time.time() -tic_)\n # add_rect2frame(frame, boxes)\n augment(frame, [box[\"coords\"] for box in boxes.info], asset)\n print(\"D\", time.time() -tic_)\n cv2.imshow('frame', cv2.resize(frame, (1920//2, 1080//2)))\n lst_image = frame\n cnt += 1\n print(\"E\", time.time() -tic_)\n\n except Exception as e:\n #print(e.args)\n print(\"Err#1\")\n cv2.imshow('frame', cv2.resize(lst_image, (1920//2, 1080//2)))\n cnt += 1\n #exit(1)\n #time.sleep(100)\n\n # time.sleep(100)\n toc = time.time()\n print(f\"FPS = {cnt / (toc - tic)}\")\n if cv2.waitKey(max(1, min(1, frame_time - 1000 * int(toc - tic_)))) & 0xFF == ord('q'):\n break\n print(\"F\", time.time() -tic_)\n except KeyboardInterrupt:\n break\n\n cv2.destroyAllWindows()\n cap.release()\n\n\ndef stream_without_torch(url, quality='best', fps=300.0):\n stream_url = stream_to_url(url)\n log.info(\"Loading stream {0}\".format(stream_url))\n cap = cv2.VideoCapture(stream_url)\n\n frame_time = int((1.0 / fps) * 1000.0)\n tic = time.time()\n cnt = 0\n tic = time.time()\n while True:\n try:\n ret, frame = cap.read()\n \n if ret:\n tic_ = time.time()\n cv2.imshow('frame', frame)\n \n cnt += 1\n # time.sleep(100)\n toc = time.time()\n print(f\"FPS = {cnt / (toc - tic)}\")\n if cv2.waitKey(max(0, frame_time - 1000 * int(toc - tic_))) & 0xFF == ord('q'):\n break\n else:\n break\n except KeyboardInterrupt:\n break\n\n cv2.destroyAllWindows()\n cap.release()\n\n\nclass InertialBoxes:\n def __init__(self, b=0.90, tol=10, min_freshness=0.10, max_freshness=1.0):\n self.b = b\n self.tol = tol\n self.info = []\n self.min_freshness = min_freshness\n self.max_freshness = max_freshness\n\n def add_new_box(self, coords):\n x0, y0, x1, y1 = coords\n self.info.append({\n \"center\": (0.5 * (x1+x0), 0.5 * (y1+y0)),\n \"coords\": coords,\n \"freshness\": 1,\n })\n\n def update_box(self, i, coords):\n box = self.info[i]\n b = self.b\n box[\"coords\"] = tuple(b * np.array(box[\"coords\"]) + (1 - b) * np.array(coords))\n x0, y0, x1, y1 = box[\"coords\"]\n box[\"center\"] = (0.5 * (x1+x0), 0.5 * (y1+y0))\n box[\"freshness\"] = min(box[\"freshness\"] + 1, self.max_freshness)\n\n def tick(self):\n for i, box in enumerate(self.info):\n box[\"freshness\"] *= self.b\n self.info = [box for box in self.info if box[\"freshness\"] > self.min_freshness]\n\n def is_close(self, coords0, coords1):\n return abs(np.array(coords0) - np.array(coords1)).mean() < self.tol\n\n def handle_box(self, coords):\n found_box = False\n for i, box in enumerate(self.info):\n if self.is_close(coords, box[\"coords\"]):\n found_box = True\n self.update_box(i, 
coords)\n break\n if not found_box:\n self.add_new_box(coords)\n\n\nif __name__ == \"__main__\":\n import argparse\n logging.basicConfig(level=logging.INFO)\n\n parser = argparse.ArgumentParser(description=\"Face detection on streams via Streamlink\")\n parser.add_argument(\"url\", help=\"Stream to play\")\n parser.add_argument(\"--path_asset\", default=None)\n parser.add_argument(\"--x0\", default=None, type=float)\n parser.add_argument(\"--y0\", default=None, type=float)\n parser.add_argument(\"--x1\", default=None, type=float)\n parser.add_argument(\"--y1\", default=None, type=float)\n\n opts = parser.parse_args()\n \n TWITCH_URL = opts.url if opts.url else \"https://www.twitch.tv/valhalla_cup\"\n main(TWITCH_URL, opts.path_asset, opts.x0, opts.y0, opts.x1, opts.y1)\n # stream_without_torch(TWITCH_URL)\n","repo_name":"khlin216/torchserve-streamer","sub_path":"torchserve/streamer.py","file_name":"streamer.py","file_ext":"py","file_size_in_byte":9203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2086722500","text":"from unicodedata import category\nfrom coins.models import Balance\nfrom coins.serializers import BalanceSerializer, TransactionSerializer\nfrom coins.models import Coin, Transactions\nfrom rest_framework.test import APITestCase\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import status\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework.test import APITestCase\nfrom rest_framework.test import APIClient\n\n\nclass CoinApiTest(APITestCase):\n\n def setUp(self) -> None:\n self.client = APIClient()\n self.user = User.objects.create(\n username='admin',\n email='a@admin.com',\n password=make_password('a123456')\n )\n\n self.user2 = User.objects.create(\n username='test',\n email='test@test.com',\n password=make_password('test')\n )\n\n self.token = Token.objects.create(user=self.user)\n # self.client.credentials(Authorization='Token ' + self.token.key)\n self.client.force_authenticate(user=self.user)\n self.coin = Coin.objects.all().first()\n\n def test_create_deposit(self, **kwargs):\n url = reverse('transactions')\n\n data = {\n 'operation': Transactions.DEPOSIT,\n 'transmitter': self.user.id,\n 'coin': self.coin.id,\n 'amount': 100\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n \n transaction = TransactionSerializer(\n Transactions.objects.get(\n transmitter=self.user,\n operation=Transactions.DEPOSIT\n )\n )\n\n balance = BalanceSerializer(\n Balance.objects.get(\n owner=self.user,\n coin=self.coin,\n category=Balance.REGULAR\n )\n )\n self.assertEqual(\n response.data,\n {\n 'transaction': transaction.data,\n 'balance': balance.data\n }\n )\n\n def test_create_withdrawal(self, **kwargs):\n url = reverse('transactions')\n\n data = {\n 'operation': Transactions.WITHDRAWAL,\n 'transmitter': self.user.id,\n 'coin': self.coin.id,\n 'amount': 100\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)\n \n balance = Balance.objects.get(\n owner=self.user,\n category=Balance.REGULAR,\n coin=self.coin\n )\n\n balance.balance = 200.00000\n balance.save()\n\n response = self.client.post(url, data)\n\n balance = Balance.objects.get(\n owner=self.user,\n category=Balance.REGULAR,\n coin=self.coin\n )\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, BalanceSerializer(balance).data)\n\n","repo_name":"LucaPicc/basic-wallet","sub_path":"backend/backend/coins/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28584134325","text":"# import click\nfrom threading import Thread\nfrom sys import argv\n\nfrom dhcp import dhcp\nfrom tcp import tcp\nfrom tftp import tftp\nfrom utility import power_cycle\n\ndata_dir = \"./install/boot\"\ntftp_port = 69\ntcp_port = 3333\nip = \"172.30.8.1\"\nsubnet_mask = \"255.255.255.0\"\nmac_ip_file = \"hosts.csv\"\n\ndef server(): \n tftp_thread = Thread(target=tftp.do_tftpd, args=[data_dir, ip, tftp_port], name=\"tftpd\")\n tftp_thread.start()\n\n dhcp_thread = Thread(target=dhcp.do_dhcp, args=[ip, subnet_mask, mac_ip_file], name=\"dhcpd\")\n dhcp_thread.start()\n\n tcp_thread = Thread(target=tcp.do_tcp, args=[data_dir, tcp_port, ip], name=\"tcp\")\n tcp_thread.start()\n\n tftp_thread.join()\n dhcp_thread.join()\n tcp_thread.join()\n\n\ndef restart(ports):\n for port in ports:\n power_cycle.power_cycle(port)\n\n\ndef reinstall(port):\n with open(\"/tcp/reinstall.txt\", \"w\") as f:\n f.write(\"172.30.8.{}\".format(port))\n \n power_cycle.power_cycle(port)\n\n\ndef exit_piman():\n print(\"Insufficient amount of arguments\")\n exit(1)\n\nif __name__ == \"__main__\":\n args = \"Arguments: \"\n for a in argv:\n args += a + \" \"\n print(args)\n\n if len(argv) < 2:\n power_cycle.power_cycle(10)\n server()\n exit()\n\n if argv[1] == \"server\":\n server()\n elif argv[1] == \"restart\":\n if len(argv) < 3:\n exit_piman()\n restart(argv[2:])\n elif argv[1] == \"reinstall\":\n if len(argv) < 3:\n exit_piman()\n reinstall(argv[2:])\n else: \n power_cycle.power_cycle(10)\n server()\n","repo_name":"Hankang0321/CS158B-Group","sub_path":"piman.py","file_name":"piman.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39776514094","text":"\"\"\"testdb URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom dbconn import views\n\nurlpatterns = [\n\turl(r'^$', views.homepage), # \"^\"符號表示字串開頭,\"$\"表示字串結尾\n\turl(r'^post/(\\w+)$', views.showpost),\n url(r'^currency/(?P[A-Z]{3})/$', views.USD),\n url(r'^oilprice/$', views.Oilprice),\n url(r'^rate/$', views.Rate),\n url(r'^invoice/$', views.Invoice),\n url(r'^admin/', include(admin.site.urls)),\n]\n","repo_name":"multw/twinformation","sub_path":"testdb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20248712020","text":"import json\n\nfrom rest_framework import status, viewsets, mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom common.permissions import ManagerPermission\nfrom common.constants import CANCELED, DONE, DONT_ENOUGH_MONEY, DONT_AVAILABLE\nfrom payments.models import CreditCard, Order, Cart\nfrom payments.serializers import CreditCardSerializer, OrderSerializer, TransactionSerializer, CartSerializer\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreditCardView(viewsets.ViewSet, mixins.CreateModelMixin):\n permission_classes = (IsAuthenticated, )\n\n def create(self, request, *args, **kwargs):\n logger.info(f'create credit card: {request.data}')\n data = request.data\n data['user'] = request.user.pk\n serializer = CreditCardSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n if serializer.errors:\n card = CreditCard.objects.filter(user=request.user).first()\n if card:\n card.balance += request.data['balance']\n card.save()\n return Response(CreditCardSerializer(card).data, status=status.HTTP_200_OK)\n logger.error(f'create credit card: {request.data} - {str(serializer.errors)}')\n return Response({'error': serializer.errors},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass CartView(APIView):\n permission_classes = (IsAuthenticated, )\n\n def get(self, request):\n try:\n logger.info('get cart')\n cart = Cart.objects.personal(user=request.user)\n return Response(CartSerializer(cart).data, status=status.HTTP_200_OK)\n except Exception as e:\n logger.error(f'get cart - {str(e)}')\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def post(self, request):\n try:\n logger.info('post items to cart')\n data = json.loads(request.body)\n Cart.objects.add_product(user=request.user, product_id=data['product_id'], amount=data.get('amount', 1))\n return Response({'info': 'added'}, status=status.HTTP_200_OK)\n except Exception as e:\n logger.error(f\"post item to cart - str(e)\")\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass CardDetails(APIView):\n def post(self, request):\n try:\n logger.info(f'delete item from cart {str(request.data)}')\n data = json.loads(request.body)\n Cart.objects.remove_product(user=request.user, product_id=data['product_id'])\n return Response({'info': 'deleted'}, status=status.HTTP_200_OK)\n except Exception as e:\n logger.error(f'delete item from cart {str(request.data)} - {str(e)}')\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass TransactionView(viewsets.ViewSet, mixins.CreateModelMixin):\n permission_classes = (IsAuthenticated, )\n\n def create(self, request):\n try:\n logger.info(f'create transaction: {request.data}')\n cart = Cart.objects.get(user=request.user)\n card = CreditCard.objects.get(user=request.user)\n has_balance = cart.check_balance()\n if has_balance == DONT_ENOUGH_MONEY:\n raise Exception('У вас недостаточно средств на карте')\n is_available = cart.check_availability()\n if is_available[0] == DONT_AVAILABLE:\n raise Exception('Данных товаров нет в наличии')\n available = is_available[1]\n new_cart = Cart.objects.create(total_sum=cart.total_sum)\n for cart_item in cart.cart_items.all():\n new_cart.cart_items.add(cart_item.id)\n 
new_cart.save()\n data = {\"cart\": new_cart.id, 'availability': available}\n serializer = TransactionSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n cart.withdraw_money()\n cart.empty_cart()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except CreditCard.DoesNotExist:\n logger.error(f'create transaction: {request.data} - credit card doesn\\'t exist')\n return Response({\"error\": \"Нет кредитной карты! Добавьте ее\"}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n logger.error(f'create transaction: {request.data} - {str(e)}')\n return Response({\"error\": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass OrderView(viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n queryset = Order.objects.all()\n serializer_class = OrderSerializer\n\n @action(methods=['GET'], detail=False, url_path='managers', url_name='managers',\n permission_classes=(ManagerPermission,))\n def managers_orders(self, request):\n logger.info(f'managers\\' orders')\n queryset = Order.objects.assignee_orders(assignee=request.user)\n serializer = OrderSerializer(queryset, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(methods=['PUT'], detail=True, url_path='cancel', url_name='cancel',\n permission_classes=(IsAuthenticated,))\n def user_cancel(self, request, pk):\n logger.info('user cancel order')\n order = Order.objects.get(id=pk)\n order.status = CANCELED\n order.assignee = None\n order.save()\n return Response({'info': 'canceled'}, status=status.HTTP_200_OK)\n\n @action(methods=['PUT'], detail=True, url_path='complete', url_name='complete',\n permission_classes=(IsAuthenticated,))\n def complete_order(self, request, pk):\n logger.info(f'order completed {pk}')\n order = Order.objects.get(id=pk)\n order.status = DONE\n order.save()\n return Response({'info': 'completed'}, status=status.HTTP_200_OK)","repo_name":"ayazhanutemurat/DjanoProject","sub_path":"market_place/payments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74211628881","text":"from django.shortcuts import render\nimport requests\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\n\n\ndef index(request):\n return render(request,\"index.html\",{})\n# Create your views here.\n\ndef get_weather(request):\n result = {'success': True, 'msg': ''}\n try:\n lat = request.POST['lat']\n lon = request.POST['lon']\n URL = \"https://api.met.no/weatherapi/locationforecast/2.0/compact\"\n PARAMS = {'lat': lat, 'lon': str(lon)}\n HEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36',\n 'From': 'youremail@domain.example'\n }\n res = requests.get(url = URL, headers= HEADERS, params = PARAMS)\n json_data = json.loads(res.text)\n result['data'] = json_data\n except Exception as e:\n result['success'] = False\n result['msg'] = str(e)\n print(str(e))\n return JsonResponse(result)\n","repo_name":"chidodev/neliti_task_2","sub_path":"neliti/task2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22682048094","text":"from calendar import Calendar\nfrom dataclasses import asdict\nfrom datetime import datetime, timedelta, time\n\nfrom dao.event import eventDAO\nfrom db import transactional\nfrom domain.calendar import CalendarItem, CalendarView\nfrom domain.event import Event\nfrom utils import (\n format_date,\n generate_pregnancy_weeks,\n get_pregnancy_examinations\n)\n\n\ndef calendar_range(firstweekday: int, year: int, month: int):\n c = Calendar(firstweekday)\n return list(c.itermonthdates(year, month))\n\n\ndef list_range(firstweekday: int, calendar_id: int, year: int, month: int):\n time_range = calendar_range(firstweekday, year, month)\n rows = eventDAO.find_by_time_range(calendar_id,\n time_range[0],\n time_range[-1])\n items = {format_date(d): CalendarItem(date=d, events=[])\n for d in time_range}\n if rows is not None:\n events = [Event(*row) for row in rows]\n\n for event in events:\n\n end = event.end_date\n if end > time_range[-1]:\n end = time_range[-1]\n start = event.start_date\n if start < time_range[0]:\n start = time_range[0]\n delta = end - start\n for d in range(delta.days + 1):\n dd = start + timedelta(d)\n items[format_date(dd)].events.append(event.id)\n return CalendarView(events=events, items=items)\n\n\n@transactional\ndef menstruation_start(calendar_id: int, start: datetime,\n menstruation_period: int, full_period: int):\n\n end = start + timedelta(days=menstruation_period)\n\n full_period_delta = timedelta(days=full_period)\n # create current period\n menstruation_event = Event(\n id=None,\n title=\"经期\",\n description=\"\",\n calendar_id=calendar_id,\n create_time=datetime.now(),\n modified_time=datetime.now(),\n start_date=start,\n end_date=end,\n start_time=None,\n end_time=None,\n recurrence=0,\n state=1,\n )\n\n eventDAO.save(**asdict(menstruation_event))\n\n # predict ovulation preid\n ovulation_predict_delta = full_period / 2\n ovulation_start = ovulation_predict_delta - 5\n ovulation_end = ovulation_predict_delta + 4\n menstruation_event = Event(\n id=None,\n title=\"排卵期\",\n description=\"\",\n calendar_id=calendar_id,\n create_time=datetime.now(),\n modified_time=datetime.now(),\n start_date=start + timedelta(days=ovulation_start),\n end_date=start + timedelta(days=ovulation_end),\n start_time=None,\n end_time=None,\n recurrence=0,\n state=1,\n )\n eventDAO.save(**asdict(menstruation_event))\n # predict next period\n predict_menstruation = Event(\n id=None,\n title=\"经期(计算)\",\n description=\"\",\n calendar_id=calendar_id,\n create_time=datetime.now(),\n modified_time=datetime.now(),\n start_date=start + full_period_delta,\n end_date=end + full_period_delta,\n start_time=None,\n end_time=None,\n recurrence=0,\n state=1,\n )\n eventDAO.save(**asdict(predict_menstruation))\n\n\n@transactional\ndef normal(calendar_id: int, title: str, description: str,\n start_date: str, end_date: str,\n start_time: str, end_time: str) -> int:\n event = Event(\n id=None,\n title=title,\n description=description,\n calendar_id=calendar_id,\n create_time=datetime.now(),\n modified_time=datetime.now(),\n start_date=start_date,\n end_date=end_date,\n start_time=start_time,\n end_time=end_time,\n recurrence=0,\n state=1,\n )\n return eventDAO.save(**asdict(event))\n\n\n@transactional\ndef pregnancy_start(calendar_id: int, start: datetime):\n weeks = generate_pregnancy_weeks(start)\n events = []\n for w in weeks:\n wd = get_pregnancy_examinations(w[0])\n if wd is None:\n wd = \"\"\n event = Event(\n id=None,\n title=f'第 {w[0]} 周',\n description=wd,\n 
calendar_id=calendar_id,\n create_time=datetime.now(),\n modified_time=datetime.now(),\n start_date=w[1],\n end_date=w[2],\n start_time=time(hour=0, minute=0, second=0),\n end_time=time(hour=23, minute=59, second=59),\n recurrence=0,\n state=1\n )\n events.append(asdict(event))\n eventDAO.batch_save(events)\n","repo_name":"lostsquirrel/calendar","sub_path":"service/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"22506453060","text":"\"\"\"Testing sound device.\"\"\"\n\nfrom __future__ import print_function, absolute_import\n\nimport sounddevice as sd\nimport numpy as np\n\n\ndef play_sound(data, fs):\n sd.play(data, fs)\n status = sd.wait()\n\n return status\n\n\n# fs = 48000\n# sound_1 = np.ones((fs,), dtype=np.float64)*1000\n#\n# play_sound(sound_1, fs)\n\nfs = 44100\nsound_2 = np.ones((fs*2,), dtype=np.float64)*1000\n\nplay_sound(sound_2, fs)\n","repo_name":"SensorsINI/jaer-control","sub_path":"scripts/test_sound_device.py","file_name":"test_sound_device.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"17189743201","text":"# 토너먼트 카드 게임\ndef find(l, r):\n if l==r:\n return l\n else:\n r1 = find(l, (l+r)//2)\n r2 = find((l+r)//2+1, r)\n if card[r1] == card[r2]:\n return r1\n else:\n if card[r1] == 1 and card[r2] == 3:\n return r1\n elif card[r1] == 3 and card[r2] == 1:\n return r2\n elif card[r1] < card[r2]:\n return r2\n elif card[r1] > card[r2]:\n return r1\n\nimport sys\nsys.stdin = open('input07.txt', 'r')\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n card = [0]+list(map(int, input().split())) # 인덱스 1번부터 저장\n\n print(f\"#{tc} {find(1,N)}\")\n","repo_name":"Lagom92/algorithm","sub_path":"0227/card_game.py","file_name":"card_game.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7191562137","text":"n = int(input())\r\n\r\ns1 = 'I hate'\r\ns2 = 'I love'\r\ns3 = 'that'\r\ns4 = ' it'\r\ns = ''\r\ny=1\r\n\r\nfor i in range(n):\r\n if y==1:\r\n s=s1+s4\r\n y+=1\r\n elif y%2==0:\r\n s=s.replace('it', '')\r\n s=s+s3+' '+s2+s4\r\n y+=1\r\n else:\r\n s=s.replace('it', '')\r\n s=s+s3+' '+s1+s4\r\n y+=1\r\nprint(s)\r\n","repo_name":"ters81/codeforces.com","sub_path":"A. Халк.py","file_name":"A. Халк.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"26608646474","text":"import numpy as np\n\nfrom black_it.samplers.r_sequence import RSequenceSampler\nfrom black_it.search_space import SearchSpace\n\nexpected_params = np.array(\n [\n [0.94, 0.44],\n [0.69, 0.01],\n [0.45, 0.58],\n [0.2, 0.15],\n [0.96, 0.72],\n [0.71, 0.29],\n [0.47, 0.86],\n [0.22, 0.43],\n ],\n)\n\n\ndef test_rsequence_2d() -> None:\n \"\"\"Test the r-sequence sampler, 2d.\"\"\"\n sampler = RSequenceSampler(batch_size=8, random_state=0)\n param_grid = SearchSpace(\n parameters_bounds=np.array([[0, 1], [0, 1]]).T,\n parameters_precision=np.array([0.01, 0.01]),\n verbose=False,\n )\n new_params = sampler.sample(param_grid, np.zeros((0, 2)), np.zeros((0, 2)))\n assert np.allclose(expected_params, new_params)\n","repo_name":"bancaditalia/black-it","sub_path":"tests/test_samplers/test_rseq.py","file_name":"test_rseq.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"2"}
+{"seq_id":"31086942427","text":"import math\nimport time\nimport os.path\n\nimport numpy as np\n\nimport keras\nfrom keras.callbacks import TensorBoard, History\nfrom keras.utils import np_utils\nfrom sklearn import metrics\n\nimport keras.backend as K\nimport tensorflow as tf\n\nfrom myclassifier.batchgenerator import PaddedBatchGenerator\n\nimport matplotlib.pyplot as plt\nimport itertools\n\nfrom lib.buildmodels import build_model\n\ndef train_and_evaluate(train, test, model, batch_size=4, epochs=25, name=\"model\"):\n\n ## create the batches for train annd test\n paddedTrainBatch = PaddedBatchGenerator(train['samples'], train['labels'], batch_size)\n paddedTestBatch = PaddedBatchGenerator(test['samples'], test['labels'], batch_size)\n\n ## compile the model\n model.compile(optimizer = \"Adam\", loss = \"categorical_crossentropy\", metrics = [\"accuracy\"])\n\n ## train the model\n model.fit(paddedTrainBatch, epochs=epochs, verbose=2)\n\n ## get predictions and labels\n actual, predicted = get_labels_and_prediction_without_padding(model, paddedTestBatch)\n\n ## calculate confusion matrix\n m = metrics.confusion_matrix(actual, predicted, labels=np.arange(4))\n\n ## get frame level accuracy\n frame_result = model.evaluate(paddedTestBatch, verbose=2)\n\n print('labels: ', actual)\n print('predicted:', predicted)\n\n ## calculate file level accuracy\n count = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n count += 1\n\n return round(1 - frame_result[1], 4), round(1 - count / len(actual), 4), m\n\ndef handle_k_fold(name, models_rnn, k_fold, nodes, dropout, l2, epochs, trains, tests):\n print('--------------------------------------------------')\n errs_frame = []\n errs_file = []\n matrixes = None\n\n ## calculates data for all folds\n for i in range(k_fold):\n ## build new model for each fold\n rnn = build_model(models_rnn(20, nodes, dropout, l2))\n\n ## get error rate per frame and per file, and the confusion matrix\n err_frame, err_file, matrix = train_and_evaluate(trains[i], tests[i], rnn, epochs=epochs, name=name)\n errs_frame.append(err_frame)\n errs_file.append(err_file)\n\n ## sum all folds' confusion matrix\n if matrixes is not None:\n matrixes += matrix\n else:\n matrixes = matrix\n\n print('result for model:', name)\n print('frame avg err:', round(np.mean(errs_frame), 4))\n print('frame std err:', round(np.std(errs_frame), 4))\n print('file avg err: ', round(np.mean(errs_file), 4))\n print('file std err: ', round(np.std(errs_file), 4))\n print(matrixes)\n print('--------------------------------------------------')\n\n## calculate labels and prediction for each file and removed padding\ndef get_labels_and_prediction_without_padding(model, paddedTestBatch):\n actual_labels = []\n predicted_labels = []\n ## flattening the matrix while categorize the class\n ## output should be 1D array of class indexes\n for i in range(len(paddedTestBatch)):\n ## get examples and labels\n examples, targets = paddedTestBatch[i]\n\n ## get predictions\n prediction = model.predict(examples)\n\n ## get the class index while removing paddings\n for j in range(len(targets)):\n actual_label = []\n predicted_label = []\n for k in range(len(targets[j])):\n if np.sum(targets[j][k]) != 0:\n print(prediction[j][k])\n actual_label.append(np.argmax(targets[j][k]))\n predicted_label.append(np.argmax(prediction[j][k]))\n actual_labels.append(np.bincount(np.array(actual_label)).argmax())\n predicted_labels.append(np.bincount(np.array(predicted_label)).argmax())\n\n return actual_labels, 
predicted_labels\n","repo_name":"mohit03031999/Speaker-Verification-Using-Recurrent-Neural-Network","sub_path":"myclassifier/recurrent.py","file_name":"recurrent.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10438954259","text":"import numpy as np\nfrom xml.etree.ElementTree import ElementTree\nimport re\nfrom matplotlib import pyplot as plt\n\nclass SVGReader(object):\n def __init__(self):\n # load XML-tree\n self.ns = 'http://www.w3.org/2000/svg' # XML namespace\n self.et = ElementTree()\n self.svgpath = None\n\n def init(self, data):\n self.data = data\n self.tree = self.et.parse(data)\n if (self.tree.get('width')[-2:] in ['mm', 'px']): # units millimeter or pixels\n viewbox = self.tree.get('viewBox').split(' ')\n xmin, ymin, xmax, ymax = viewbox\n if (self.tree.get('width')[-2:] is 'mm'):\n self.width_px = float(xmax) - float(xmin)\n self.height_px = float(ymax) - float(ymin)\n self.meter_to_pixel = self.width_px/float(self.tree.get('width')[:-2])\n else: # [px]\n self.width_px = float(xmax) - float(xmin)\n self.height_px = float(ymax) - float(ymin)\n else:\n # if no unit mentioned, it is px\n self.width_px = float(self.tree.get('width')) # get width from svg\n self.height_px = float(self.tree.get('height')) # get height from svg\n\n self.position = [0, 0] # default, [px]\n self.obstacles = []\n\n def convert_path_to_points(self):\n\n # find svg-paths, describing the shapes e.g. using Bezier curves\n # for rectangle check on straight lines --> is control point on line\n # between start and end?\n try:\n # search for the word path in the outer branch of the SVG-file\n self.svgpath = self.tree.findall(\"{%s}path\" %self.ns)\n if not self.svgpath:\n # if not yet found, search for the word path in the next branch\n self.svgpath = self.tree.find(\"{%s}g\" %self.ns).findall(\"{%s}path\" %self.ns)\n if not self.svgpath:\n # if not yet found, search for the word path in the next branch\n self.svgpath = self.tree.find(\"{%s}g\" %self.ns).find(\"{%s}g\" %self.ns).findall(\"{%s}path\" %self.ns)\n except: # error occured, e.g. 
no found, this is possible when you only have basic shapes\n print('No shapes found which are described by a path, probably you only have basic shapes')\n return\n if not self.svgpath: # no path found, this is possible when you only have basic shapes\n print('No shapes found which are described by a path, probably you only have basic shapes')\n return\n\n self.n_paths = len(self.svgpath) # number of paths which build up the figure\n\n # initialize output file\n counter = 0\n # loop over paths\n while counter < self.n_paths:\n # look for all MCmc with a number behind it, line runs until a space or minus sign is found\n lines = re.findall('[MCmc][\\s.,0-9-]+', self.svgpath[counter].get('d'))\n points = []\n for line in lines:\n if line:\n # line [0] contains Mx,y, the startpoint\n test1=line[1:].replace(\",\",\" \") # replace comma by: space\n test2 = test1.replace(\"-\",\" -\") # replace minus sign by: space minus\n test3 = test2.replace(\"c\",\" c \") # replace c by: space c space\n # splits the line at each space, to create separate points\n newpoints = np.array(list(map(eval, test3.strip().split(' '))))\n if line[0] == 'c': # lower case c means relative coordinates, upper case C is absolute coordinates\n newpoints[0:6:2] = newpoints[0:6:2] + points[-2] # relative to absolute coordinates for x\n newpoints[1:6:2] = newpoints[1:6:2] + points[-1] # relative to absolute coordinates for y\n # for the first line (Mx,y) there is no 'c', so the starting point (x,y)\n # is added to points in the first iteration\n points.extend(newpoints) # add newpoints to points\n counter += 1\n # save points to file\n f = open(\"environment.txt\", \"a\")\n f.write( \"path_\"+ str(counter) + \"=\"+ str(np.array(points)) + \"\\n\" )\n f.close()\n\n def convert_basic_shapes(self):\n # code for basic shapes and \n\n # Todo: these shapes can also have a transform, find it and\n # add this transform to self.transform\n\n # find svg-paths, describing the rectangles\n try:\n # search for the word rect in the outer branch of the SVG-file\n self.rectangles = self.tree.findall(\"{%s}rect\" %self.ns)\n if not self.rectangles:\n # if not yet found, search for the word rect in the next branch\n self.rectangles = self.tree.find(\"{%s}g\" %self.ns).findall(\"{%s}rect\" %self.ns)\n if not self.rectangles:\n # if not yet found, search for the word rect in the next branch\n self.rectangles = self.tree.find(\"{%s}g\" %self.ns).find(\"{%s}g\" %self.ns).findall(\"{%s}rect\" %self.ns)\n self.n_rect = len(self.rectangles) # number of paths which build up the figure\n if self.n_rect == 0:\n print('No rectangles found')\n except:\n print('No shapes found which are described by a rect')\n\n # find svg-paths, describing the circles\n try:\n # search for the word circ in the outer branch of the SVG-file\n self.circles = self.tree.findall(\"{%s}circle\" %self.ns)\n if not self.circles:\n # if not yet found, search for the word circ in the next branch\n self.circles = self.tree.find(\"{%s}g\" %self.ns).findall(\"{%s}circle\" %self.ns)\n if not self.circles:\n # if not yet found, search for the word circ in the next branch\n self.circles = self.tree.find(\"{%s}g\" %self.ns).find(\"{%s}g\" %self.ns).findall(\"{%s}circle\" %self.ns)\n self.n_circ = len(self.circles) # number of paths which build up the figure\n if self.n_circ == 0:\n print('No circles found')\n except:\n print('No shapes found which are described by a circle')\n\n for rectangle in self.rectangles:\n obstacle = {}\n obstacle['shape'] = 'rectangle'\n pos = 
[float(rectangle.get('x')), float(rectangle.get('y'))] # Note: [x,y] is the top left corner\n # axis are placed in the top left corner and point to the right(x) and downward(y)\n obstacle['pos'] = [pos[0]+float(rectangle.get('width'))*0.5, pos[1]+float(rectangle.get('height'))*0.5]\n obstacle['pos'] += self.transform # apply transform\n obstacle['width'] = float(rectangle.get('width'))\n obstacle['height'] = float(rectangle.get('height'))\n obstacle['velocity'] = [0, 0]\n obstacle['bounce'] = False\n self.obstacles.append(obstacle)\n\n for circle in self.circles:\n obstacle = {}\n obstacle['shape'] = 'circle'\n obstacle['pos'] = [float(circle.get('cx')), float(circle.get('cy'))] # Note: [x,y] is the top left corner\n obstacle['pos'] += self.transform # apply transform\n obstacle['radius'] = float(circle.get('r'))\n obstacle['velocity'] = [0, 0]\n obstacle['bounce'] = False\n self.obstacles.append(obstacle)\n\n def convert_lines(self):\n # code for basic shapes and \n\n # find svg-polylines\n # example: \n try:\n # search for the word polyline in the outer branch of the SVG-file\n self.polylines = self.tree.findall(\"{%s}polyline\" %self.ns)\n if not self.polylines:\n # if not yet found, search for the word polyline in the next branch\n self.polylines = self.tree.find(\"{%s}g\" %self.ns).findall(\"{%s}polyline\" %self.ns)\n if not self.polylines:\n # if not yet found, search for the word polyline in the next branch\n self.polylines = self.tree.find(\"{%s}g\" %self.ns).find(\"{%s}g\" %self.ns).findall(\"{%s}polyline\" %self.ns)\n self.n_polylines = len(self.polylines) # number of paths which build up the figure\n if self.n_polylines == 0:\n print('No polylines found')\n except:\n print('No shapes found which are described by a polyline')\n\n # find svg-lines\n # example: \n try:\n # search for the word line in the outer branch of the SVG-file\n self.lines = self.tree.findall(\"{%s}line\" %self.ns)\n if not self.lines:\n # if not yet found, search for the word line in the next branch\n self.lines = self.tree.find(\"{%s}g\" %self.ns).findall(\"{%s}line\" %self.ns)\n if not self.lines:\n # if not yet found, search for the word line in the next branch\n self.lines = self.tree.find(\"{%s}g\" %self.ns).find(\"{%s}g\" %self.ns).findall(\"{%s}line\" %self.ns)\n self.n_lines = len(self.lines) # number of paths which build up the figure\n if self.n_lines == 0:\n print('No lines found')\n except:\n print('No shapes found which are described by a line')\n\n for polyline in self.polylines:\n try:\n stroke_width = float(polyline.get('stroke-width')) # stroke-width given as basic element\n except: # stroke-width wrapped in style element\n style = polyline.get('style').split(';')\n for element in style:\n if 'stroke-width' in element:\n stroke_width = float(element.split(':')[1])\n vertices = polyline.get('points').split(' ')\n vertices[:] = (v for v in vertices if v != '') # remove all empty strings\n vertices = np.array(list(map(eval, vertices))) # gives array of arrays [[x,y],[],...]\n vertices += self.transform\n\n # make rectangle of each vertex couple\n for l in range(len(vertices)-1):\n obstacle = {}\n obstacle['shape'] = 'rectangle'\n obstacle['velocity'] = [0, 0]\n obstacle['bounce'] = False\n\n # Note: to avoid explicitly checking if the line goes from\n # left to right / right to left\n # bottom to top / top to bottom\n # we use w and h separate from obstacle width and height\n line = np.array(vertices[l+1]) - np.array(vertices[l])\n if line[0] == 0: # vertical line\n obstacle['width'] = 
stroke_width\n obstacle['height'] = abs(line[1])\n h = line[1]\n w = cmp(h,0)*stroke_width # give stroke_width same sign as h\n elif line[1] == 0: # horizontal line\n obstacle['width'] = abs(line[0])\n obstacle['height'] = stroke_width\n w = line[0]\n h = cmp(w,0)*stroke_width # give stroke_width same sign as w\n else:\n raise RuntimeError('Diagonal lines are not yet supported')\n obstacle['pos'] = [vertices[l][0] + w*0.5, vertices[l][1] + h*0.5]\n self.obstacles.append(obstacle)\n\n for line in self.lines:\n obstacle = {}\n obstacle['shape'] = 'rectangle'\n obstacle['velocity'] = [0, 0]\n obstacle['bounce'] = False\n\n try:\n stroke_width = float(line.get('stroke-width')) # stroke-width given as basic element\n except: # stroke-width wrapped in style element\n style = line.get('style').split(';')\n for element in style:\n if 'stroke-width' in element:\n stroke_width = float(element.split(':')[1])\n x1, y1 = float(line.get('x1')), float(line.get('y1'))\n x2, y2 = float(line.get('x2')), float(line.get('y2'))\n # add transform\n x1 += self.transform[0]\n y1 += self.transform[1]\n x2 += self.transform[0]\n y2 += self.transform[1]\n if x1 == x2: # vertical line\n obstacle['width'] = stroke_width\n obstacle['height'] = abs(y2-y1)\n h = y2-y1 # signed value\n w = cmp(h,0)*stroke_width\n elif y1 == y2: # horizontal line\n obstacle['width'] = abs(x2-x1)\n obstacle['height'] = stroke_width\n w = x2-x1 # signed value\n h = cmp(w,0)*stroke_width\n else:\n raise RuntimeError('Diagonal lines are not yet supported')\n\n # don't use width and height since then you have to check if x1 > x2 etc,\n # to decide if the line goes from left to right or the other way around\n obstacle['pos'] = [x1 + w*0.5, y1 + h*0.5]\n self.obstacles.append(obstacle)\n\n def compute_transform(self):\n # Note: only works for translation for the moment\n # check if figure is transformed, e.g. 
a translation\n try:\n trans1 = self.tree.find(\"{%s}g\" %self.ns).get('transform')\n trans1 = trans1.split('translate')\n trans1.remove('')\n self.transform1 = np.array(map(eval, trans1)[0])\n except:\n print('No transform1 found')\n try:\n trans2 = self.tree.find(\"{%s}g\" %self.ns).find(\"{%s}g\" %self.ns).get('transform')\n trans2 = trans2.split('translate')\n trans2.remove('')\n self.transform2 = np.array(map(eval, trans2)[0])\n except:\n print('No transform2 found')\n if hasattr(self, 'transform1') and hasattr(self, 'transform2'):\n self.transform = self.transform1 + self.transform2 # coordinate frame transformation\n elif hasattr(self, 'transform1') :\n self.transform = self.transform1\n elif hasattr(self, 'transform2') :\n self.transform = self.transform2\n else:\n self.transform = [0, 0] # no transforms found\n\n def reconstruct(self, file):\n # help function, re-draws the loaded figure, allowing to check if it has the desired shapes\n points = []\n with open(file, \"r\") as f:\n for line in f:\n for word in line.split(' '):\n if (word != ', ' and word != ', ' and word != '' and word != ' '):\n points.append(word)\n f.close()\n newpoints = []\n for point in points:\n if point[-1:] == '\\n':\n point = point[:-1]\n if point[-1:] == ']':\n point = point[:-1]\n if point[0] != 'p':\n newpoints.append(point)\n x = []\n y = []\n for i in range(0,len(newpoints),2):\n x.append(newpoints[i])\n y.append(newpoints[i+1])\n\n plt.plot(x,y)\n plt.show()\n\n def build_environment(self):\n\n # Todo: write code here to check which elements are in the svg:\n # path, rectangle, circ, line, polyline,...\n # and call the appropriate functions, instead of calling them all\n\n self.compute_transform() # assigns values to self.transform\n\n self.convert_basic_shapes() # looks for rect and circle shapes\n self.convert_path_to_points() # looks for shapes defined by a Bezier path\n self.convert_lines() # looks for shapes defined by polyline and line\n # if you found some paths, they are transformed to an obstacle and\n # added to self.obstacles\n\n def get_gcode_description (self):\n # Note: for now this function only works for lines and paths. With capital M and lower-case c.\n # The paths are supposed to represent circle segments, if they don't,\n # a circle approximation of the curve is used.\n\n children = self.tree.getchildren() # the xml-tree\n self.commands = [] # holds the GCode commands\n\n for idx, child in enumerate(children):\n if 'line' in child.tag:\n # child is a line\n # example: \n x1, y1 = float(child.get('x1')), float(child.get('y1')) # start\n x2, y2 = float(child.get('x2')), float(child.get('y2')) # end\n # add transform\n x1 += self.transform[0]\n y1 += self.transform[1]\n x2 += self.transform[0]\n y2 += self.transform[1]\n\n y1 = -y1 # flip axis: in svg top left corner is [0,0], y-axis points downwards\n y2 = -y2 # make y-axis point upwards\n\n # make line GCode segment\n if not self.commands:\n # this is the first command, so make it a G00\n self.commands.append('G00 X'+str(x1)+' Y'+str(y1))\n self.commands.append('G01 X'+str(x2)+' Y'+str(y2)) # only add endpoint, startpoint comes from previous command\n\n elif 'path' in child.tag:\n path = child.get('d')\n # d='Mx,y c x1 y1 x2 y2 x y'\n # Mx,y = the startpoint or endpoint of the curve\n # c starts a curve, lower case means relative coordinates i.e. 
relative to Mx, y\n # x1 y1 is the control point that is closest to Mx, y\n # x2 y2 is the second control point\n # x y is the endpoint of the curve\n # d= can contain multiple c-commands = curves\n if path[0] == 'M':\n path = path[1:].replace(\",\",\" \") # replace comma by: space\n path = path.replace(\"-\",\" -\") # replace minus sign by: space minus\n path = path.replace(\"c\",\" c \") # replace c by: space c space\n path = path.split(' c ')\n\n filtered_path = []\n for curve in path:\n curve = curve.split(' ')\n # remove all empty strings\n curve = [e for e in curve if e!= '']\n curve = [e for e in curve if e!= ' ']\n filtered_path.append(curve) # save filtered path\n\n # the first border point of the curve (later decide if this is start or end)\n curve_point1 = filtered_path[0]\n curve_point1[0] = float(curve_point1[0])\n curve_point1[1] = -float(curve_point1[1]) # minus: let y-axis point up\n filtered_path.pop(0) # remove first point from path\n\n if filtered_path:\n # there are curves in the path,\n # loop over all curves\n circle_points = []\n for curve in filtered_path:\n if not circle_points:\n # first circle point, move starting from Mx, y = curve_point1\n # note: minus sign for y-direction\n circle_points.append([float(curve[-2])+curve_point1[0], -float(curve[-1])+curve_point1[1]])\n else:\n # this was not the first circle point, move relative from previous point\n # note: minus sign for y-direction\n circle_points.append([float(curve[-2])+circle_points[-1][0], -float(curve[-1])+circle_points[-1][1]])\n\n # add endpoint of curve\n circle_points.insert(0,curve_point1)\n\n # For now this function supposes that each curve consists of minimum three points.\n # If not, you need to approximate the circle shape in another way\n if len(circle_points) > 2:\n # find circle through first three points\n # by finding the intersection of the two perpendicular bisectors through [p1p2] and [p2p3]\n p1, p2, p3 = circle_points[:3]\n mid1 = [(p2[0]+p1[0])*0.5, (p2[1]+p1[1])*0.5] # midpoint of first bisector\n mid2 = [(p3[0]+p2[0])*0.5, (p3[1]+p2[1])*0.5]\n cx, cy = [], [] # will hold circle center\n\n if (p1[0] == p2[0] and p2[1] == p3[1]):\n # vertical and horizontal bisectors\n cx = mid1[0]\n cy = mid2[1]\n elif (p1[1] == p2[1] and p2[0] == p3[0]):\n # horizontal and vertical bisectors\n cx = mid2[0]\n cy = mid1[1]\n elif p2[0] == p1[0]:\n # vertical bisector1\n cx = mid1[0]\n rico2 = (p3[1]-p2[1])/(p3[0]-p2[0])\n normal2 = -1/rico2\n cy = mid2[1] + normal2*(x-mid2[0])\n elif p2[1] == p1[1]:\n # horizontal bisector1\n cy = mid1[1]\n rico2 = (p3[1]-p2[1])/(p3[0]-p2[0])\n normal2 = -1/rico2\n cx = (cy-mid2[1])/normal2 + mid2[0]\n elif p2[0] == p3[0]:\n # vertical bisector2\n cx = mid2[0]\n rico1 = (p2[1]-p1[1])/(p2[0]-p1[0])\n normal1 = -1/rico1\n cy = mid1[1] + normal1*(x-mid1[0])\n elif p2[1] == p3[1]:\n # horizontal bisector2\n cy = mid2[1]\n rico1 = (p2[1]-p1[1])/(p2[0]-p1[0])\n normal1 = -1/rico1\n cx = (cy-mid1[1])/normal1 + mid1[0]\n else:\n # two diagonal bisectors\n rico1 = (p2[1]-p1[1])/(p2[0]-p1[0])\n normal1 = -1/rico1\n # y = mid1[1] + normal1*(x-mid1[0]) [1]\n rico2 = (p3[1]-p2[1])/(p3[0]-p2[0])\n normal2 = -1/rico2\n # y = mid2[1] + normal2*(x-mid2[0]) [2]\n\n # [1] = [2] --> x =\n if normal2 != normal1:\n cx = (mid1[1]-mid2[1]-normal1*mid1[0]+normal2*mid2[0])/(normal2-normal1)\n else:\n raise RuntimeError('Normals are equal, something went wrong')\n\n cy = mid1[1] + normal1*(cx-mid1[0])\n\n # compute radius\n r = np.sqrt((p1[0]-cx)**2+(p1[1]-cy)**2)\n\n # plot solution\n # 
plt.figure(11)\n # eval = np.linspace(0,2*np.pi,100)\n # plt.plot(cx+r*np.cos(eval),cy+r*np.sin(eval),'g-')\n # plt.plot(cx,cy,'gx')\n # # plt.plot(mid1[0],mid1[1],'rx')\n # # plt.plot(mid2[0],mid2[1],'rx')\n # plt.plot(p2[0],p2[1],'rx')\n # plt.plot(p1[0],p1[1],'rx')\n # plt.plot(p3[0],p3[1],'rx')\n\n else:\n raise RuntimeError('Curve must consist of more than two points for the moment')\n\n # given radius and center\n # compute I an J from center\n\n # compute end point of curves\n curve_point2 = np.array(curve_point1)\n for curve in filtered_path:\n # add relative positions of all curves\n curve_point2 += np.array([float(curve[-2]), -float(curve[-1])])\n\n # now decide what is start and end of curve, because start must connect to end of previous command\n prev_seg_end = [float(self.commands[-1].split(' ')[1].split('X')[1]),\n float(self.commands[-1].split(' ')[2].split('Y')[1])]\n dist1 = np.sqrt((curve_point1[0]-prev_seg_end[0])**2+(curve_point1[1]-prev_seg_end[1])**2)\n dist2 = np.sqrt((curve_point2[0]-prev_seg_end[0])**2+(curve_point2[1]-prev_seg_end[1])**2)\n # curve start point is closest to the end of the previous segment\n if dist1 < dist2:\n start = curve_point1\n end_curve = curve_point2\n control_point = [start[0]+float(filtered_path[0][0]), start[1]-float(filtered_path[0][1])]\n else:\n start = curve_point2\n end_curve = curve_point1\n control_point = [start[0]-float(filtered_path[-1][4])+float(filtered_path[-1][2]),\n start[1]+float(filtered_path[-1][5])-float(filtered_path[-1][3])]\n\n # compute I and J\n I = cx - start[0]\n J = cy - start[1]\n\n # determine if circle goes clockwise or counter-clockwise\n # by taking the vector product of the center to the start point & start to control point\n v1 = [start[0]-cx, start[1]-cy]\n v2 = [control_point[0]-start[0], control_point[1]-start[1]]\n vector_product = v1[0]*v2[1] - v2[0]*v1[1]\n\n if vector_product < 0:\n # clockwise arc\n self.commands.append('G02 X'+str(end_curve[0])+' Y'+str(end_curve[1])+ ' I'+str(I)+' J'+str(J))\n else:\n # counter-clockwise arc\n self.commands.append('G03 X'+str(end_curve[0])+' Y'+str(end_curve[1])+ ' I'+str(I)+' J'+str(J))\n else:\n # there are no curves in the path, probably it just contains a move command\n # so it represents a GCode line segment\n self.commands.append('G01 X'+str(end_curve[0])+' Y'+str(end_curve[1]))\n else:\n raise RuntimeError('Only absolute positioning of the start of the curve is supported for now')\n\n # Write .nc file\n old_name = self.data.name.split('/')[-1][:-4]\n f = open(old_name+'_gcode.nc', 'w')\n for command in self.commands:\n f.write(command + '\\n')\n f.close()","repo_name":"meco-group/omg-tools","sub_path":"omgtools/gui/svg_reader.py","file_name":"svg_reader.py","file_ext":"py","file_size_in_byte":27698,"program_lang":"python","lang":"en","doc_type":"code","stars":530,"dataset":"github-code","pt":"2"}
+{"seq_id":"850780220","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/4/25 下午1:18\n# @Author : ShaHeTop-Almighty-ares\n# @Email : yang6333yyx@126.com\n# @File : consume_cpu_compared.py\n# @Software: PyCharm\n\nimport time\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom concurrent.futures import ProcessPoolExecutor\n\n\ndef fib(n):\n if n < 2:\n return 1\n return fib(n - 1) + fib(n - 2)\n\n\nif __name__ == '__main__':\n # 多线程\n with ThreadPoolExecutor(2) as executor:\n all_task = [executor.submit(fib, (n)) for n in range(25, 40)]\n start_time = time.time()\n for f in as_completed(all_task):\n data = f.result()\n print('data:{}'.format(data))\n print('多线程:last time:{}'.format(time.time() - start_time))\n\n # 多进程\n with ProcessPoolExecutor(2) as executor:\n all_task = [executor.submit(fib, (n)) for n in range(25, 40)]\n start_time = time.time()\n for f in as_completed(all_task):\n data = f.result()\n print('data:{}'.format(data))\n print('多进程:last time:{}'.format(time.time() - start_time))\n","repo_name":"yangyuexiong/Python-from-entry-to-presumptuous","sub_path":"进程_线程_协程/进程/code/consume_cpu_compared.py","file_name":"consume_cpu_compared.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4971306075","text":"'''\nCalcular la media de calificaciones de la asignatura de Programación. Deducir cuántas son\nmás altas que la media y cuántas más bajas que dicha media. Se solicita un mínimo de 10\nnotas. Estas calificaciones se ingresarán por teclado y no se permite notas inferiores a 1.0 ni\nmayores a 7.0.\n'''\ndef calcular_media_notas():\n notas = []\n total_notas = 0\n cant_notas = 0\n\n # Solicitar y validar las notas ingresadas por el usuario\n while cant_notas < 10:\n nota = float(input(\"Ingrese una nota (entre 1.0 y 7.0): \"))\n if nota < 1.0 or nota > 7.0:\n print(\"La nota ingresada está fuera del rango permitido.\")\n continue\n\n notas.append(nota)\n total_notas += nota\n cant_notas += 1\n\n # Calcular la media de las notas\n media = total_notas / cant_notas\n\n # Determinar cuántas notas son más altas y cuántas son más bajas que la media\n notas_altas = 0\n notas_bajas = 0\n for nota in notas:\n if nota > media:\n notas_altas += 1\n elif nota < media:\n notas_bajas += 1\n\n return media, notas_altas, notas_bajas\n\n# Llamar a la función para calcular la media de las notas y obtener las cantidades\nmedia, cant_notas_altas, cant_notas_bajas = calcular_media_notas()\n\n# Imprimir los resultados\nprint(\"La media de las calificaciones es:\", media)\nprint(\"Cantidad de notas más altas que la media:\", cant_notas_altas)\nprint(\"Cantidad de notas más bajas que la media:\", cant_notas_bajas)\n\n\n","repo_name":"notKT4/Python-semestre-1","sub_path":"Parcial-N°2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14898401197","text":"import io\nfrom logging import StreamHandler, getLogger\nimport sys\n\nfrom qiskit import BasicAer\nfrom qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister\nfrom qiskit.compiler import transpile\nfrom qiskit.compiler import assemble\nfrom qiskit.qobj import QobjHeader\nfrom qiskit.test import QiskitTestCase\n\n\nclass StreamHandlerRaiseException(StreamHandler):\n \"\"\"Handler class that will raise an exception on formatting errors.\"\"\"\n\n def handleError(self, record):\n raise sys.exc_info()\n\n\nclass TestBasicAerQobj(QiskitTestCase):\n \"\"\"Tests for all the Terra simulators.\"\"\"\n\n def setUp(self):\n super().setUp()\n logger = getLogger()\n self.addCleanup(logger.setLevel, logger.level)\n logger.setLevel(\"DEBUG\")\n\n self.output = io.StringIO()\n logger.addHandler(StreamHandlerRaiseException(self.output))\n\n qr = QuantumRegister(1)\n cr = ClassicalRegister(1)\n self.qc1 = QuantumCircuit(qr, cr, name=\"circuit0\")\n self.qc1.h(qr[0])\n\n def test_qobj_headers_in_result(self):\n \"\"\"Test that the qobj headers are passed onto the results.\"\"\"\n custom_qobj_header = {\"x\": 1, \"y\": [1, 2, 3], \"z\": {\"a\": 4}}\n\n for backend in BasicAer.backends():\n with self.subTest(backend=backend):\n new_circ = transpile(self.qc1, backend=backend)\n qobj = assemble(new_circ, shots=1024)\n\n # Update the Qobj header.\n qobj.header = QobjHeader.from_dict(custom_qobj_header)\n # Update the Qobj.experiment header.\n qobj.experiments[0].header.some_field = \"extra info\"\n\n result = backend.run(qobj).result()\n self.assertEqual(result.header.to_dict(), custom_qobj_header)\n self.assertEqual(result.results[0].header.some_field, \"extra info\")\n","repo_name":"peiyi1/nassc_code","sub_path":"qiskit-terra/test/python/basicaer/test_basicaer_qobj_headers.py","file_name":"test_basicaer_qobj_headers.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
+{"seq_id":"14009283571","text":"from pathlib import Path\n\nfrom typer import Typer, Option\n\nfrom trecover.config import var, log\n\ncli = Typer(name='Download-cli', add_completion=False, help='Download train data or pre-trained model')\n\n\n@cli.command(name='data', help='Download train data')\ndef download_data(link: str = Option(var.TRAIN_DATA_URL, help='Link to the train data on Yandex disk or GitHub'),\n save_dir: Path = Option(var.DATA_DIR, help='Path where to store downloaded data'),\n yandex_disk: bool = Option(False, is_flag=True, help='If the link is to Yandex disk')\n ) -> None:\n \"\"\"\n Download train data from Yandex disk or GitHub.\n\n Parameters\n ----------\n link : str, default=var.TRAIN_DATA_URL\n Sharing link to the train data on Yandex disk or GitHub.\n save_dir : Path, default=var.DATA_DIR\n Path where to store downloaded data.\n yandex_disk : bool, default=False\n If the link is to Yandex disk.\n\n \"\"\"\n\n from trecover.utils.cli import download_archive\n\n download_archive(link=link, save_dir=save_dir, yandex_disk=yandex_disk)\n\n\n@cli.command(name='artifacts', help='Download model artifacts by specified version or archive_link')\ndef download_artifacts(version: str = Option('latest', help=\"Artifacts' version\"),\n archive_link: str = Option(None, help='Link to the artifacts archive on Yandex disk or GitHub'),\n save_dir: Path = Option(var.INFERENCE_DIR, help='Path where to save downloaded artifacts'),\n yandex_disk: bool = Option(False, is_flag=True, help='If the archive_link is to Yandex disk'),\n show: bool = Option(False, is_flag=True, help=\"Print available artifacts' versions\")\n ) -> None:\n \"\"\"\n Download model artifacts by specified version or archive_link to Yandex disk or GitHub.\n\n Parameters\n ----------\n version : str, default='latest'\n Artifacts' version.\n archive_link : str, default=None\n Sharing link to the model artifacts archive on Yandex disk or GitHub.\n save_dir : Path, default=var.INFERENCE_DIR\n Path where to save downloaded artifacts.\n yandex_disk : bool, default=False\n If the link is to Yandex disk.\n show : bool, default=False\n Print available artifacts' versions.\n\n \"\"\"\n\n from rich.prompt import Confirm\n from trecover.utils.cli import download_archive, download_from_github\n\n if show:\n log.project_console.print(var.CHECKPOINT_URLS.keys())\n\n elif archive_link:\n download_archive(link=archive_link, save_dir=save_dir, yandex_disk=yandex_disk)\n\n elif version in var.CHECKPOINT_URLS:\n download_from_github(direct_link=var.CHECKPOINT_URLS[version]['model'], save_dir=save_dir)\n download_from_github(direct_link=var.CHECKPOINT_URLS[version]['config'], save_dir=save_dir)\n\n elif Confirm.ask(prompt='[bright_blue]Specified version was not found. Continue downloading the latest version?',\n default=True,\n console=log.project_console):\n download_from_github(direct_link=var.CHECKPOINT_URLS['latest']['model'], save_dir=save_dir)\n download_from_github(direct_link=var.CHECKPOINT_URLS['latest']['config'], save_dir=save_dir)\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"alex-snd/TRecover","sub_path":"src/trecover/app/cli/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"6080471087","text":"import os\nimport sys\nimport importlib\nfrom dataset.pascal_voc import PascalVoc\nfrom dataset.iterator import DetIter\nfrom detect.detector import Detector\nfrom config.config import cfg\nimport logging\n\ndef evaluate_net(net, dataset, devkit_path, mean_pixels, data_shape,\n model_prefix, epoch, ctx, year=None, sets='test',\n batch_size=1, nms_thresh=0.5, force_nms=False):\n \"\"\"\n Evaluate entire dataset, basically simple wrapper for detections\n\n Parameters:\n ---------\n dataset : str\n name of dataset to evaluate\n devkit_path : str\n root directory of dataset\n mean_pixels : tuple of float\n (R, G, B) mean pixel values\n data_shape : int\n resize input data shape\n model_prefix : str\n load model prefix\n epoch : int\n load model epoch\n ctx : mx.ctx\n running context, mx.cpu() or mx.gpu(0)...\n year : str or None\n evaluate on which year's data\n sets : str\n evaluation set\n batch_size : int\n using batch_size for evaluation\n nms_thresh : float\n non-maximum suppression threshold\n force_nms : bool\n force suppress different categories\n \"\"\"\n # set up logger\n logging.basicConfig()\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if dataset == \"pascal\":\n if not year:\n year = '2007'\n imdb = PascalVoc(sets, year, devkit_path, shuffle=False, is_train=False)\n data_iter = DetIter(imdb, batch_size, data_shape, mean_pixels,\n rand_samplers=[], rand_mirror=False, is_train=False, shuffle=False)\n sys.path.append(os.path.join(cfg.ROOT_DIR, 'symbol'))\n net = importlib.import_module(\"symbol_\" + net) \\\n .get_symbol(imdb.num_classes, nms_thresh, force_nms)\n model_prefix += \"_\" + str(data_shape)\n detector = Detector(net, model_prefix, epoch, data_shape, mean_pixels, batch_size, ctx)\n logger.info(\"Start evaluation with {} images, be patient...\".format(imdb.num_images))\n detections = detector.detect(data_iter)\n imdb.evaluate_detections(detections)\n else:\n raise NotImplementedError(\"No support for dataset: \" + dataset)\n","repo_name":"burness/mxnet-101","sub_path":"day7/ssd/evaluate/evaluate_net.py","file_name":"evaluate_net.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"2"}
+{"seq_id":"5358066032","text":"# Chocolate Scraping with Beautiful Soup\n# Project Chocolate Scraping with Beautiful Soup\n\nimport seaborn as sns\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\nwebpage_response = requests.get(\"https://s3.amazonaws.com/codecademy-content/courses/beautifulsoup/cacao/index.html\")\nwebpage = webpage_response.content\nsoup = BeautifulSoup(webpage,\"html.parser\")\n#print(soup)\nsoup.find_all(attrs={\"class\": \"Rating\"})\n\nratings = []\nfor elements in soup.find_all(attrs={\"class\": \"Rating\"})[1:]:\n ratings.append(float(elements.get_text()))\n\nplt.hist(ratings)\nplt.show()\n\nsoup.select(\".Company\")\n\ncompanies = []\nfor company in soup.select(\".Company\")[1:]:\n companies.append(company.get_text())\n\ncocoa_percents = []\ncocoa_percent_tags = soup.select(\".CocoaPercent\")\n\nfor td in cocoa_percent_tags[1:]:\n percent = float(td.get_text().strip('%'))\n cocoa_percents.append(percent)\n\nd = {\"Company\": companies, \"Ratings\": ratings, \"CocoaPercentage\":cocoa_percents}\ncacao_df = pd.DataFrame.from_dict(d)\n\nmean_vals = cacao_df.groupby(\"Company\").Ratings.mean()\nten_best = mean_vals.nlargest(10)\nprint(ten_best)\n\nplt.clf()\nplt.scatter(cacao_df.CocoaPercentage, cacao_df.Ratings)\n\nz = np.polyfit(cacao_df.CocoaPercentage, cacao_df.Ratings, 1)\nline_function = np.poly1d(z)\nplt.plot(cacao_df.CocoaPercentage, line_function(cacao_df.CocoaPercentage), \"r--\")\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"meoclark/Data-Science-DropBox","sub_path":"Beautiful_Soap/BS1.py","file_name":"BS1.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"11336540010","text":"import html\nfrom data import question_data\n\nclass QuizBrain:\n\n def __init__(self, q_list):\n self.question_number = 0\n self.score = 0\n self.question_list = q_list\n self.current_question = None\n self.true_false= None\n self.next_question()\n\n def still_has_questions(self):\n return self.question_number < len(self.question_list)\n\n def next_question(self):\n self.current_question = self.question_list[self.question_number]\n self.question_number += 1\n qui = f\"Q.{self.question_number}: {html.unescape(self.current_question.text)}\"\n return qui\n # user_answer = input(f\"Q.{self.question_number}: {html.unescape(self.current_question.text)} (True/False): \")\n # self.check_answer(user_answer)\n\n def check_answer(self, is_yes: str):\n correct_answer = self.current_question.answer\n print(correct_answer)\n if is_yes == correct_answer :\n self.score += 1\n self.true_false = True\n else:\n self.true_false = False\n","repo_name":"abdazmi/Quiz-app","sub_path":"quiz_brain.py","file_name":"quiz_brain.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7601714413","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_splash import SplashRequest\n\n\nclass QuotesSpider(scrapy.Spider):\n name = 'frases_splash'\n start_urls = ['http://quotes.toscrape.com/js']\n\n def start_requests(self):\n yield SplashRequest(url=self.start_urls[0], callback=self.parse)\n\n def parse(self, response):\n frases = response.css(\"div.quote\")\n for frase in frases:\n yield self.procesar_frase(frase)\n siguiente_pagina = response.urljoin(response.css(\"li.next>a::attr(href)\").extract_first())\n if siguiente_pagina:\n yield SplashRequest(siguiente_pagina)\n\n def procesar_frase(self, frase):\n texto_frase = frase.css(\"span.text::text\").extract_first()\n autor = frase.css(\"small.author::text\").extract_first()\n etiquetas = frase.css(\"div.tags>a::text\").extract()\n return {\n \"autor\": autor,\n \"frase\": texto_frase,\n \"etiquetas\": etiquetas\n }\n","repo_name":"manugarri/curso_data_science","sub_path":"Secciones/Seccion7.WebScraping/scraping_javascript/frases/frases/spiders/frases_splash.py","file_name":"frases_splash.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"2"}
+{"seq_id":"35300717134","text":"from .shell import run_shell\n\ndef img2mov(ifile, ofile, framerate=1):\n '''Convert images into a movie.\n\n ifile can be in a glob pattern, e.g. *.png.\n\n See: http://trac.ffmpeg.org/wiki/Create%20a%20video%20slideshow%20from%20images'''\n\n cmd = ' '.join(['ffmpeg',\n '-framerate {}'.format(framerate),\n '-pattern_type glob',\n '-i \"{}\"'.format(ifile),\n '-pix_fmt yuv420p',\n '{}'.format(ofile)])\n run_shell(cmd)\n","repo_name":"wy2136/wython","sub_path":"misc/ffmpeg.py","file_name":"ffmpeg.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"26051716865","text":"# dictionaries are collections of pairs of keys and values\n#use the key as an index to access the value.\n\n#the .get() method will not raise an exception if it can't find the key\n#.get() can take in a second argument, that will be returned if key can't be found\n\n# How do I store multiple values for a single key\nbars = {\n 'LLoyds' : {\n 'item' : 'Cheap Bourbon',\n 'day' : 'Tuesday'\n },\n 'Manuels' : {\n 'item' : 'Dogzilla',\n 'day' : 'Wednesday'\n },\n 'The Imperial' :{\n 'item' : 'Philly',\n 'day' : ['Tuesday', 'Friday']\n },\n 'El Myr' :{\n 'item' : 'grizz',\n 'day' : 'Every Day'\n }\n }\n\nplaces = {\n \"US\":{\n \"Georgia\":{\n \"Atlanta\" : {\n \"work\" : \"DigitalCrafts\"\n }\n }\n }\n}\n\n\n# How do I store more complicated data for a single key?\n\n# How do I store new values to an existing dictionary?\n\n# How do I access information in useful dictionaries?\n# how do I access nested information?\n# how do I loop through information in a dictionay?\n\n\n# How do I modify a dictionary?# How do I store new values to n existing dictionary?\n# how do I remove value values from an existing dictionary?\n\n","repo_name":"austindryden/dictionaries","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"8150084277","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport time\nfrom typing import Union\n\nfrom models.BaseGAN import BaseGAN\nfrom visualizers.BaseSampler import BaseSampler\nfrom utils.common import random_batch_getter\n\n\nclass GAN(BaseGAN):\n def __init__(self,\n input_dim, latent_factor=5,\n D=None, G=None, d_optimizer=None, g_optimizer=None):\n super().__init__(input_dim, latent_factor)\n super()._setup_models(D, G, d_optimizer, g_optimizer)\n\n def _build_discriminator(self):\n return keras.Sequential([\n layers.Dense(32, input_shape=(self.input_dim,)), layers.LeakyReLU(),\n layers.Dense(16), layers.LeakyReLU(),\n layers.Dense(1)\n ])\n\n def _build_generator(self):\n return keras.Sequential([\n layers.Dense(32, input_shape=(self.latent_factor,)), layers.LeakyReLU(),\n layers.Dense(16), layers.LeakyReLU(),\n layers.Dense(self.input_dim)\n ])\n\n def _build_d_optimizer(self) -> tf.keras.optimizers.Optimizer:\n return tf.keras.optimizers.SGD(0.01)\n\n def _build_g_optimizer(self) -> tf.keras.optimizers.Optimizer:\n return tf.keras.optimizers.SGD(0.01)\n\n ####################################################\n # losses and training\n ####################################################\n\n cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n @staticmethod\n def _generator_loss(fake_output):\n return GAN.cross_entropy(tf.ones_like(fake_output), fake_output)\n\n @staticmethod\n def _discriminator_loss(real_output, fake_output):\n real_loss = GAN.cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = GAN.cross_entropy(tf.zeros_like(fake_output), fake_output)\n return real_loss + fake_loss\n\n @tf.function\n def _train_step_discriminator(self, real_x):\n print('Tracing d_step...')\n discriminator, generator = self.discriminator, self.generator\n noise = tf.random.normal([len(real_x), self.latent_factor])\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n fake_x = generator(noise, training=True) # training=True for differentiable D\n real_output = discriminator(real_x, training=True)\n fake_output = discriminator(fake_x, training=True)\n\n disc_loss = self._discriminator_loss(real_output, fake_output)\n\n disc_grads = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n self.d_optimizer.apply_gradients(zip(disc_grads, discriminator.trainable_variables))\n return disc_loss\n\n @tf.function\n def _train_step_both(self, real_x):\n print('Tracing both_step...') # tf.function trace for only a few times\n discriminator, generator = self.discriminator, self.generator\n noise = tf.random.normal([len(real_x), self.latent_factor])\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n fake_x = generator(noise, training=True) # training=True for differentiable D\n real_output = discriminator(real_x, training=True)\n fake_output = discriminator(fake_x, training=True)\n\n gen_loss = self._generator_loss(fake_output)\n disc_loss = self._discriminator_loss(real_output, fake_output)\n\n # update gradient\n disc_grads = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n gen_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)\n self.d_optimizer.apply_gradients(zip(disc_grads, discriminator.trainable_variables))\n self.g_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))\n\n return disc_loss, gen_loss\n\n def train(\n self, dataset: Union[tf.Tensor, np.ndarray], epochs, 
batch_size=64,\n sample_interval=20, sampler: BaseSampler = None, sample_number=300,\n metrics=None, dg_train_ratio=1\n ):\n dataset = self._check_dataset(dataset)\n seed = tf.random.normal([sample_number, self.latent_factor])\n n_samples = dataset.shape[0]\n n_batch = n_samples // batch_size\n metrics = metrics or []\n losses, metric_values = [], [[] for m in metrics]\n\n batch_getter = random_batch_getter(dataset, batch_size)\n\n for epoch in range(epochs):\n start = time.time()\n kwargs = {'model': self, 'dataset': dataset, 'epoch': epoch}\n total_d_loss = total_g_loss = .0\n\n # in each batch, train D for dg_train_ratio times and G once\n with tf.profiler.experimental.Trace('train', step_num=epoch, _r=1):\n for i in range(n_batch):\n for _ in range(dg_train_ratio - 1):\n self._train_step_discriminator(next(batch_getter))\n pass\n\n d_loss, g_loss = self._train_step_both(next(batch_getter))\n total_d_loss += d_loss\n total_g_loss += g_loss\n\n if epoch % sample_interval == 0 and sampler is not None:\n sampler(self.generator(seed), epoch)\n for i, v in enumerate(metric_values):\n v.append(metrics[i](**kwargs))\n\n total_g_loss /= n_batch\n total_d_loss /= n_batch\n losses.append((total_d_loss, total_g_loss))\n self.print_epoch(epoch, epochs, time.time() - start, total_d_loss, total_g_loss)\n\n # last sample\n sampler(self.generator(seed), epochs - 1)\n self.trained_epoch += epochs\n\n return np.array(losses), np.array(metric_values)\n\n def _train_deprecated(self, dataset, epochs, batch_size=32, sample_interval=20, sampler: BaseSampler = None,\n sample_number=300,\n dg_train_ratio=1):\n \"\"\"\n Deprecated. dataset with tf.data.Dataset without tf.function is slow when dg_train_ratio > 1\n \"\"\"\n seed = tf.random.normal([sample_number, self.latent_factor])\n dataset = dataset.shuffle(len(dataset)).repeat(dg_train_ratio).batch(batch_size, drop_remainder=True)\n n_batch = len(dataset)\n losses = [] # save tuple (d_loss, g_loss) of each epoch\n\n for epoch in range(epochs):\n start = time.time()\n\n with tf.profiler.experimental.Trace('train', step_num=epoch, _r=1):\n total_g_loss = total_d_loss = 0.0\n i = dg_train_ratio - 1\n for through_dataset in range(dg_train_ratio):\n for batch in dataset:\n if i == 0:\n # s = time.time()\n d_loss, g_loss = self._train_step_both(batch)\n # print(f'train step_d cost {time.time() - s:.3f} s')\n total_d_loss += d_loss\n total_g_loss += g_loss\n i = dg_train_ratio - 1 # reset counter\n else:\n self._train_step_discriminator(batch)\n i -= 1\n\n if epoch % sample_interval == 0 and sampler is not None:\n sampler(self.generator(seed), epoch)\n\n total_g_loss /= n_batch\n total_d_loss /= n_batch\n losses.append((total_d_loss, total_g_loss))\n self.print_epoch(epoch, epochs, time.time() - start, total_d_loss, total_g_loss)\n\n self.trained_epoch += epochs\n\n return np.array(losses)\n\n ####################################################\n # save and load\n ####################################################\n\n # using inherited config save and load\n","repo_name":"PurplePower/GAN_fitting","sub_path":"models/GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":7794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"16915505775","text":"from django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\n\nfrom .forms import DynamicInputs\nfrom .models import DynamicInputsDatas\n\n\ndef dynamic_inputs(request):\n template = 'dynamic_inputs/add.html'\n form = DynamicInputs()\n if request.method == 'POST':\n if 'submit' in request.POST:\n form = DynamicInputs(request.POST)\n if form.is_valid():\n inst = DynamicInputsDatas(data=form.cleaned_data)\n inst.save()\n return redirect(reverse('list'))\n if 'add_input' in request.POST:\n form = DynamicInputs(request.POST, add_new_input=True)\n context = {'form': form, }\n return render(request, template, context=context)\n\ndef data_list(request):\n template = 'dynamic_inputs/list.html'\n datas = DynamicInputsDatas.objects.all()\n context = {\n 'datas': datas,\n }\n return render(request, template, context=context)\n\n\ndef concrete_data(request, id: int):\n data = get_object_or_404(DynamicInputsDatas, id=id)\n return JsonResponse(data.data)\n","repo_name":"YaraslavBondar/boya22","sub_path":"dynamic_inputs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"1394902563","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 10 13:39:46 2020\r\n\r\n@author: James\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nfrom scipy.integrate import solve_ivp\r\nimport matplotlib.pyplot as plt\r\n\r\n####################\r\n# Solver Constants #\r\n####################\r\n\r\nsolver = \"RK45\" # Radau, DOP853 # RK45 Works with rotating frame but not inertial\r\n\r\nT_Min = 0\r\n\r\nT_Max = 500\r\n\r\nResolution = 1000\r\n\r\naxesLimits = 8\r\n\r\ninertial = True\r\n\r\n\r\n\r\n######################\r\n# Physical Constants #\r\n######################\r\n\r\nG = 4*np.pi**2 # Solar system units: unit time = 1 year, unit length = 1 AU\r\n\r\n\r\n\r\n#################\r\n# Sun Constants #\r\n#################\r\n\r\nMsun = 1\r\n\r\nsunPos = [0,0,0]\r\n\r\n\r\n\r\n###############################\r\n# Asteroid Initial Conditions #\r\n###############################\r\n\r\nasteroidAngle = np.pi/180 * 30\r\n\r\nRAst = 5.2\r\n\r\nx0 = RAst * np.cos(asteroidAngle)\r\ny0 = RAst * np.sin(asteroidAngle)\r\nz0 = 0\r\n\r\ntheta0 = 0 * np.pi/180 \r\n\r\nw = np.sqrt( G*Msun/(RAst**3) )\r\n\r\norbitalSpeed = w * RAst \r\n\r\nvx0 = -orbitalSpeed*np.sin(asteroidAngle + theta0)\r\nvy0 = orbitalSpeed*np.cos(asteroidAngle + theta0)\r\nvz0 = 0\r\n\r\ninertial_y0 = [x0, y0, z0, vx0, vy0, vz0]\r\n\r\nrotating_y0 = [x0, y0, z0, vx0 + w*y0, vy0 - w*x0, vz0]\r\n\r\n\r\n\r\ndef inertialField(t, vec):\r\n \r\n retVec = np.zeros(6)\r\n \r\n # Unpack asteroid position & velocity from the last step\r\n x = vec[0]\r\n y = vec[1]\r\n z = vec[2]\r\n \r\n vx = vec[3]\r\n vy = vec[4]\r\n vz = vec[5]\r\n \r\n # Return the new position ODEs [ dr_i/dt = v_i ]\r\n retVec[0] = vx\r\n retVec[1] = vy\r\n retVec[2] = vz\r\n \r\n # Asteroid parameters wrt the Sun\r\n r_s = np.sqrt( (x-sunPos[0])**2 + (y-sunPos[1])**2 + (z-sunPos[2])**2 )\r\n phi_s = np.arctan2(y-sunPos[1], x-sunPos[0])\r\n theta_s = np.arccos( z/r_s )\r\n \r\n # New velocity ODEs [ d(v_i)/dt = F_i/m ]\r\n k = ( -G*Msun/r_s**2)\r\n retVec[3] = k*np.cos(phi_s)*np.sin(theta_s) # ax\r\n retVec[4] = k*np.sin(phi_s)*np.sin(theta_s) # ay\r\n retVec[5] = k*np.cos(theta_s) # az\r\n \r\n return retVec\r\n\r\ndef rotatingField(t, vec):\r\n \r\n retVec = np.zeros(6)\r\n \r\n # Unpack asteroid position & velocity from the last step\r\n x = vec[0]\r\n y = vec[1]\r\n z = vec[2]\r\n \r\n vx = vec[3]\r\n vy = vec[4]\r\n vz = vec[5]\r\n \r\n # Return the new position ODEs [ dr_i/dt = v_i ]\r\n retVec[0] = vx\r\n retVec[1] = vy\r\n retVec[2] = vz\r\n \r\n # Asteroid parameters wrt the Sun\r\n r_s = np.sqrt( (x-sunPos[0])**2 + (y-sunPos[1])**2 + (z-sunPos[2])**2 )\r\n phi_s = np.arctan2(y-sunPos[1], x-sunPos[0])\r\n theta_s = np.arccos( z/r_s )\r\n \r\n # New velocity ODEs [ d(v_i)/dt = F_i/m ]\r\n k = ( -G*Msun/r_s**2)\r\n retVec[3] = k*np.cos(phi_s)*np.sin(theta_s) + w**2 * x + 2*w*vy # ax\r\n retVec[4] = k*np.sin(phi_s)*np.sin(theta_s) + w**2 * y - 2*w*vx # ay\r\n retVec[5] = k*np.cos(theta_s) # az\r\n \r\n return retVec\r\n\r\nsol = solve_ivp(\r\n inertialField if inertial else rotatingField,\r\n [T_Min, T_Max],\r\n inertial_y0 if inertial else rotating_y0,\r\n dense_output=False,\r\n vectorized=False,\r\n method=solver,\r\n t_eval=np.linspace(T_Min, T_Max, (T_Max - T_Min)*Resolution)\r\n )\r\n\r\n\r\n\r\n\r\n# All operations and functions are in a vectorised form\r\n\r\nt = sol.t\r\n \r\nx = sol.y[0]\r\ny = sol.y[1]\r\nz = sol.y[2]\r\n \r\nvx = sol.y[3] \r\nvy = sol.y[4]\r\nvz = sol.y[5]\r\n \r\ntheta = w*t if inertial else -w*t\r\n 
\r\nxArray = np.cos(theta) * x + np.sin(theta) * y\r\n \r\nyArray = - np.sin(theta) * x + np.cos(theta) * y\r\n \r\nkineticEnergy = (1/2)*(vx**2 + vy**2 + vz**2) if inertial else (1/2)*( (vx - w*y)**2 + (vy + w*x)**2 + vz**2)\r\n \r\npotentialEnergy = -G*Msun / np.sqrt( (x - sunPos[0])**2 + (y - sunPos[1])**2 + (z - sunPos[2])**2 )\r\n \r\nenergyArray = kineticEnergy + potentialEnergy\r\n\r\nplt.plot(sol.y[0], sol.y[1])\r\n\r\n# plt.plot(xArray, yArray)\r\n\r\n# plt.plot(xArray if inertial else sol.y[0], yArray if inertial else sol.y[1], label=\"Relative Position\")\r\n\r\nplt.xlim(-axesLimits, axesLimits)\r\nplt.ylim(-axesLimits, axesLimits)\r\n\r\nplt.xlabel(\"x / AU\")\r\nplt.ylabel(\"y / AU\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"jalsop24/Trojan-Asteroids","sub_path":"singleBodyTest.py","file_name":"singleBodyTest.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12798121917","text":"import argparse\nimport lut\n\ndef assembly_to_machine(line):\n \"\"\"Converts a line of $NAME assembly code to 9-bit machine instruction\"\"\"\n\n opcode = '0000'\n\n # Grab the first 3 chars of the line, and use LUT to generate opcode\n inst_name= line[:3]\n opcode = lut.LUT[inst_name] \n\n # Store space-delimited elements of the instruction\n elements = line.split()\n\n # only the halt instruction has no operands\n if len(elements) == 1:\n return '111111111'\n\n # immediate instruction: use 5 bits of numerical constant\n if inst_name in lut.IMM:\n\n # Check that no registers are specified. Assumes base 10\n #if \"$\" not in elements[1]:\n immval = int(elements[1])\n tail = format(immval, '05b')\n return opcode + tail\n # register instruction: uses 4 bits of register + 0 bit on tail\n else:\n\n # Check that no immediate value is specified. Assumes base 10\n #if \"$r\" in elements[1]:\n regval = int(elements[1])\n tail = format(regval, '04b')\n return opcode + tail + '0'\n\n\ndef main():\n \"\"\" Drives the program. \"\"\"\n\n parser = argparse.ArgumentParser(description='Convert $NAME assembly to' \\\n + ' machine code.')\n parser.add_argument('file_in', metavar='in', type=str, help='name of input' \\\n + ' file containing $NAME assembly code')\n parser.add_argument('file_out', metavar='outfile', type=str, help='name of'\\\n + ' output file to write machine instructions')\n\n results = parser.parse_args()\n \n with open(results.file_in, 'r') as fi, open(results.file_out, 'w') as fo:\n lines = [line.rstrip() for line in fi]\n\n line_ct = 0\n for inst in lines:\n if inst[0] == '#':\n continue\n else:\n fo.write(assembly_to_machine(inst))\n fo.write('\\n')\n line_ct += 1\n\n print(f'Wrote {line_ct} instructions to {results.file_out}.\\n')\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"ackamal/cse141l-lab2","sub_path":"src/assembler/assembler.py","file_name":"assembler.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71393680688","text":"import os\nimport json\nimport logging\nimport requests\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom socketserver import ThreadingMixIn\nimport io\nfrom keras.preprocessing.image import (\n ImageDataGenerator,\n load_img,\n array_to_img,\n img_to_array,\n)\n\n# Constants\nFORMAT = os.getenv(\"FORMAT\", \"JPEG\")\nARG_TYPE = os.getenv(\"ARG_TYPE\", \"bytes\")\n\n# Environment Variables\nhost_target = os.environ.get(\"AIS_TARGET_URL\")\nTRANSFORM = os.environ.get(\"TRANSFORM\")\nif not host_target:\n raise EnvironmentError(\"AIS_TARGET_URL environment variable missing\")\nif not TRANSFORM:\n raise EnvironmentError(\n \"TRANSFORM environment variable missing. Check documentation for examples (link)\"\n )\ntransform_dict = json.loads(TRANSFORM)\n\n\nclass Handler(BaseHTTPRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n \"\"\"Override log_request to not log successful requests.\"\"\"\n pass\n\n def _set_headers(self):\n \"\"\"Set standard headers for responses.\"\"\"\n self.send_response(200)\n self.send_header(\"Content-Type\", \"application/octet-stream\")\n self.end_headers()\n\n def transform(self, data: bytes) -> bytes:\n \"\"\"Process image data as bytes using the specified transformation.\"\"\"\n try:\n img = load_img(io.BytesIO(data))\n img = img_to_array(img)\n datagen = ImageDataGenerator()\n img = datagen.apply_transform(x=img, transform_parameters=transform_dict)\n img = array_to_img(img)\n buf = io.BytesIO()\n img.save(buf, format=FORMAT)\n return buf.getvalue()\n except Exception as e:\n logging.error(\"Error processing data: %s\", str(e))\n raise\n\n def do_PUT(self):\n \"\"\"PUT handler supports `hpush` operation.\"\"\"\n try:\n content_length = int(self.headers[\"Content-Length\"])\n post_data = self.rfile.read(content_length)\n processed_data = self.transform(post_data)\n if processed_data is not None:\n self._set_headers()\n self.wfile.write(processed_data)\n else:\n self.send_response(500)\n self.end_headers()\n self.wfile.write(b\"Data processing failed\")\n except Exception as e:\n logging.error(\"Error processing PUT request: %s\", str(e))\n self.send_response(500)\n self.end_headers()\n self.wfile.write(b\"Data processing failed\")\n\n def do_GET(self):\n \"\"\"GET handler supports `hpull` operation.\"\"\"\n try:\n if self.path == \"/health\":\n self._set_headers()\n self.wfile.write(b\"Running\")\n return\n\n query_path = host_target + self.path\n\n if ARG_TYPE == \"url\": # need this for webdataset\n result = self.transform(query_path)\n else:\n input_bytes = requests.get(query_path).content\n result = self.transform(input_bytes)\n\n if result is not None:\n self._set_headers()\n self.wfile.write(result)\n else:\n self.send_response(500)\n self.end_headers()\n self.wfile.write(b\"Data processing failed\")\n except Exception as e:\n logging.error(\"Error processing GET request: %s\", str(e))\n self.send_response(500)\n self.end_headers()\n self.wfile.write(b\"Data processing failed\")\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle requests in a separate thread.\"\"\"\n\n\ndef run(addr=\"0.0.0.0\", port=80):\n server = ThreadedHTTPServer((addr, port), Handler)\n logging.info(f\"Starting HTTP server on {addr}:{port}\")\n server.serve_forever()\n\n\nif __name__ == \"__main__\":\n run(addr=\"0.0.0.0\", 
port=80)\n","repo_name":"NVIDIA/ais-etl","sub_path":"transformers/keras_preprocess/http-multithreaded-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"2"}
+{"seq_id":"12036112257","text":"#BOJ 2231 분해합\nn=int(input())\nfor i in range(1,n+1):\n num=sum((map(int,str(i))))\n res=i+num\n if res==n:\n print(i)\n break\n elif i==n:\n print(0)","repo_name":"Yoosuean/BOJ","sub_path":"브루트 포스/2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"72074649328","text":"#Simple pong game like baby table tennis\n#Turtle is simplest and in-built but you can install pygames\n\n\nimport turtle\nwn=turtle.Screen()\nwn.title('Pong by Kerry')\nwn.bgcolor('black')\nwn.setup(width=800,height=600)\nwn.tracer(0)\n#he claims tis will make your game run much faster and does not make your window be updated\n\npaddle_a=turtle.Turtle()\npaddle_a.shape('square')\npaddle_a.speed(0)\npaddle_a.color('white')\npaddle_a.shapesize(stretch_wid=5,stretch_len=1)\npaddle_a.penup()\npaddle_a.goto(-350,0)\n\n\npaddle_b=turtle.Turtle()\npaddle_b.shape('square')\npaddle_b.speed(0)\npaddle_b.color('white')\npaddle_b.shapesize(stretch_wid=5,stretch_len=1)\npaddle_b.penup()\npaddle_b.goto(350,0)\n\n\nball=turtle.Turtle()\nball.shape('square')\nball.color('white')\nball.penup()\nball.goto(0,0)\n\n\n\n\n#Every game must have a main game loop\nwhile True:\n wn.update()","repo_name":"Kerry-Jilak/Simple-projects","sub_path":"#Simple pong game like baby table tennis.py","file_name":"#Simple pong game like baby table tennis.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"15424858500","text":"\"\"\" The area of a triangle can be computed using the following formula, where\n b is the length of the base of the trianble, and\n h is its height:\n area = (b * h)/2\n\n Write a programme that allows the user to enter values for b and h.\n The program should then compute and display the area of the triangle with base length b and height h.\"\"\"\n\nbase = int(input(\"What is the base of the triangle? \"))\nheight = int(input(\"What is the height of the triangle? \"))\n\narea_triangle = (base * height)/2\n\nprint(\"The area of the triangle with base\", base, \"and height\", height, \"is\", area_triangle )","repo_name":"szateva/ThePythonWorkbook","sub_path":"Ex21 - Area of a Triangle.py","file_name":"Ex21 - Area of a Triangle.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36788935333","text":"# -*- coding: utf-8 -*-\nfrom django_assets import Bundle, register\nfrom webassets.filter import get_filter\n# less_filter = get_filter('less', line_numbers='comments')\ncss = Bundle(\n # Bundle(\n # 'bootstrap3/less/bootstrap.less',\n # filters=less_filter,\n # # depends='bootstrap3/less/*.less',\n # # output='assets/bootstrap3.css'\n # ),\n # Bundle(\n # 'botspmd/less/material.less',\n # 'botspmd/less/ripples.less',\n # filters=less_filter,\n # # depends='botspmd/less/*.less',\n # # output='assets/material.css'\n # ),\n # Bundle(\n # 'Swiper-3.3.1/src/less/swiper.less',\n # filters=less_filter,\n # # depends='Swiper-3.3.1/src/less/*.less',\n # # output='assets/swiper.css'\n # ),\n # Bundle(\n # 'css/main.less',\n # filters=less_filter,\n # # depends='css/*.less',\n # # output='assets/main.css'\n # ),\n 'bootstrap3/less/bootstrap.less',\n 'botspmd/less/material.less',\n 'botspmd/less/ripples.less',\n 'Swiper-3.3.1/src/less/swiper.less',\n 'css/main.less',\n \n filters='less,cssutils',\n output='assets/app.css')\nregister('css_all', css)\n\njs = Bundle(\n 'flatui/js/jquery-1.8.3.min.js',\n 'bootstrap3/js/bootstrap.js',\n 'botspmd/scripts/ripples.js',\n 'botspmd/scripts/material.js',\n 'Swiper-3.3.1/dist/js/swiper.jquery.js',\n 'js/spin.min.js',\n 'js/jquery.spin.js',\n 'js/pagination.js',\n 'js/application.js',\n 'js/usercenter.js',\n filters='rjsmin',\n output='assets/app.js')\nregister('js_all', js)\n","repo_name":"tkliuxing/bookspider","sub_path":"booksite/booksite/book/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"2"}
+{"seq_id":"17428566782","text":"import asyncio\nimport base64\nimport json\nimport logging\nimport time\nfrom typing import Annotated, Any, AsyncGenerator, Literal, Optional, Tuple, Union\nfrom fastapi import WebSocket\nfrom vocode.streaming.agent.base_agent import AgentResponse, AgentResponseMessageChunk\nfrom vocode.streaming.models.agent import (\n EndInputStream,\n InputStreamChunk,\n InputStreamMessage,\n)\nfrom vocode.streaming.synthesizer import miniaudio_worker\nimport websockets\nfrom websockets.client import WebSocketClientProtocol\nimport aiohttp\nfrom opentelemetry.trace import Span\nfrom pydantic import BaseModel, Field\nfrom elevenlabs import generate\n\nfrom vocode import getenv\nfrom vocode.streaming.synthesizer.base_synthesizer import (\n BaseSynthesizer,\n SynthesisResult,\n encode_as_wav,\n tracer,\n)\nfrom vocode.streaming.models.synthesizer import (\n ElevenLabsSynthesizerConfig,\n SynthesizerType,\n)\nfrom vocode.streaming.agent.bot_sentiment_analyser import BotSentiment\nfrom vocode.streaming.models.message import BaseMessage\nfrom vocode.streaming.utils.mp3_helper import decode_mp3\nfrom vocode.streaming.synthesizer.miniaudio_worker import MiniaudioWorker\nfrom vocode.streaming.utils.worker import (\n AsyncQueueWorker,\n AsyncWorker,\n InterruptibleAgentResponseEvent,\n)\n\nlogger = logging.getLogger(__name__)\n\nADAM_VOICE_ID = \"pNInz6obpgDQGcFmaJgB\"\nELEVEN_LABS_BASE_URL = \"https://api.elevenlabs.io/v1/\"\nELEVEN_LABS_WEBSOCKET_BASE_URL = \"wss://api.elevenlabs.io/v1/\"\n\n\nclass ElevenLabsInputStreamWorker(AsyncWorker[AgentResponse]):\n def __init__(\n self,\n input_queue: asyncio.Queue[InterruptibleAgentResponseEvent[AgentResponse]],\n output_queue: asyncio.Queue[bytes | None],\n api_key: str,\n voice_id: str,\n model_id: str,\n voice_settings: Optional[dict] = None,\n ):\n super().__init__(input_queue, output_queue)\n self.api_key = api_key\n self.voice_id = voice_id\n self.model_id = model_id\n self.bos = dict(\n text=\" \",\n voice_settings={\n \"stability\": 0.5,\n \"similarity_boost\": True,\n },\n # generation_config=dict(\n # chunk_length_schedule=[50],\n # ),\n xi_api_key=self.api_key,\n )\n if voice_settings:\n self.bos[\"voice_settings\"] = voice_settings\n self.eos = dict(text=\"\")\n self.buffered_message = \"\"\n\n def get_message_so_far(self):\n # print(\"[SYNTHESIZER] returning buffered message\", self.buffered_message)\n return self.buffered_message\n\n async def _run_loop(self) -> None:\n url = (\n ELEVEN_LABS_WEBSOCKET_BASE_URL\n + f\"text-to-speech/{self.voice_id}/stream-input?model_type={self.model_id}\"\n )\n\n async with websockets.connect(\n url,\n # extra_headers={\"xi-api-key\": self.api_key},\n ) as websocket:\n try:\n await websocket.send(json.dumps(self.bos))\n except Exception as e:\n logger.error(e)\n return\n while True:\n item: InterruptibleAgentResponseEvent[\n AgentResponse\n ] = await self.input_queue.get()\n payload = item.payload\n # print(\"[SYNTHESIZER]\", payload)\n input_stream_message: InputStreamMessage\n if not isinstance(payload, AgentResponseMessageChunk):\n break\n else:\n input_stream_message = payload.chunk\n\n if isinstance(input_stream_message, InputStreamChunk):\n msg = dict(\n text=input_stream_message.text, try_trigger_generation=True\n )\n await websocket.send(json.dumps(msg))\n item.is_interruptible = False\n elif isinstance(input_stream_message, EndInputStream):\n await websocket.send(json.dumps(self.eos))\n item.is_interruptible = False\n break\n\n while True:\n try:\n response = await 
websocket.recv()\n except websockets.exceptions.ConnectionClosed:\n break\n try:\n data = json.loads(response)\n if data[\"audio\"]:\n self.output_queue.put_nowait(base64.b64decode(data[\"audio\"]))\n normalized_alignment = data.get(\"normalizedAlignment\")\n if normalized_alignment:\n text = \"\".join(data[\"normalizedAlignment\"][\"chars\"])\n self.buffered_message += text\n except json.JSONDecodeError:\n continue\n\n self.output_queue.put_nowait(None) # sentinel\n\n # await asyncio.gather(sender(websocket), receiver(websocket))\n # await sender(websocket)\n\n\nclass ElevenLabsSynthesizer(BaseSynthesizer[ElevenLabsSynthesizerConfig]):\n def __init__(\n self,\n synthesizer_config: ElevenLabsSynthesizerConfig,\n logger: Optional[logging.Logger] = None,\n aiohttp_session: Optional[aiohttp.ClientSession] = None,\n ):\n super().__init__(synthesizer_config, aiohttp_session)\n\n import elevenlabs\n\n self.elevenlabs = elevenlabs\n\n self.api_key = synthesizer_config.api_key or getenv(\"ELEVEN_LABS_API_KEY\")\n self.voice_id = synthesizer_config.voice_id or ADAM_VOICE_ID\n self.stability = synthesizer_config.stability\n self.similarity_boost = synthesizer_config.similarity_boost\n self.model_id = synthesizer_config.model_id\n self.optimize_streaming_latency = synthesizer_config.optimize_streaming_latency\n self.words_per_minute = 150\n self.experimental_streaming = synthesizer_config.experimental_streaming\n\n async def create_input_streamed_speech(\n self,\n chunk_size: int,\n input_queue: asyncio.Queue[InterruptibleAgentResponseEvent[AgentResponse]],\n ):\n voice = self.get_voice()\n miniaudio_worker_input_queue: asyncio.Queue[bytes | None] = asyncio.Queue()\n input_stream_worker = ElevenLabsInputStreamWorker(\n input_queue=input_queue,\n output_queue=miniaudio_worker_input_queue,\n api_key=self.api_key,\n voice_id=self.voice_id,\n model_id=self.model_id or \"eleven_monolingual_v1\",\n voice_settings=voice.settings.dict() if voice.settings else None,\n )\n miniaudio_worker = MiniaudioWorker(\n synthesizer_config=self.synthesizer_config,\n chunk_size=chunk_size,\n input_queue=miniaudio_worker_input_queue,\n output_queue=asyncio.Queue(),\n )\n input_stream_worker.start()\n miniaudio_worker.start()\n\n async def chunk_generator():\n try:\n # Await the output queue of the MiniaudioWorker and yield the wav chunks in another loop\n while True:\n # Get the wav chunk and the flag from the output queue of the MiniaudioWorker\n # print(\"[MINIAUDIO WORKER] getting chunk\")\n wav_chunk, is_last = await miniaudio_worker.output_queue.get()\n if self.synthesizer_config.should_encode_as_wav:\n wav_chunk = encode_as_wav(wav_chunk, self.synthesizer_config)\n\n yield SynthesisResult.ChunkResult(wav_chunk, is_last)\n\n if is_last:\n break\n except asyncio.CancelledError:\n pass\n finally:\n input_stream_worker.terminate()\n miniaudio_worker.terminate()\n\n return SynthesisResult(\n chunk_generator(),\n lambda seconds: input_stream_worker.get_message_so_far(),\n )\n\n def get_voice(self):\n voice = self.elevenlabs.Voice(voice_id=self.voice_id)\n if self.stability is not None and self.similarity_boost is not None:\n voice.settings = self.elevenlabs.VoiceSettings(\n stability=self.stability, similarity_boost=self.similarity_boost\n )\n return voice\n\n async def create_speech(\n self,\n message: BaseMessage,\n chunk_size: int,\n bot_sentiment: Optional[BotSentiment] = None,\n ) -> SynthesisResult:\n voice = self.get_voice()\n url = ELEVEN_LABS_BASE_URL + f\"text-to-speech/{self.voice_id}\"\n\n if 
self.experimental_streaming:\n url += \"/stream\"\n\n if self.optimize_streaming_latency:\n url += f\"?optimize_streaming_latency={self.optimize_streaming_latency}\"\n headers = {\"xi-api-key\": self.api_key}\n body = {\n \"text\": message.text,\n \"voice_settings\": voice.settings.dict() if voice.settings else None,\n }\n if self.model_id:\n body[\"model_id\"] = self.model_id\n\n create_speech_span = tracer.start_span(\n f\"synthesizer.{SynthesizerType.ELEVEN_LABS.value.split('_', 1)[-1]}.create_total\",\n )\n\n session = self.aiohttp_session\n\n response = await session.request(\n \"POST\",\n url,\n json=body,\n headers=headers,\n timeout=aiohttp.ClientTimeout(total=15),\n )\n if not response.ok:\n raise Exception(f\"ElevenLabs API returned {response.status} status code\")\n if self.experimental_streaming:\n return SynthesisResult(\n self.experimental_mp3_streaming_output_generator(\n response, chunk_size, create_speech_span\n ), # should be wav\n lambda seconds: self.get_message_cutoff_from_voice_speed(\n message, seconds, self.words_per_minute\n ),\n )\n else:\n audio_data = await response.read()\n create_speech_span.end()\n convert_span = tracer.start_span(\n f\"synthesizer.{SynthesizerType.ELEVEN_LABS.value.split('_', 1)[-1]}.convert\",\n )\n output_bytes_io = decode_mp3(audio_data)\n\n result = self.create_synthesis_result_from_wav(\n file=output_bytes_io,\n message=message,\n chunk_size=chunk_size,\n )\n convert_span.end()\n\n return result","repo_name":"marinho-gomes/vocode-python-marinho","sub_path":"vocode/streaming/synthesizer/eleven_labs_synthesizer.py","file_name":"eleven_labs_synthesizer.py","file_ext":"py","file_size_in_byte":10517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"28372326150","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 15 15:52:54 2021\n\n@author: ppj\n\"\"\"\n\nn=1\n\ndp_1=0\ndp=1\ncur=0\nif n==0:\n print(dp_1)\n\nfor i in range(n-1):\n cur=dp_1+dp\n dp_1=dp\n dp=cur\nprint(dp)\n \n","repo_name":"elena0624/leetcode_practice","sub_path":"Challenge_Fibonacci_Number1.py","file_name":"Challenge_Fibonacci_Number1.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"19119000526","text":"\"\"\"Serializers for indicator app\"\"\"\n\nfrom json import loads\n\nfrom rest_framework.serializers import ModelSerializer\n\nfrom console_api.search.models import History\nfrom console_api.users.models import User\n\n\nclass SearchHistorySerializer(ModelSerializer):\n \"\"\"Serializer for History model\"\"\"\n\n class Meta:\n \"\"\"Metainformation about the serializer\"\"\"\n\n model = History\n\n fields = [\n \"id\",\n \"search_type\",\n \"query_text\",\n \"query_data\",\n \"results\",\n \"created_by\",\n ]\n\n\nclass SearchHistoryListSerializer(ModelSerializer):\n \"\"\"Serializer for History objects list\"\"\"\n\n def to_representation(self, instance):\n \"\"\"Convert representation from null to valid value\"\"\"\n\n data = super().to_representation(instance)\n\n status = data[\"status\"]\n created_by = data[\"created-by\"]\n\n data[\"status\"] = \"detected\" if loads(status) else \"not-detected\"\n data[\"created-by\"] = {\n \"id\": created_by,\n \"login\": User.objects.get(id=created_by).login,\n }\n\n return data\n\n class Meta:\n \"\"\"Metainformation about the serializer\"\"\"\n\n model = History\n\n fields = [\n \"id\",\n \"status\",\n \"created-at\",\n \"created-by\",\n \"query\",\n ]\n\n extra_kwargs = {\n \"status\": {\"source\": \"results\"},\n \"created-at\": {\"source\": \"created_at\"},\n \"created-by\": {\"source\": \"created_by\"},\n \"query\": {\"source\": \"query_text\"},\n }\n","repo_name":"hulahoo/console-check-sonar","sub_path":"src/console_api/search/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"73501033646","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\ndriver = webdriver.Chrome(executable_path=\"/Volumes/Macintosh HD/For Mac/python project/Browserdrivers/chromedriver\")\n\n# driver.get(\"https://echoecho.com/htmlforms11.htm\")\n\ndriver.get(\"https://www.wikipedia.org/\")\ndriver.maximize_window()\n\nwait = WebDriverWait(driver,10)\n#\n# driver.find_element(by=By.NAME,value=\"dropdownmenu\").send_keys(\"Milk\")\n\n## use to select the dropdown list\ndropdown = driver.find_element(by=By.ID,value=\"searchLanguage\")\nselect = Select(dropdown)\nselect.select_by_value(\"pl\")\n\noption = driver.find_elements(by=By.TAG_NAME,value=\"option\")\n\nfor op in option:\n print(\"Text is :\",op.text,\"Lang is :\"+op.get_attribute(\"lang\"))\n\n# print(\"Total dropdown values are,\",len(option))\n\nprint(\"------------------------------------------------------------\")\n\n## find the link by use the tag name\nlinks = driver.find_elements(by=By.TAG_NAME,value=\"a\")\nprint(len(links))\nfor link in links:\n print(\"Text is:\",link.text,\" --URL is :\"+link.get_attribute(\"href\"))\n\n## get the value from specfiy block\nprint(\"---------------------------------------------\")\nblock = driver.find_element(by=By.XPATH,value=\"//*[@class='other-projects']/div[1]\")\n\nprint(block.find_elements(by=By.TAG_NAME,value=\"a\").__getitem__(0).text)\n\n\ntime.sleep(1)\n\ndriver.quit()\ndriver.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"BalajiDhanaraj/Selenium_with_Python","sub_path":"main_page/HandlingWebElement.py","file_name":"HandlingWebElement.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"22978055614","text":"# fixed-sized sliding window problem\n\ndef max_subarray_sum_size_k(arr:list,k:int)->int:\n max_so_far = 0\n start = 0\n end = k-1\n while end < len(arr):\n current_window = arr[start:end+1]\n current_window_sum= sum(current_window)\n max_so_far = max(current_window_sum,max_so_far)\n start += 1\n end += 1\n return max_so_far\n\n\narr = [4,2,1,7,8,1,2,8,1,0]\nk=3\nprint(max_subarray_sum_size_k(arr,k))","repo_name":"elvisotieno/dynamicprogramming","sub_path":"slidingwindow/fixed-size_variant/max-subarray-sum-size-k.py","file_name":"max-subarray-sum-size-k.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"11514951654","text":"# swea 1219 길찾기\n\ndef dfs(s, g, v):\n stack = []\n visited = [0] * v # 방문 기록용 리스트\n\n n = s # n은 현재 방문한 정점\n visited[n] = 1\n\n while n > -1:\n\n for w in graph[n]:\n if visited[w] == 0: # 방문하지 않은 정점일 경우\n stack.append(n)\n n = w\n visited[n] = 1\n if n == g: # 정점이 도착점(99)일 경우\n return 1\n break\n else:\n if stack:\n n = stack.pop()\n else: # 스택이 비어 있으면 종료\n n = -1\n return 0\n\n\nT = 10\nfor _ in range(1, T+1):\n tc, e = map(int, input().split())\n arry = list(map(int, input().split()))\n graph = [[] for _ in range(100)] # 간선 정보 담을 그래프\n for i in range(e):\n start, end = arry[i*2], arry[i*2+1]\n graph[start].append(end)\n\n print(f'#{tc} {dfs(0, 99, 100)}')\n","repo_name":"HQkim/algorithm","sub_path":"swea/swea_1219.py","file_name":"swea_1219.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13057518710","text":"import os\n\nfrom alison import read_wav_file\nfrom alison.recognition import SoundRecognizer\n\nrecognizer = None\n\ndef reset_recognizer():\n global recognizer\n recognizer = SoundRecognizer(callback=callback)\n recognizer.load_dictionary(\"samples/sample.dict\")\n\nif __name__ == \"__main__\":\n def callback(evt):\n print(\"Recognized\", evt.tag, \"at time\", evt.time, \"with value\",\n evt.value)\n \n reset_recognizer()\n \n directory = \"./samples/Cut_Up_Sounds/other_Fire_Alarm/\"\n files = os.listdir(directory)\n files.sort()\n \n print(\" - Processing with reset\")\n \n for file in files:\n print(file)\n \n if file.endswith(\".wav\"):\n _, signal = read_wav_file(directory + file)\n recognizer.process_audio(signal)\n reset_recognizer()\n \n print(\" - Processing without reset\")\n \n for file in files:\n print(file)\n \n if file.endswith(\".wav\"):\n _, signal = read_wav_file(directory + file)\n recognizer.process_audio(signal)\n \n \n ","repo_name":"VincentErb/AlisonProject","sub_path":"examples/test_cutup.py","file_name":"test_cutup.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"21650371318","text":"import plotly.express as px\n\n# Data Set\nx = [1, 2, 3, 4, 5]\ny = [3, 5, 2, 7, 4]\n\n# Scatter Plot\nfig = px.scatter(x=x, y=y)\n\n# Adding title and axis layout\nfig.update_layout(\n title=\"Scatter Plot\",\n xaxis_title=\"X\",\n yaxis_title=\"Y\"\n)\n\n# Displaying the plot\nfig.show()\n","repo_name":"Kairos-T/Data-Visualisations-AI-SIG","sub_path":"scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"}
+{"seq_id":"74906136685","text":"from torch import nn\nfrom utils_0809 import *\nimport torch.nn.functional as F\nimport torchvision\nimport numpy as np\nfrom torchvision.ops import nms\nfrom torchvision.ops import RoIPool\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef truncated_vgg16():\n # the 30th layer of features is relu of conv5_3\n \n \n model = torchvision.models.vgg16(pretrained=True)\n \n features = list(model.features)[:30]\n classifier = model.classifier\n \n classifier = list(classifier)\n del classifier[6]\n \n del classifier[5]\n del classifier[2]\n \n classifier = nn.Sequential(*classifier)\n\n # freeze top4 conv\n for layer in features[:10]:\n for p in layer.parameters():\n p.requires_grad = False\n \n return nn.Sequential(*features), classifier\n\ndef generate_anchor_base(base_size=16, ratios=[0.5, 1, 2],\n anchor_scales=[8, 16, 32]):\n \n py = base_size / 2.\n px = base_size / 2.\n\n anchor_base = np.zeros((len(ratios) * len(anchor_scales), 4),\n dtype=np.float32)\n \n for i in range(len(ratios)):\n for j in range(len(anchor_scales)):\n h = base_size * anchor_scales[j] * np.sqrt(ratios[i])\n w = base_size * anchor_scales[j] * np.sqrt(1. / ratios[i])\n \n index = i * len(anchor_scales) + j\n anchor_base[index, 1] = py - h / 2.\n anchor_base[index, 0] = px - w / 2.\n anchor_base[index, 3] = py + h / 2.\n anchor_base[index, 2] = px + w / 2.\n \n return anchor_base\n\ndef _enumerate_shifted_anchor(anchor_base, feat_stride, height, width):\n \n import numpy as xp\n \n shift_y = xp.arange(0, height * feat_stride, feat_stride)\n shift_x = xp.arange(0, width * feat_stride, feat_stride)\n shift_x, shift_y = xp.meshgrid(shift_x, shift_y)\n \n shift = xp.stack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel()), axis=1)\n \n A = anchor_base.shape[0]\n K = shift.shape[0]\n anchor = anchor_base.reshape((1, A, 4)) + \\\n shift.reshape((1, K, 4)).transpose((1, 0, 2))\n anchor = anchor.reshape((K * A, 4)).astype(np.float32)\n return anchor\nclass Anchor_Creator(object):\n def __init__(self,feat_width,feat_height):\n super(Anchor_Creator,self).__init__()\n self.width=feat_width\n self.height=feat_height\n self.anchor_base=generate_anchor_base()\n self.anchor_boxes_cxcy=self.make_anchor_boxes()\n def make_anchor_boxes(self):\n \n anchors = _enumerate_shifted_anchor(\n np.array(self.anchor_base),\n 16, self.height,self.width)\n\n \n anchors= torch.FloatTensor(anchors).to(device)\n\n \n anchors_cxcy=xy_to_cxcy(anchors)\n \n return anchors_cxcy#(W*H*K,4)\n\nclass Anchor_Target_Creator(object):\n def __init__(self,gt_boxes_xy,anchor_boxes_cxcy,img_width,img_height):\n super(Anchor_Target_Creator,self).__init__()\n self.gt_boxes_xy=gt_boxes_xy\n self.anchor_cxcy=anchor_boxes_cxcy\n \n self.anchor_xy=cxcy_to_xy(anchor_boxes_cxcy)\n \n \n self.gt_loc,self.gt_cls=self.Target_Creator(img_width,img_height)\n \n def Target_Creator(self,img_width,img_height):\n \n batch_size=1\n for i in range(batch_size):\n \n \n condition=(torch.Tensor([0,0,img_width,img_height])*torch.ones((self.anchor_xy.size(0),4))).to(device)\n\n min_cond=self.anchor_xy[:,:2]>=condition[:,:2]\n\n max_cond=((self.anchor_xy[:,2:])<=(condition[:,2:]))\n\n anchor_index=torch.cat([min_cond,max_cond],1)\n anchor_index=anchor_index.cpu().numpy()\n \n anchor_index=np.all(anchor_index,axis=1)\n \n gt_locs = torch.zeros((batch_size, self.anchor_xy.size(0), 4), dtype=torch.float).to(device) # (N, n_anchors, 4)\n gt_labels = torch.zeros((batch_size, self.anchor_xy.size(0)), 
dtype=torch.long).to(device) # (N, 8732)\n \n overlap=find_jaccard_overlap(self.gt_boxes_xy[i],self.anchor_xy)#(n_obj,n_anchors)\n \n overlap[:,~anchor_index]=0#일단 overlap에 영향없애기위한 임시negative, 필ignore처리!!\n overlap_each_prior,object_each_prior=overlap.max(dim=0)#(n_anchors)object_each_prior에는 최대 n_object 만큼 무작위 배치될수있고 최소 1개 배치될 수 있다\n _,prior_each_object=overlap.max(dim=1)#(n_obj)\n \n\n object_each_prior[prior_each_object] = object_each_prior[prior_each_object].to(device)\n \n label_each_prior=object_each_prior.clone()#(n_anchors)label\n \n label_each_prior[overlap_each_prior<0.3]=0#background\n \n label_each_prior[overlap_each_prior>=0.3]=-1\n \n# label_each_prior[prior_each_object] = 1#Ground Truth Box마다 IoU가 가장 높은 Anchor 1개를 뽑기\n kk = overlap[np.arange(overlap.size(0)),prior_each_object]\n ov=overlap.clone()\n kk=kk.cpu().numpy()\n ov=ov.cpu().numpy()\n peo=[]\n \n for j in range(overlap.size(0)):\n peo.extend(np.where(ov==kk[j])[1].tolist())\n peo=torch.LongTensor(peo).to(device)\n \n label_each_prior[overlap_each_prior>=0.7]=1\n \n \n# label_each_prior[~anchor_index]=-1\n \n# label_each_prior[peo] = 1#Ground Truth Box마다 IoU가 가장 높은 Anchor 1개를 뽑기\n gt_labels[i]=label_each_prior\n \n gt_labels[i][~anchor_index]=-1\n gt_labels[i][peo]=1\n \n gt_locs[i]=cxcy_to_gcxgcy(xy_to_cxcy(self.gt_boxes_xy[i][object_each_prior]),self.anchor_cxcy)#모델은 prior를 얼마나 움직일지를 예측하므로 \n \n gt_locs[i][~anchor_index]=0\n \n gt_labels=gt_labels.to(device)#()\n gt_locs=gt_locs.to(device)\n \n pos=(gt_labels==1)#positive\n \n neg=(gt_labels==0)#negative\n \n \n idx_pos=(np.where(pos.cpu().detach().numpy()[0]==True)[0])\n idx_neg=(np.where(neg.cpu().detach().numpy()[0]==True)[0])\n\n \n n_neg = idx_neg.size\n n_pos=idx_pos.size\n \n threshhold=128\n \n if n_pos > threshhold:\n idx_ignore = np.random.choice(\n idx_pos, size=n_pos-128, replace=False)\n gt_labels[0][idx_ignore]=-1\n \n n_th=256-((gt_labels==1).sum()).cpu().detach().numpy().item()\n if n_neg > n_th:\n idx_ignore = np.random.choice(\n idx_neg, size=n_neg-n_th, replace=False)\n gt_labels[0][idx_ignore]=-1\n \n# print((gt_labels==-1).sum())\n return gt_locs,gt_labels\n\nclass RPN(nn.Module):\n def __init__(self):\n super(RPN,self).__init__()\n# self.conv_first=nn.Conv2d(512,512,kernel_size=3,padding=1)\n \n# self.conv_cls=nn.Conv2d(512,2*9,kernel_size=1,padding=0)\n# self.conv_loc=nn.Conv2d(512,4*9,kernel_size=1,padding=0)\n self.conv_first=nn.Conv2d(512,512,3,1,1)\n self.conv_cls=nn.Conv2d(512,2*9,1,1,0)\n self.conv_loc=nn.Conv2d(512,4*9,1,1,0)\n \n\n self.init_conv2d() \n \n def init_conv2d(self):\n \n for c in self.children():\n if isinstance(c, nn.Conv2d):\n nn.init.normal_(c.weight,mean=0.,std=0.01)\n nn.init.constant_(c.bias, 0.)\n\n def forward(self, conv5_3_feats):\n feat=conv5_3_feats\n n, _, hh, ww = conv5_3_feats.shape\n out=F.relu(self.conv_first(feat))\n# rpn_cls=F.relu(self.conv_cls(out))\n# rpn_loc=F.relu(self.conv_loc(out))\n \n \n# rpn_cls = rpn_cls.view(1,-1,2)#(batch_size,HxWx9,2)\n \n \n \n rpn_loc=(self.conv_loc(out))\n \n rpn_loc=rpn_loc.permute(0, 2, 3, 1).contiguous()\n rpn_loc=rpn_loc.view(1,-1,4)#(batch_size,HxWx9,4)\n \n rpn_cls=(self.conv_cls(out))\n \n rpn_cls = rpn_cls.permute(0, 2, 3, 1).contiguous()\n \n rpn_softmax_scores = F.softmax(rpn_cls.view(1, hh, ww, 9, 2), dim=4)\n rpn_fg_scores = rpn_softmax_scores[:, :, :, :, 1].contiguous()\n rpn_fg_scores = rpn_fg_scores.view(1, -1)\n \n rpn_cls = rpn_cls.view(1, -1, 2)\n \n \n \n return rpn_loc,rpn_cls,rpn_fg_scores\n \n\nclass RPNLoss(nn.Module):\n def __init__(self):\n 
super(RPNLoss,self).__init__()\n \n \n self.rpn_sigma = 3.\n \n\n def forward(self,rpn_loc,rpn_cls, gt_loc,gt_cls):\n \"\"\"\n roi_loc=(1,H*W*k,4)\n gt_loc=(1,H*W*k,4)\n gt_cls=(1,H*W*K,2)\n \"\"\"\n rpn_loc=rpn_loc[0]\n rpn_cls=rpn_cls[0]\n \n gt_loc=gt_loc[0]\n gt_cls=gt_cls[0]\n \n try:\n rpn_cls_loss=F.cross_entropy(rpn_cls, gt_cls, ignore_index=-1)\n #ignore_index가 없는데 무시하라고 하면 에러가 뜸 즉 -1라벨링이 안된것임\n #무조건 -1라벨이 있어야함\n except:\n import pdb;pdb.set_trace()\n \n rpn_loc_loss = self._fast_rcnn_loc_loss(rpn_loc,gt_loc,gt_cls,self.rpn_sigma)\n \n \n return rpn_loc_loss+rpn_cls_loss\n \n def _smooth_l1_loss(self,x, t, in_weight, sigma):\n sigma2 = sigma ** 2\n diff = in_weight * (x - t)\n abs_diff = diff.abs()\n flag = (abs_diff.data < (1. / sigma2)).float()\n y = (flag * (sigma2 / 2.) * (diff ** 2) +\n (1 - flag) * (abs_diff - 0.5 / sigma2))\n return y.sum()\n\n\n def _fast_rcnn_loc_loss(self,pred_loc, gt_loc, gt_label, sigma):\n \n in_weight = torch.zeros(gt_loc.shape).cuda()\n \n # Localization loss is calculated only for positive rois.\n # NOTE: unlike origin implementation, \n # we don't need inside_weight and outside_weight, they can calculate by gt_label\n in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1\n\n loc_loss = self._smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)\n # Normalize by total number of negtive and positive rois.\n loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss\n \n return loc_loss\n\n \nclass Proposal(nn.Module):\n def __init__(self):\n super(Proposal,self).__init__()\n \n self.nms_thresh=0.7\n self.n_train_pre_nms=12000\n self.n_train_post_nms=2000\n self.n_test_pre_nms=6000\n self.n_test_post_nms=300\n \n self.min_size=16\n def forward(self,rpn_loc,rpn_fg,anchor_boxes_cxcy,scale,img_width,img_height,train):\n if train:\n n_pre_nms = self.n_train_pre_nms\n n_post_nms = self.n_train_post_nms\n else:\n n_pre_nms = self.n_test_pre_nms\n n_post_nms = self.n_test_post_nms\n rois_cxcy=gcxgcy_to_cxcy(rpn_loc[0].to(device), anchor_boxes_cxcy)\n rois_xy=cxcy_to_xy(rois_cxcy)#(x,y,x,y)\n \n #1 WHK -> for train=2000 for test=300 by condition of (boundary,NMS-Score)\n batch_size=1\n \n min_size = self.min_size * scale\n rpn_fg=rpn_fg[0].to(device)\n \n for i in range(batch_size):\n # rois_xy.clamp_(0,1)\n \n \n \n# rois_xy[:,0].clamp_(min=0)\n# rois_xy[:,1].clamp_(min=0)\n# rois_xy[:,2].clamp_(max=img_width)\n# rois_xy[:,3].clamp_(max=img_height)\n\n rois_xy[:,0].clamp_(min=0,max=img_width)\n rois_xy[:,1].clamp_(min=0,max=img_height)\n rois_xy[:,2].clamp_(min=0,max=img_width)\n rois_xy[:,3].clamp_(min=0,max=img_height)\n\n\n \n\n hs = rois_xy[:, 3] - rois_xy[:, 1]\n ws = rois_xy[:, 2] - rois_xy[:, 0]\n hs=hs.cpu().detach().numpy()\n ws=ws.cpu().detach().numpy()\n \n keep = np.where((hs >= min_size) & (ws >= min_size))[0]\n \n rpn_fg=rpn_fg[keep]\n rois_xy=rois_xy[keep,:]\n \n \n \n \n \n index=rpn_fg.argsort().cpu().numpy()[::-1]\n if n_pre_nms > 0:\n index = index[:n_pre_nms]\n index=torch.LongTensor(index.tolist()).to(device)\n \n \n \n rpn_fg=rpn_fg[index]\n rois_xy=rois_xy[index,:]\n \n keep = nms(rois_xy,rpn_fg,0.7)#(6000->2000)\n \n if n_post_nms > 0:\n keep = keep[:n_post_nms]\n \n \n rpn_fg=rpn_fg[keep]\n rois_xy=rois_xy[keep,:]\n \n return rois_xy\n \nclass Proposal_Target_Creator(nn.Module):\n def __init__(self):\n super(Proposal_Target_Creator,self).__init__() \n \n def forward(self,rois_xy,boxes,labels):\n batch_size=1\n gt_locs = torch.zeros((1, rois_xy.size(0), 4), dtype=torch.float).to(device) # (N, 
n_anchors, 4)\n gt_labels = torch.zeros((1, rois_xy.size(0)), dtype=torch.long).to(device) # (N, n_anchors)\n\n for i in range(batch_size):\n\n n_objects=labels[i].size(0)\n overlap=find_jaccard_overlap(boxes[i],rois_xy)#(n_obj,n_anchors)\n overlap_each_prior,object_each_prior=overlap.max(dim=0)#(n_anchors)object_each_prior에는 최대 n_object 만큼 무작위 배치될수있고 최소 1개 배치될 수 있다\n\n\n _,prior_each_object=overlap.max(dim=1)#(n_obj)\n object_each_prior[prior_each_object] = torch.LongTensor(range(n_objects)).to(device)#(n_obj)여기서 object_each_prior에 최소 n_object 만큼 무작위 배치한다.그러면 8732중에 적어도 n_object를 대표하는 박스가 생기게된다\n\n label_each_prior=labels[i][object_each_prior]\n overlap_each_prior[prior_each_object]=1\n\n\n label_each_prior[overlap_each_prior<0.5]=0#background\n# label_each_prior[overlap_each_prior<0.1]=-1\n# label_each_prior[overlap_each_prior<0.6]=0#background\n\n gt_labels[i]=label_each_prior#박스에 임의의 labeling but 반드시 정답 포함\n rois_cxcy=xy_to_cxcy(rois_xy)\n gt_locs[i]=cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_each_prior]),rois_cxcy)#모델은 prior를 얼마나 움직일지를 예측하므로 \n \n \n \n# pos= (gt_labels!=0)*(gt_labels!=-1)#positive\n pos= (gt_labels!=0)#positive\n neg=(gt_labels==0)\n \n idx_pos=np.where(pos.cpu().numpy()[0]==True)[0]\n idx_neg=np.where(neg.cpu().numpy()[0]==True)[0]\n \n pos_roi_per_this_image = int(min(32, idx_pos.size))\n neg_roi_per_this_image = int(min(64+(64-pos_roi_per_this_image),idx_neg.size))\n# pos_roi_per_this_image = int(min(16, idx_pos.size))\n# neg_roi_per_this_image = int(min(32+(32-pos_roi_per_this_image),idx_neg.size))\n \n\n \n if idx_pos.size > 0:\n idx_pos = np.random.choice(\n idx_pos, size=pos_roi_per_this_image, replace=False)\n if idx_neg.size > 0:\n idx_neg = np.random.choice(\n idx_neg, size=neg_roi_per_this_image, replace=False)\n \n idx_pos=torch.LongTensor(idx_pos).to(device)\n idx_neg=torch.LongTensor(idx_neg).to(device)\n \n pos_rois=rois_xy[idx_pos]\n neg_rois=rois_xy[idx_neg]\n \n rois=torch.cat([pos_rois,neg_rois],0)\n rois_idx=torch.cat([idx_pos,idx_neg],0)\n \n \n gt_locs=gt_locs[0][rois_idx,:]\n gt_labels=gt_labels[0][rois_idx]\n \n \n \n plus_loc = torch.FloatTensor([0,0,0,0]).unsqueeze(0).expand_as(boxes[0]).to(device) \n \n rois=torch.cat([rois,boxes[0]],0)\n gt_locs=torch.cat([gt_locs,plus_loc],0)\n gt_labels=torch.cat([gt_labels,labels[0]],0)\n sample_rois=rois\n \n #pos+gt진짜 정답박스를 주기(train할때만)\n gt_locs[:,:2]=gt_locs[:,:2]*10\n gt_locs[:,2:]=gt_locs[:,2:]*20\n \n \n return sample_rois,gt_locs,gt_labels#xyxy , gcxgcy\n \n\n \nclass ROILoss(nn.Module):\n def __init__(self):\n super(ROILoss,self).__init__()\n \n self.roi_sigma = 1.\n self.cross_entropy=nn.CrossEntropyLoss()\n \n def forward(self,roi_locs, roi_scores,gt_locs,gt_labels):\n \n roi_locs=roi_locs.view(roi_locs.size(0),-1,4)#(131,21,4)\n #pick 1 in 21(0~20)\n roi_locs=roi_locs[torch.arange(roi_locs.size(0)).long().cuda(),gt_labels]\n \n roi_cls_loss=self.cross_entropy(roi_scores, gt_labels)\n \n \n roi_loc_loss = self._fast_rcnn_loc_loss(roi_locs,gt_locs,gt_labels,self.roi_sigma)\n \n \n return roi_loc_loss +roi_cls_loss\n\n \n def _smooth_l1_loss(self,x, t, in_weight, sigma):\n sigma2 = sigma ** 2\n diff = in_weight * (x - t)\n abs_diff = diff.abs()\n flag = (abs_diff.data < (1. / sigma2)).float()\n y = (flag * (sigma2 / 2.) 
* (diff ** 2) +\n (1 - flag) * (abs_diff - 0.5 / sigma2))\n return y.sum()\n\n\n def _fast_rcnn_loc_loss(self,pred_loc, gt_loc, gt_label, sigma):\n \n in_weight = torch.zeros(gt_loc.shape).cuda()\n # Localization loss is calculated only for positive rois.\n # NOTE: unlike origin implementation, \n # we don't need inside_weight and outside_weight, they can calculate by gt_label\n in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1\n\n loc_loss = self._smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)\n \n # Normalize by total number of negtive and positive rois.\n loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss\n \n return loc_loss\nclass FasterRCNNVGG16(nn.Module):\n def __init__(self):\n super(FasterRCNNVGG16, self).__init__()\n self.extractor, self.classifier = truncated_vgg16()\n \n self.rpn =RPN()\n #rpn out->rpn_loc,rpn_cls\n self.head = VGG16RoIHead(\n classifier=self.classifier)\n #head out->roi_locs, roi_scores\n \nclass VGG16RoIHead(nn.Module):\n \n\n def __init__(self,classifier):\n \n super(VGG16RoIHead, self).__init__()\n\n self.classifier = classifier\n self.cls_loc = nn.Linear(4096, 21 * 4)\n self.score = nn.Linear(4096, 21)\n\n self.roi = RoIPool((7, 7),0.0625)\n self.init_Linear()\n self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) # there are 512 channels in conv4_3_feats\n nn.init.constant_(self.rescale_factors, 20)\n \n \n def init_Linear(self):\n \n for c in self.children():\n if isinstance(c, nn.Linear):\n nn.init.normal_(c.weight,mean=0.,std=0.01)\n nn.init.constant_(c.bias, 0.)\n \n def forward(self, x, rois_xy):\n \n x=x.to(device)\n \n norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() # (N, 1, 38, 38)\n x = x / norm # (N, 512, 38, 38)\n x = x * self.rescale_factors # (N, 512, 38, 38)\n \n rois=rois_xy\n ind=torch.zeros((rois.size(0))).to(device)\n roi_ind=torch.cat([ind[:, None], rois], dim=1)\n# height=x.size(2)\n# width=x.size(3)\n \n# roi_ind[:,1]=roi_ind[:,1]*x.size(3)\n# roi_ind[:,2]=roi_ind[:,2]*x.size(2)\n# roi_ind[:,3]=roi_ind[:,3]*x.size(3)\n# roi_ind[:,4]=roi_ind[:,4]*x.size(2)\n \n \n #x.shape ([1, 512, 37, 50])\n \n pool = self.roi(x, roi_ind)#([131, 512, 7, 7])\n pool = pool.view(pool.size(0), -1)\n out=self.classifier(pool)\n \n roi_locs = self.cls_loc(out)#[128+alpha,4]\n roi_scores = self.score(out)#[128+alpha,21]\n \n return roi_locs, roi_scores#gcxgcy\n \n\n \n\nclass Detector(nn.Module):\n def __init__(self):\n super(Detector,self).__init__()\n \n \n def forward(self,rois_xy,predicted_locs,predicted_scores,min_score,max_overlap,top_k):\n \n batch_size = 1\n rois_cxcy=xy_to_cxcy(rois_xy)\n \n predicted_locs=predicted_locs.view(predicted_locs.size(0),-1,4)#(131,21,4)\n #pick 1 in 21(0~20)\n predicted_scores=F.softmax(predicted_scores, dim=1)\n A=torch.arange(predicted_locs.size(0)).long().cuda()\n B=predicted_scores.max(dim=1)[1].long().cuda()\n \n predicted_locs=predicted_locs[A,B]\n \n \n\n all_images_boxes = list()\n all_images_labels = list()\n all_images_scores = list()\n\n\n\n for i in range(batch_size):\n \n predicted_locs[:,:2]=predicted_locs[:,:2]/10.\n predicted_locs[:,2:]=predicted_locs[:,2:]/20.\n decoded_locs = cxcy_to_xy(\n gcxgcy_to_cxcy(predicted_locs, rois_cxcy)) # (300, 4)\n \n \n image_boxes = list()\n image_labels = list()\n image_scores = list()\n\n max_scores, best_label = predicted_scores.max(dim=1) # (300)\n\n\n for c in range(1, 21):\n\n class_scores = predicted_scores[:, c] # (300)\n score_above_min_score = class_scores > min_score \n 
n_above_min_score = score_above_min_score.sum().item()\n if n_above_min_score == 0:\n continue\n class_scores = class_scores[score_above_min_score] \n class_decoded_locs = decoded_locs[score_above_min_score] \n\n\n class_scores, sort_ind = class_scores.sort(dim=0, descending=True) \n class_decoded_locs = class_decoded_locs[sort_ind] \n\n\n overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) \n # (NMS)\n\n\n suppress = torch.zeros((n_above_min_score), dtype=torch.uint8).to(device) \n\n\n for box in range(class_decoded_locs.size(0)):\n\n\n if suppress[box] == 1:\n continue\n\n\n suppress = torch.max(suppress, (overlap[box] > max_overlap).byte())\n\n suppress[box] = 0\n suppress=suppress.bool()\n\n image_boxes.append(class_decoded_locs[~suppress])\n\n image_labels.append(torch.LongTensor((~suppress).sum().item() * [c]).to(device))\n image_scores.append(class_scores[~suppress])\n\n\n if len(image_boxes) == 0:\n image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))\n image_labels.append(torch.LongTensor([0]).to(device))\n image_scores.append(torch.FloatTensor([0.]).to(device))\n\n\n image_boxes = torch.cat(image_boxes, dim=0) \n image_labels = torch.cat(image_labels, dim=0) \n image_scores = torch.cat(image_scores, dim=0) \n n_objects = image_scores.size(0)\n\n\n if n_objects > top_k:\n image_scores, sort_ind = image_scores.sort(dim=0, descending=True)\n image_scores = image_scores[:top_k] \n image_boxes = image_boxes[sort_ind][:top_k] \n image_labels = image_labels[sort_ind][:top_k] \n\n\n all_images_boxes.append(image_boxes)\n all_images_labels.append(image_labels)\n all_images_scores.append(image_scores)\n \n return all_images_boxes, all_images_labels, all_images_scores\n\n \n","repo_name":"chjung99/rcv_badge_fastser_rcnn","sub_path":"model_0810.py","file_name":"model_0810.py","file_ext":"py","file_size_in_byte":24139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7413976720","text":"import math\nfrom pprint import pprint\n\nclass Tagger(object):\n \"\"\"\n Tagger for tagging threegrams\n \"\"\"\n\n def __init__(self):\n self.bigrams = {}\n self.trigrams = {}\n\n def classify(self, filename):\n \"\"\"\n Classifies based on a count file.\n \"\"\"\n\n fp = open(filename)\n\n for line in fp:\n lst = line.split()\n class_type = lst[1]\n if class_type == \"2-GRAM\":\n self.bigrams[(lst[2], lst[3])] = int(lst[0])\n elif class_type == \"3-GRAM\":\n self.trigrams[(lst[2], lst[3], lst[4])] = int(lst[0])\n\n def transit(self, first, second, third):\n bigram = (first, second)\n if bigram in self.bigrams:\n trigram = (first, second, third)\n if trigram in trigrams:\n return 1.0 * self.trigrams[trigram] / self.bigrams[bigram]\n else:\n return 0\n else:\n return 0\n\n fp.close()\n\n def simple_printer(self, filename):\n \"\"\"\n Given the name of a file with three parts of speech, gives the log probability given that the previous 2 words had occured\n \"\"\"\n fp = open(filename)\n\n for line in fp:\n lst = line.split()\n num = self.transit(lst[0], lst[1], lst[2])\n if num > 0:\n pprint(math.log(num))\n else:\n pprint(0)\n fp.close()\n\n","repo_name":"mosesn/Tagger","sub_path":"threegram.py","file_name":"threegram.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"72142634608","text":"def checkParenthese(parenthese: str, sort: bool = False):\n OPENTAG: list[str] = ['[', 0, '{', 0, '(', 0, '<', 0]\n CLOSETAG: list[str] = [']', 0, '}', 0, ')', 0, '>', 0]\n length: int = len(parenthese)\n result: str = ''\n i = 0\n\n def findIndex(i):\n idx: int = OPENTAG.index(parenthese[i]) if parenthese[i] in OPENTAG else CLOSETAG.index(parenthese[i])\n return idx\n\n if sort:\n list_p = list(parenthese)\n for i in range(len(list_p)):\n idx: int = findIndex(i)\n if parenthese[i] in OPENTAG:\n OPENTAG[idx + 1] += 1\n else:\n CLOSETAG[idx + 1] += 1\n\n for i in range(1, 8, 2):\n diff: int = abs(OPENTAG[i] - CLOSETAG[i])\n for j in range(diff):\n if OPENTAG[i] < CLOSETAG[i]:\n list_p.append(OPENTAG[i - 1])\n else:\n list_p.append(CLOSETAG[i - 1])\n \n list_p.sort()\n new_p = ''.join(list_p)\n \n return new_p\n\n\n while length > 0:\n # find index in OPENTAG or CLOSETAG\n idx: int = findIndex(i)\n next: str = '' if i >= len(parenthese) - 1 else parenthese[i + 1]\n\n if parenthese[i] + next != OPENTAG[idx] + CLOSETAG[idx]:\n i += 1\n else:\n i += 2\n length -= 1\n result += OPENTAG[idx] + CLOSETAG[idx]\n\n length -= 1\n\n return result\n\nprint(checkParenthese(')()((', True)) # ()()()() || ((()))","repo_name":"barrimos/pairParentheses","sub_path":"pairParentheses.py","file_name":"pairParentheses.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"20581042515","text":"import facebook\nimport os\nfrom notipy.cli import Notipy\nimport pypapath\nimport testInternet\n\nNO = '\\033[0m' # white (normal)\nRED = '\\033[31m' # red\n \ndef handle():\n try:\n BASE_DIR = os.path.dirname(os.path.dirname(__file__))\n HOME_DIR = os.environ['HOME']\n os.chdir(HOME_DIR)\n print('''Do you have access token?\n If no, then goto https://developers.facebook.com/tools/explorer \n and click on 'Get Token' button then click on 'Get User Access Token' \n and on the User Data Permissions mark on 'pubish_actions'\n ''')\n imageName = input('Enter the image name with extension: ')\n imageLocation = input('Enter the image location: ')\n os.chdir(imageLocation)\n caption = input('Enter the caption to the image: ')\n token = input('Enter Facebook access token: ')\n graph = facebook.GraphAPI(access_token = token)\n graph.put_photo(image=open(imageName, 'rb'), message= caption)\n print('Your photo is uploaded!')\n Notipy().send('Your photo is uploaded!')\n except FileNotFoundError:\n print(RED + 'No such file or directory!' + NO)\n except:\n print(RED + '\\nClosing' + NO)\n \nif not testInternet.is_connected():\n print(RED + 'Internet is not working well! Check your connection.' + NO)\n exit(0)\nhandle()\n","repo_name":"sawin0/pypa","sub_path":"pypa/pict.py","file_name":"pict.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"25150981122","text":"import multiprocessing as mp\nimport time\ndef test(res = 0):\n print(\"Test start with res = \", res)\n time.sleep(res)\n print(\"Test stop\")\n\ndef check():\n try:\n proc = mp.Process(target=test,args=([10]))\n proc.start()\n proc.join(timeout=3)\n if proc.is_alive():\n print(\"process is alive\")\n proc.terminate()\n else:\n print(\"process ended normally\")\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n check()","repo_name":"krishnabhunia/VSCode-Programs","sub_path":"mul_test.py","file_name":"mul_test.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71409942447","text":"import tkinter as tk\r\nfrom tkinter import filedialog, Text\r\nimport os\r\n\r\nroot = tk.Tk()\r\nappsCollected = []\r\n\r\nif os.path.isfile('save.txt'):\r\n with open('save.txt', 'r') as exe:\r\n tempFile = exe.read()\r\n tempFile = tempFile.split(',')\r\n appsCollected = [x for x in tempFile if x.strip()]\r\n\r\ndef addApplications():\r\n for widget in frame.winfo_children():\r\n widget.destroy()\r\n\r\n appName = filedialog.askopenfilename(initialdir=\"/\", title=\"Select File\",\r\n filetypes=((\"executables\",\"*.exe\"),(\"All Files\", \"*.*\")))\r\n\r\n appsCollected.append(appName)\r\n print(appName)\r\n for mainAppOpener in appsCollected:\r\n label = tk.Label(frame, text=mainAppOpener, bg=\"grey\")\r\n label.pack() \r\n\r\ndef runApplicaitons():\r\n for mainAppOpener in appsCollected:\r\n os.startfile(mainAppOpener)\r\n\r\ncanvas = tk.Canvas(root, height=500, width=500, bg=\"#263D42\")\r\ncanvas.pack()\r\n\r\nframe = tk.Frame(root, bg=\"white\")\r\nframe.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)\r\n\r\nopenFile = tk.Button(root, text=\"Open File\", padx=10, pady=5, fg=\"white\", bg=\"grey\", command=addApplications)\r\nopenFile.pack()\r\n\r\nrunFiles = tk.Button(root, text=\"Run File(s)\", padx=10, pady=5, fg=\"white\", bg=\"grey\", command=runApplicaitons)\r\nrunFiles.pack()\r\n\r\nfor mainAppOpener in appsCollected:\r\n label = tk.Label(frame, text=mainAppOpener)\r\n label.pack()\r\n\r\nroot.mainloop()\r\n\r\n\r\nwith open('save.txt', 'w') as exe:\r\n for mainAppOpener in appsCollected:\r\n exe.write(mainAppOpener + ',')","repo_name":"bantezana/MultiAppOpener","sub_path":"mainAppOpener.py","file_name":"mainAppOpener.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"73841528046","text":"#!/usr/bin/env python3\n\"\"\"Script to set up a Debian Linux based system as a Lokole client.\"\"\"\nfrom argparse import ArgumentParser\nfrom json import dumps\nfrom json import loads\nfrom logging import StreamHandler\nfrom logging import getLogger\nfrom multiprocessing import cpu_count\nfrom os import chmod\nfrom os import getenv\nfrom os import stat\nfrom os import urandom\nfrom pathlib import Path\nfrom shutil import chown\nfrom socket import gethostname\nfrom stat import S_IEXEC\nfrom string import ascii_letters\nfrom string import digits\nfrom subprocess import PIPE # nosec\nfrom subprocess import run # nosec\nfrom sys import executable as current_python_binary\nfrom sys import version_info\nfrom tempfile import gettempdir\nfrom time import sleep\nfrom time import time\nfrom urllib.error import HTTPError\nfrom urllib.request import Request\nfrom urllib.request import urlopen\n\nLOG = getLogger(__name__)\n\nTEMP_ROOT = Path(gettempdir()) / Path(__file__).name\nTEMP_ROOT.mkdir(parents=True, exist_ok=True)\n\nSIM_TYPES = ('hologram', 'Ethernet', 'LocalOnly', 'mkwvconf')\n\n\nclass Setup:\n groups = tuple()\n packages = tuple()\n\n def __init__(self, args, abort):\n self.args = args\n self.abort = abort\n\n @property\n def is_enabled(self):\n return True\n\n @property\n def user(self):\n base_user = getenv('USER')\n sudo_user = getenv('SUDO_USER')\n\n if sudo_user and base_user == 'root':\n return sudo_user\n elif base_user:\n return base_user\n else:\n return self.sh('whoami')\n\n @property\n def home(self):\n return Path('/') / 'home' / self.user\n\n def __call__(self):\n try:\n result = self.__is_complete()\n except FileNotFoundError:\n pass\n else:\n LOG.info('Skipping %s: already completed', self._step_name)\n return result\n\n if not self.is_enabled:\n LOG.info('Skipping %s: not enabled', self._step_name)\n return\n\n LOG.info('Running %s', self._step_name)\n\n self._grant_permissions()\n self._install_dependencies()\n result = self._run()\n self.__mark_complete(result)\n\n LOG.info('Done with %s', self._step_name)\n return result\n\n def _grant_permissions(self):\n for group in self.groups:\n self.sh('usermod -a -G \"{group}\" \"{user}\"'\n .format(group=group, user=self.user))\n\n def _install_dependencies(self):\n if self.packages:\n self.sh('apt-get install -y {}'.format(' '.join(self.packages)),\n retry_attempts=10, retry_interval=60)\n\n def _run(self):\n raise NotImplementedError\n\n @property\n def _step_name(self):\n return self.__class__.__name__\n\n @property\n def __guard_path(self):\n guard_name = '{}.done'.format(self._step_name)\n return self.abspath(TEMP_ROOT / guard_name)\n\n @property\n def __stdout_path(self):\n stdout_name = '{}.stdout'.format(self._step_name)\n return self.abspath(TEMP_ROOT / stdout_name)\n\n @property\n def __stderr_path(self):\n stderr_name = '{}.stderr'.format(self._step_name)\n return self.abspath(TEMP_ROOT / stderr_name)\n\n def __is_complete(self):\n return loads(Path(self.__guard_path).read_text(encoding='utf-8'))\n\n def __mark_complete(self, result):\n self.write_file(self.__guard_path, dumps(result))\n\n def assume_ownership(self, path):\n chown(path, self.user, self.user)\n\n def write_file(self, path, content, executable=False):\n if not isinstance(content, str):\n content = '\\n'.join(content)\n\n with open(path, 'w') as fobj:\n fobj.write(content)\n\n self.assume_ownership(path)\n\n if executable:\n mode = stat(path).st_mode\n chmod(path, mode | S_IEXEC)\n\n def create_daemon(self, program_name, 
command, user=None, env=None):\n env = env or {}\n user = user or self.user\n extra_conf = []\n\n if self.args.log_directory == '-':\n stderr = '/dev/fd/2'\n stdout = '/dev/fd/1'\n extra_conf.extend((\n 'stdout_logfile_maxbytes=0',\n 'stderr_logfile_maxbytes=0',\n ))\n else:\n stderr = self.abspath(Path(self.args.log_directory) / '{}.stderr.log'.format(program_name))\n stdout = self.abspath(Path(self.args.log_directory) / '{}.stdout.log'.format(program_name))\n\n self.write_file('/etc/supervisor/conf.d/{}.conf'.format(program_name), (\n '[program:{}]'.format(program_name),\n 'command={}'.format(command),\n 'autostart=true',\n 'autorestart=true',\n 'startretries=3',\n 'stopasgroup=true',\n 'stderr_logfile={}'.format(stderr),\n 'stdout_logfile={}'.format(stdout),\n 'user={}'.format(user),\n 'environment={}'.format(','.join('{}={}'.format(*kv) for kv in env.items())),\n *extra_conf,\n ))\n\n def abspath(self, file_path):\n file_path = Path(file_path).absolute()\n self._mkdir(file_path.parent)\n return str(file_path)\n\n def sh(self, command, user=None, accept_failure=False, retry_attempts=0, retry_interval=0):\n if user:\n command = \"su '{user}' -c '{command}'\".format(\n user=user,\n command=command)\n\n process = run(command, shell=True, stderr=PIPE, stdout=PIPE) # nosec\n stdout = process.stdout.decode('utf-8').strip()\n stderr = process.stderr.decode('utf-8').strip()\n status = process.returncode\n\n with open(self.__stdout_path, 'a', encoding='utf-8') as fobj:\n fobj.write('===== {} =====\\n{}\\n{}\\n\\n'.format(command, status, stdout))\n\n with open(self.__stderr_path, 'a', encoding='utf-8') as fobj:\n fobj.write('===== {} =====\\n{}\\n{}\\n\\n'.format(command, status, stderr))\n\n if status == 0 or accept_failure:\n return stdout\n\n if retry_attempts > 0:\n sleep(retry_interval)\n return self.sh(command, user, accept_failure, retry_attempts - 1, retry_interval)\n\n raise Exception(stderr)\n\n def _mkdir(self, path):\n path.mkdir(parents=True, exist_ok=True)\n home_prefix = Path(self.home)\n is_in_home = path.parts[:3] == home_prefix.parts\n if is_in_home:\n home_parts = path.parts[3:]\n for part in home_parts:\n home_prefix /= part\n self.assume_ownership(str(home_prefix))\n\n\nclass SystemSetup(Setup):\n def _run(self):\n self._ensure_root()\n self._ensure_apt()\n self._set_locale()\n self._set_timezone()\n self._set_password()\n\n def _ensure_root(self):\n if getenv('USER') != 'root' and self.sh('whoami') != 'root':\n self.abort('Must run script via sudo')\n\n def _ensure_apt(self):\n self.sh('apt-get update', retry_attempts=10, retry_interval=30)\n\n def _set_locale(self):\n locale_command = (\n 'export LANGUAGE=\"{0}\"; '\n 'export LC_ALL=\"{0}\"; '\n 'export LANG=\"{0}\"; '\n 'export LC_TYPE=\"{0}\";'\n ).format(self.args.locale)\n\n self.sh('locale-gen \"{}\"'.format(self.args.locale))\n self.sh('update-locale')\n self.sh('eval \"{}\"'.format(locale_command))\n\n self.write_file('/etc/profile.d/set-locale.sh', locale_command,\n executable=True)\n\n def _set_timezone(self):\n self.sh('timedatectl set-timezone \"{}\"'.format(self.args.timezone))\n\n def _set_password(self):\n if not self.args.password:\n return\n\n self.sh('echo \"{user}:{password}\" | chpasswd'.format(\n user=self.user,\n password=self.args.password))\n\n @property\n def is_enabled(self):\n return self.args.system_setup != 'no'\n\n\nclass WifiSetup(Setup):\n packages = (\n 'hostapd',\n 'dnsmasq',\n )\n\n ip_base = '10.0.0'\n\n def _run(self):\n if not self.ht_capab:\n self.abort('Unsupported device: 
{}'.format(self.device))\n\n self._configure_dns()\n self._configure_wifi()\n self._disable_system_power_management()\n\n def _configure_dns(self):\n hosts = [\n ('::1', 'localhost ip6-localhost ip6-loopback'),\n ('ff02::1', 'ip6-allnodes'),\n ('ff02::2', 'ip6-allrouters'),\n ('127.0.0.1', 'localhost'),\n ('127.0.0.1', self.device),\n ('127.0.1.1', self.device),\n ]\n\n for prefix in ['www.', '']:\n for tld in ['.com', '.org', '.ca', '.cd', '']:\n for host in ['lokole', 'opwen', 'ascoderu', 'email']:\n hosts.append((self.ip, prefix + host + tld))\n\n self.write_file('/etc/hosts', ('{}\\t{}'.format(ip, host) for (ip, host) in hosts))\n\n logfile = '/var/log/dnsmasq.log'\n\n self.write_file('/etc/dnsmasq.conf', (\n 'log-facility={}'.format(logfile),\n 'dhcp-range={0}.10,{0}.250,12h'.format(self.ip_base),\n 'interface=wlan0',\n 'no-resolv',\n 'log-queries',\n 'server=8.8.8.8',\n ))\n\n def _configure_wifi(self):\n hostapd_conf = '/etc/hostapd/hostapd.conf'\n\n self.write_file(hostapd_conf, (\n 'interface=wlan0',\n 'driver=nl80211',\n 'hw_mode=g',\n 'channel=6',\n 'ieee80211n=1',\n 'wmm_enabled=1',\n 'ht_capab={}'.format(self.ht_capab),\n 'macaddr_acl=0',\n 'auth_algs=1',\n 'wpa=2',\n 'wpa_key_mgmt=WPA-PSK',\n 'rsn_pairwise=CCMP',\n 'ssid={}'.format(self.args.wifi_name),\n 'wpa_passphrase={}'.format(self.args.wifi_password),\n ))\n\n self.write_file('/etc/default/hostapd', 'DAEMON_CONF={}'.format(hostapd_conf))\n\n self.write_file('/etc/network/interfaces', (\n 'auto lo',\n 'iface lo inet loopback',\n\n 'auto eth0',\n 'allow-hotplug eth0',\n 'iface eth0 inet dhcp',\n\n 'auto wlan0',\n 'allow-hotplug wlan0',\n 'iface wlan0 inet static',\n 'post-up service hostapd restart',\n 'post-up service dnsmasq restart',\n 'address {}'.format(self.ip),\n 'netmask 255.255.255.0',\n 'wireless-power off',\n\n 'auto ppp0',\n 'iface ppp0 inet wvdial',\n ))\n\n self.sh('systemctl unmask hostapd.service')\n self.sh('systemctl start hostapd.service')\n\n def _disable_system_power_management(self):\n self.sh('systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target')\n\n @property\n def ip(self):\n return '{}.1'.format(self.ip_base)\n\n @property\n def device(self):\n return gethostname()\n\n @property\n def ht_capab(self):\n if self.device in ['OrangePI', 'orangepizero']:\n return '[HT40][DSS_CCK-40]'\n\n if self.device in ['raspberrypi']:\n return '[HT40][SHORT-GI-20][DSS_CCK-40]'\n\n return None\n\n @property\n def is_enabled(self):\n return self.args.wifi != 'no'\n\n\nclass ModemSetup(Setup):\n packages = (\n 'usb-modeswitch',\n 'usb-modeswitch-data',\n 'mobile-broadband-provider-info',\n 'ppp',\n 'wvdial',\n )\n\n groups = (\n 'dialout',\n 'dip',\n )\n\n def _run(self):\n self._configure_wvdial()\n\n return {\n 'OPWEN_SYNC_SCHEDULE': self.args.sync_schedule,\n }\n\n def _configure_wvdial(self):\n self.write_file('/etc/ppp/peers/wvdial', (\n 'noauth',\n 'name wvdial',\n 'usepeerdns',\n 'defaultroute',\n 'replacedefaultroute',\n ))\n\n @property\n def is_enabled(self):\n if not super().is_enabled:\n return False\n\n if self.args.sim_type == 'LocalOnly':\n return False\n\n if not self.args.sync_schedule or not self.args.registration_credentials:\n self.abort('Sync schedule and registration credentials are required.')\n\n return True\n\n\nclass ClientSetup(Setup):\n def _run(self):\n create_request_payload = dumps({'domain': self.client_domain}).encode('utf-8')\n create_request = Request(self.client_url_create)\n create_request.add_header('Content-Type', 'application/json; charset=utf-8')\n 
create_request.add_header('Content-Length', str(len(create_request_payload)))\n create_request.add_header('Authorization', 'Bearer {}'.format(self.args.registration_credentials))\n\n try:\n with urlopen(create_request, create_request_payload): # nosec\n pass\n except HTTPError as ex:\n self.abort('Unable to register client {client_name}: [{status_code}] {message}'.format(\n client_name=self.args.client_name,\n status_code=ex.code,\n message=ex.read().decode('utf-8').strip()))\n\n while True:\n get_request = Request(self.client_url_details)\n get_request.add_header('Authorization', 'Bearer {}'.format(self.args.registration_credentials))\n try:\n with urlopen(get_request) as response: # nosec\n response_body = response.read().decode('utf-8')\n except HTTPError as ex:\n if ex.code != 404:\n self.abort('Unable to fetch client {client_name}: [{status_code}] {message}'.format(\n client_name=self.args.client_name,\n status_code=ex.code,\n message=ex.read().decode('utf-8').strip()))\n sleep(2)\n else:\n client_info = loads(response_body)\n break\n\n return {\n 'OPWEN_CLIENT_ID': client_info['client_id'],\n 'OPWEN_REMOTE_ACCOUNT_NAME': client_info['storage_account'],\n 'OPWEN_REMOTE_ACCOUNT_KEY': client_info['storage_key'],\n 'OPWEN_REMOTE_RESOURCE_CONTAINER': client_info['resource_container'],\n }\n\n @property\n def client_domain(self):\n return '{}.{}'.format(self.args.client_name, self.args.client_domain)\n\n @property\n def client_url_create(self):\n return 'https://{}/api/email/register/'.format(self.args.server_host)\n\n @property\n def client_url_details(self):\n return 'https://{}/api/email/register/{}'.format(self.args.server_host, self.client_domain)\n\n @property\n def is_enabled(self):\n if ':' in self.args.registration_credentials:\n self.abort('Registration credential should be set to Github access token, not username and password')\n\n return self.args.sim_type != 'LocalOnly'\n\n\nclass WebappSetup(Setup):\n packages = (\n 'python3-bcrypt',\n 'libffi-dev',\n 'libssl-dev',\n 'libjpeg-dev',\n 'libopenjp2-7',\n 'libtiff5',\n 'nginx',\n 'python3',\n 'python3-dev',\n 'python3-pip',\n 'python3-venv',\n 'supervisor',\n )\n\n def __init__(self, args, abort, app_config):\n super().__init__(args, abort)\n self.app_config = app_config\n\n def _run(self):\n self._create_virtualenv()\n self._install_client()\n self._compile_translations()\n self._setup_secrets()\n self._create_admin_user()\n self._install_nginx()\n self._setup_gunicorn()\n self._setup_celery()\n self._setup_cron()\n self._setup_restarter()\n self._reboot()\n\n def _create_virtualenv(self):\n self.sh('{python} -m venv \"{venv_path}\"'.format(\n python=current_python_binary,\n venv_path=self.venv_path),\n user=self.user)\n\n self._pip_install('pip', 'setuptools', 'wheel')\n\n def _install_client(self):\n if self.args.client_dist and Path(self.args.client_dist).is_file():\n package = self.args.client_dist\n elif self.args.client_version:\n package = 'opwen_email_client=={}'.format(self.args.client_version)\n else:\n package = 'opwen_email_client'\n\n self._pip_install(package)\n\n def _compile_translations(self):\n self.sh('\"{pybabel}\" compile -d \"{translations}\"'.format(\n pybabel='{}/bin/pybabel'.format(self.venv_path),\n translations=self.abspath(self.webapp_files_root / 'translations')),\n user=self.user)\n\n def _setup_secrets(self):\n extra_settings = {\n 'OPWEN_APP_ROOT': self.args.app_root,\n 'OPWEN_STATE_DIRECTORY': self.abspath(self.args.state_directory),\n 'OPWEN_SESSION_KEY': generate_secret(32),\n 
'OPWEN_MAX_UPLOAD_SIZE_MB': self.args.max_upload_size,\n 'OPWEN_SIM_TYPE': self.args.sim_type,\n 'OPWEN_EMAIL_SERVER_HOSTNAME': self.args.server_host,\n 'OPWEN_CLIENT_NAME': self.args.client_name,\n 'OPWEN_ROOT_DOMAIN': self.args.client_domain,\n 'OPWEN_RESTART_PATH': ','.join((\n '{}=HUP'.format(self.abspath(self.restarter_directory / self.args.server_name)),\n '{}='.format(self.abspath(self.restarter_directory / self.args.worker_name)),\n '{}='.format(self.abspath(self.restarter_directory / self.args.cron_name)),\n )),\n }\n\n self.write_file(self.settings_path, (\n '{}={}'.format(key, value)\n for settings in (extra_settings, self.app_config)\n for (key, value) in settings.items()))\n\n def _create_admin_user(self):\n if self.args.admin == 'no':\n return\n\n self.sh('OPWEN_SETTINGS=\"{settings}\" '\n 'export FLASK_APP=\"opwen_email_client.webapp:app\" '\n '\"{manage}\" createadmin --name=\"{name}\" --password=\"{password}\"'.format(\n settings=self.settings_path,\n manage='{}/bin/flask manage'.format(self.venv_path),\n name=self.args.admin_name,\n password=self.args.admin_password),\n user=self.user)\n\n def _install_nginx(self):\n self.write_file('/etc/nginx/sites-available/default', '''\n server {{\n listen {port};\n server_name localhost;\n\n location = {app_root}/favicon.ico {{\n alias {files_root}/static/favicon.ico;\n }}\n\n location ~ ^{app_root}/static/(.*)$ {{\n alias {files_root}/static/$1;\n }}\n\n location {app_root}/ {{\n include proxy_params;\n proxy_pass http://unix:{socket};\n }}\n }}'''.format(\n port=self.args.port,\n app_root=self.args.app_root,\n files_root=self.abspath(self.webapp_files_root),\n socket=self.socket_path))\n\n if self.args.log_directory == '-':\n access_log = 'stdout'\n error_log = 'stderr'\n else:\n access_log = self.abspath(Path(self.args.log_directory) / 'nginx_access.log')\n error_log = self.abspath(Path(self.args.log_directory) / 'nginx_error.log')\n\n self.write_file('/etc/nginx/nginx.conf', '''\n user www-data;\n worker_processes 4;\n pid /run/nginx.pid;\n\n events {{\n worker_connections 768;\n }}\n\n http {{\n sendfile on;\n tcp_nopush on;\n tcp_nodelay on;\n keepalive_timeout 65;\n types_hash_max_size 2048;\n include /etc/nginx/mime.types;\n default_type application/octet-stream;\n ssl_protocols TLSv1 TLSv1.1 TLSv1.2;\n ssl_prefer_server_ciphers on;\n access_log {access_log};\n error_log {error_log};\n gzip on;\n gzip_disable \"msie6\";\n client_max_body_size {max_upload_size}M;\n include /etc/nginx/conf.d/*.conf;\n include /etc/nginx/sites-enabled/*;\n\n fastcgi_connect_timeout {timeout_seconds};\n fastcgi_send_timeout {timeout_seconds};\n fastcgi_read_timeout {timeout_seconds};\n }}'''.format(\n access_log=access_log,\n error_log=error_log,\n max_upload_size=self.args.max_upload_size,\n timeout_seconds=self.args.timeout))\n\n self.sh('systemctl stop nginx', accept_failure=True)\n self.sh('systemctl disable nginx', accept_failure=True)\n\n self.create_daemon(\n program_name=self.args.nginx_name,\n command='/usr/sbin/nginx -g \"daemon off;\"',\n user='root')\n\n def _setup_gunicorn(self):\n gunicorn_script = (\n '\"{venv}/bin/gunicorn\" '\n '--bind=\"unix:{socket}\" '\n '--timeout={timeout} '\n '--workers={workers} '\n '--log-level={loglevel} '\n 'opwen_email_client.webapp:app'.format(\n venv=self.venv_path,\n socket=self.socket_path,\n timeout=self.args.timeout,\n workers=self.args.num_gunicorn_workers,\n loglevel=self.args.log_level))\n\n self.create_daemon(\n program_name=self.args.server_name,\n command=gunicorn_script,\n 
env={'OPWEN_SETTINGS': self.settings_path})\n\n def _setup_celery(self):\n celery_command = (\n '\"{venv}/bin/celery\" '\n '--app=opwen_email_client.webapp.tasks '\n 'worker '\n '--loglevel={loglevel} '\n '--concurrency={workers}'.format(\n venv=self.venv_path,\n loglevel=self.args.log_level,\n workers=self.args.num_celery_workers))\n\n self.create_daemon(\n program_name=self.args.worker_name,\n command=celery_command,\n env={'OPWEN_SETTINGS': self.settings_path})\n\n def _setup_cron(self):\n celery_command = (\n '\"{venv}/bin/celery\" '\n '--app=opwen_email_client.webapp.tasks '\n 'beat '\n '--pidfile=\"{cronstate_pid}\" '\n '--loglevel={loglevel} '.format(\n settings=self.settings_path,\n cronstate_pid=self.cronstate_pid,\n venv=self.venv_path,\n loglevel=self.args.log_level))\n\n self.create_daemon(\n program_name=self.args.cron_name,\n command=celery_command,\n env={'OPWEN_SETTINGS': self.settings_path})\n\n def _setup_restarter(self):\n restarter_command = (\n 'export FLASK_APP=\"opwen_email_client.webapp:app\" && \"{venv}/bin/flask\" '\n 'manage restarter '\n '--directory=\"{directory}\"'.format(\n venv=self.venv_path,\n directory=self.abspath(self.restarter_directory)))\n\n self.create_daemon(\n program_name=self.args.restarter_name,\n command=restarter_command,\n user='root')\n\n def _reboot(self):\n LOG.info('All done. Lokole client %s is ready to be used.', self.args.client_name)\n\n if self.args.reboot == 'yes':\n LOG.info('System is rebooting.')\n self.sh('shutdown --reboot now', user='root')\n\n def _pip_install(self, *packages):\n self.sh('\"{pip}\" install --no-cache-dir --upgrade {packages}'.format(\n pip='{}/bin/pip'.format(self.venv_path),\n packages=' '.join(packages)),\n retry_attempts=60,\n retry_interval=5,\n user=self.user)\n\n @property\n def webapp_files_root(self):\n return (Path(self.venv_path) /\n 'lib' /\n 'python{}.{}'.format(version_info.major, version_info.minor) /\n 'site-packages' /\n 'opwen_email_client' /\n 'webapp')\n\n @property\n def socket_path(self):\n return self.abspath(Path(self.args.state_directory)\n / '{}.sock'.format(self.args.server_name))\n\n @property\n def settings_path(self):\n return self.abspath(Path(self.args.state_directory)\n / 'settings.env')\n\n @property\n def cronstate_pid(self):\n return self.abspath(Path(self.args.state_directory)\n / '{}.pid'.format(self.args.cron_name))\n\n @property\n def restarter_directory(self):\n return Path(self.args.state_directory) / self.args.restarter_name\n\n @property\n def venv_path(self):\n return self.abspath(Path(self.args.venv_directory))\n\n\ndef generate_secret(length, chars=frozenset(ascii_letters + digits)):\n secret = '' # nosec\n\n while len(secret) < length:\n for char in urandom(length).decode('ascii', errors='ignore'):\n if char in chars:\n secret += char\n\n return secret[:length]\n\n\ndef _dump_state(args):\n with Path(__file__).open('r', encoding='utf-8') as fobj:\n version = hash(fobj.read())\n\n state_path = TEMP_ROOT / 'state_{:.0f}.json'.format(time())\n\n with state_path.open('w', encoding='utf-8') as fobj:\n fobj.write(dumps({\n 'version': version,\n 'args': args.__dict__,\n }))\n\n\ndef main(args, abort):\n LOG.setLevel(args.script_log_level)\n LOG.addHandler(StreamHandler())\n\n _dump_state(args)\n\n app_config = {}\n\n system_setup = SystemSetup(args, abort)\n system_setup()\n\n wifi_setup = WifiSetup(args, abort)\n wifi_setup()\n\n modem_setup = ModemSetup(args, abort)\n app_config.update(modem_setup() or {})\n\n client_setup = ClientSetup(args, abort)\n 
app_config.update(client_setup() or {})\n\n webapp_setup = WebappSetup(args, abort, app_config)\n webapp_setup()\n\n\ndef cli():\n parser = ArgumentParser(description=__doc__)\n\n parser.add_argument('client_name', type=str.lower, help=(\n 'The name that should be assigned to the Lokole device '\n 'that is being configured by this script. Usually this '\n 'will be a name that is descriptive for the location '\n 'where the device will be deployed. The client name '\n 'should be globally unique as it is used as the key for '\n 'a bunch of things.'\n ))\n parser.add_argument('sim_type', choices=SIM_TYPES, help=(\n 'The mobile network to which to connect to upload data.'\n ))\n parser.add_argument('sync_schedule', nargs='?', help=(\n 'How often the Lokole should sync with the server. '\n 'In cron syntax. '\n 'Example: \"34 * * * *\" for once per hour at the 34th minute.'\n ))\n parser.add_argument('registration_credentials', nargs='?', help=(\n 'Github access token for registering with the Lokole server.'\n ))\n parser.add_argument('--app_root', default=getenv('OPWEN_APP_ROOT', ''), help=(\n 'The URL prefix at which the app will be accessible.'\n ))\n parser.add_argument('--admin', default=getenv('LOKOLE_ADMIN', 'yes'), help=(\n 'If set to \"no\", skip creation of application admin user.'\n ))\n parser.add_argument('--admin_name', default=getenv('LOKOLE_ADMIN_NAME', 'admin'), help=(\n 'If set, create an admin user with this account name.'\n ))\n parser.add_argument('--admin_password', default=getenv('LOKOLE_ADMIN_PASSWORD', 'lokole1Admin'), help=(\n 'If set, create an admin user with this password.'\n ))\n parser.add_argument('--password', default=getenv('LOKOLE_PASSWORD', ''), help=(\n 'If set to a non-empty string, updates the password of '\n 'the current user to this value as part of the setup. 
'\n 'Useful for fully automated setups of new devices that '\n 'come with a default insecure password.'\n ))\n parser.add_argument('--system_setup', default=getenv('LOKOLE_SYSTEM_SETUP', 'yes'), help=(\n 'If set to \"no\", skip system setup.'\n ))\n parser.add_argument('--reboot', default=getenv('LOKOLE_REBOOT', 'yes'), help=(\n 'If set to \"no\", skip system reboot after setup.'\n ))\n parser.add_argument('--wifi', default=getenv('LOKOLE_WIFI', 'yes'), help=(\n 'If set to \"no\", skip setup of WiFi access point and '\n 'local DNS server configuration.'\n ))\n parser.add_argument('--wifi_name', default=getenv('LOKOLE_NETWORK_NAME', 'Lokole'), help=(\n 'The name of the WiFi network to create for the Lokole email app.'\n ))\n parser.add_argument('--wifi_password', default=getenv('LOKOLE_NETWORK_PASSWORD', 'Ascoderu'), help=(\n 'The password of the WiFi network to create for the Lokole email app.'\n ))\n parser.add_argument('--script_log_level', default=getenv('LOKOLE_SCRIPT_LOG_LEVEL', 'INFO'), help=(\n 'The logging verbosity of this script.'\n ))\n parser.add_argument('--server_host', default=getenv('LOKOLE_SERVER_HOST', 'mailserver.lokole.ca'), help=(\n 'The host of the email sync server to use.'\n ))\n parser.add_argument('--client_domain', default=getenv('LOKOLE_CLIENT_DOMAIN', 'lokole.ca'), help=(\n 'The root domain for which to set up the Lokole email app.'\n ))\n parser.add_argument('--client_version', default=getenv('LOKOLE_CLIENT_VERSION', ''), help=(\n 'The version of the Lokole email app to install.'\n ))\n parser.add_argument('--client_dist', default=getenv('LOKOLE_CLIENT_DIST', ''), help=(\n 'The dist package of the Lokole email app to install.'\n ))\n parser.add_argument('--port', default=getenv('LOKOLE_PORT', '80'), help=(\n 'The port on which to run the Lokole email app.'\n ))\n parser.add_argument('--state_directory', default=getenv('LOKOLE_STATE_DIRECTORY', 'lokole/state'), help=(\n 'The location where to store the Lokole email app state.'\n ))\n parser.add_argument('--log_directory', default=getenv('LOKOLE_LOG_DIRECTORY', 'lokole/logs'), help=(\n 'The location where to store the Lokole email app logs.'\n ))\n parser.add_argument('--venv_directory', default=getenv('LOKOLE_VENV_DIRECTORY', 'lokole/venv'), help=(\n 'The location where to store the Lokole email app Python environment.'\n ))\n parser.add_argument('--server_name', default=getenv('LOKOLE_SERVER_NAME', 'lokole_gunicorn'), help=(\n 'Name of the Lokole webapp server.'\n ))\n parser.add_argument('--nginx_name', default=getenv('LOKOLE_NGINX_NAME', 'lokole_nginx'), help=(\n 'Name of the Nginx service.'\n ))\n parser.add_argument('--worker_name', default=getenv('LOKOLE_WORKER_NAME', 'lokole_celery_worker'), help=(\n 'Name of the Lokole webapp worker.'\n ))\n parser.add_argument('--cron_name', default=getenv('LOKOLE_CRON_NAME', 'lokole_celery_beat'), help=(\n 'Name of the Lokole cron worker.'\n ))\n parser.add_argument('--restarter_name', default=getenv('LOKOLE_RESTARTER_NAME', 'lokole_restarter'), help=(\n 'Name of the Lokole restarter.'\n ))\n parser.add_argument('--log_level', default=getenv('LOKOLE_LOG_LEVEL', 'error'), help=(\n 'The log level for the Lokole email app.'\n ))\n parser.add_argument('--timeout', type=int, default=300, help=(\n 'Timeout for the Lokole email app. In seconds.'\n ))\n parser.add_argument('--max_upload_size', type=int, default=10, help=(\n 'Maximum allowed size of uploads to the Lokole email app. 
In MB.'\n ))\n parser.add_argument('--num_celery_workers', type=int, default=2, help=(\n 'Number of celery workers for the Lokole email app.'\n ))\n parser.add_argument('--num_gunicorn_workers', type=int, default=max(2, cpu_count() - 1), help=(\n 'Number of gunicorn workers for the Lokole email app.'\n ))\n parser.add_argument('--locale', default=getenv('LOKOLE_LOCALE', 'en_GB.UTF-8'), help=(\n 'Locale to set up on the system.'\n ))\n parser.add_argument('--timezone', default=getenv('LOKOLE_TIMEZONE', 'Etc/UTC'), help=(\n 'Timezone to set up on the system.'\n ))\n\n main(parser.parse_args(), parser.error)\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"ascoderu/lokole","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":32122,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"2"}
+{"seq_id":"44298672989","text":"import os, time\nimport argparse\nimport json, jsonlines\nfrom tqdm import tqdm\nimport logging\n\n# from transformers import AutoTokenizer, AutoModelForCausalLM\n# import transformers\nimport torch\nfrom vllm import LLM, SamplingParams\n\nfrom data import load_dataset\nfrom build_prompt import build_demo, build_prompt\n\n\n# Configure the logger\nlogging.basicConfig(\n level=logging.INFO, \n format='%(asctime)s - %(name)s:%(lineno)s - %(levelname)s - %(message)s',\n # filename='app.log', # Uncomment this if you want to log to a file\n # filemode='w', # Overwrites the log file every time\n) \n\nlogger = logging.getLogger(__name__)\n\n\ndef main(args):\n\n # Load Data\n dataset_name = args.input_path.split(\"/\")[-2]\n input_data = load_dataset(args.input_path, dataset_name=dataset_name)\n\n if args.debug:\n logger.info(\"Debug mode. Only process the first two examples. \")\n input_data = input_data[:2]\n else:\n if args.max_samples > 0:\n logger.info(f'\"--max_samples\" is set. Only process the first {args.max_samples} examples. ')\n input_data = input_data[:args.max_samples]\n else:\n logger.info(f\"Process all {len(input_data)} examples. \")\n \n sampling_params = SamplingParams(\n temperature=0.0,\n top_p=1.0,\n max_tokens=args.max_tokens,\n stop=\"Problem No.{}\".format(len(args.demo_indices)+2),\n # n=5,\n # use_beam_search=True,\n )\n llm = LLM(model=args.model_name_or_path, tensor_parallel_size=args.num_gpus, max_num_batched_tokens=args.max_tokens_total)\n \n # \n output_path = args.output_path\n \n output_path = output_path.rstrip(\".json\")\n output_path = output_path + \".demo-{}.json\".format(\"_\".join([str(_) for _ in args.demo_indices]))\n\n all_responses = []\n if os.path.exists(output_path) and not args.debug and not args.overwrite_output:\n with open(output_path, \"r\") as f:\n all_responses = [json.loads(line) for line in f.readlines() if line.strip()]\n logger.info(f\"Continue from {len(all_responses)}-th example.\")\n logger.info(f\"Number of examples to be processed: {len(input_data)-len(all_responses)}\")\n else:\n logger.info(f\"Start from the beginning.\")\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n \n prompt_demo = build_demo(\n demo_indices=args.demo_indices,\n demonstration_path=args.demonstration_path,\n dataset_name=dataset_name,\n mode=args.mode,\n )\n\n if args.mode == \"cot\":\n system_message = \"You are a helpful, pattern-following assistant that helps people solve problems. \"\n elif args.mode == \"prolog\":\n system_message = \"You are a helpful, pattern-following assistant that helps people solve problems using Prolog. 
\"\n \n prompt_list = []\n for example in tqdm(input_data[len(all_responses):], total=len(input_data[len(all_responses):])):\n prompt = build_prompt(\n example=example,\n prompt_demo=prompt_demo,\n dataset_name=dataset_name,\n mode=args.mode,\n instruct=args.instruct,\n )\n # import pdb; pdb.set_trace()\n # prompt = f\"[INST] <>\\\\n{system_message}\\\\n<>\\\\n\\\\n{prompt}[/INST]\"\n prompt_list.append(prompt)\n\n if args.debug:\n # for i, _ in enumerate(all_responses):\n # logger.info(f\"****** Input-{i+1} ****** \\n\")\n # print(json.dumps(input_data[i]))\n # logger.info(f\"***** Response-{i+1} ***** \\n\")\n # print(json.dumps(_))\n pass\n else:\n\n outputs = llm.generate(prompt_list, sampling_params)\n # import pdb; pdb.set_trace()\n\n # Print the outputs.\n # for output in outputs:\n # prompt = output.prompt\n # generated_text = output.outputs[0].text\n # print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n all_responses = [\n {\n \"prompt\": output.prompt,\n \"response\": [_.text for _ in output.outputs]\n }\n for output in outputs\n ]\n\n with open(output_path, \"w\") as f:\n f.writelines([json.dumps(_)+\"\\n\" for _ in all_responses])\n logger.info(\"*\"*20)\n logger.info(f\"Finished. Output saved to {output_path}. \")\n logger.info(\"*\"*20)\n \n \nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--input_path', type=str, required=True)\n argparser.add_argument('--demonstration_path', type=str, required=True)\n argparser.add_argument('--output_path', type=str, required=True)\n argparser.add_argument('--model_name_or_path', type=str, required=True)\n argparser.add_argument('--debug', action='store_true')\n argparser.add_argument('--demo_indices', nargs='+', type=int, required=True)\n argparser.add_argument('--max_tokens_total', type=int, default=16384, help=\"Max number of total tokens (prompt + generated). Default is Code-LLaMA max-len = 16384 \")\n argparser.add_argument('--max_tokens', type=int, default=2048, help=\"Max number of generated tokens. Default is 2048. \")\n argparser.add_argument('--num_gpus', type=int, default=1, help=\"Number of GPUs to use. \")\n argparser.add_argument('--self_debug', action='store_true', help=\"Adopt self-debugging mode, which execute the GPT's response and prompt GPT with error messages if the execution fails. \")\n argparser.add_argument('--self_debug_limit', type=int, default=3, help=\"The maximum number of self-debugging trials. Default is 3. \")\n argparser.add_argument('--max_samples', type=int, default=-1, help=\"The maximum number of samples to be processed. Default is -1. \")\n argparser.add_argument('--mode', type=str, required=True, choices=['cot', 'prolog', 'direct'])\n argparser.add_argument('--overwrite_output', action='store_true', help=\"Whether to overwrite the output file, or to read the output file and continue from the break point. \")\n argparser.add_argument('--instruct', action='store_true', help=\"Whether to use instruction-style prompt. If False, use ICL-style prompt. \")\n args = argparser.parse_args()\n\n if args.debug:\n logger.info(\"Debug mode. \")\n main(args)","repo_name":"DAMO-NLP-SG/CaRing","sub_path":"src/run_generation.py","file_name":"run_generation.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"}
+{"seq_id":"22394231640","text":"# coding=utf-8\r\nimport sys\r\nimport requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n'''\r\n由于引用了第三方模块,请先 pip install beautifulsoup4\r\n'''\r\n'''\r\n函数说明:月度综合指数表\r\n参数说明:page 总共爬取页数 默认为10页\r\n数据较多,需要运行一小段时间,请耐心等候\r\n'''\r\n\r\ndef getCompositeIndexData(page=10):\r\n fileName = '../data/compositeindex/compositedata.csv'\r\n with open(fileName, 'w+') as f:\r\n for i in range(1, int(page)+1):\r\n url = \"http://sspi.csi.com.cn/ydzhzsb_\" + str(i) + \".html\"\r\n r = requests.get(url)\r\n html = r.text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n table = soup.table\r\n if i == 1:\r\n thead = table.thead\r\n trArray = thead.find_all('tr')\r\n td1Array = trArray[0].find_all('td')\r\n headtime = td1Array[0].text.strip()\r\n # headqishu = td1Array[1].text.strip()\r\n headzonghe = td1Array[2].text.strip()\r\n # headgansa = td1Array[3].text.strip()\r\n # headyouchuan = td1Array[4].text.strip()\r\n # headhuochuan = td1Array[5].text.strip()\r\n # 文件第一行\r\n title = headtime + \",\" + headzonghe\r\n # print(title)\r\n f.write(title)\r\n f.write('\\n')\r\n\r\n tbody = table.tbody\r\n for tr in tbody.find_all('tr'):\r\n tdarr = tr.find_all('td')\r\n # print(len(tdarr))\r\n # print(tdarr)\r\n time = \"\"\r\n zhonghe = \"\"\r\n for i in range(len(tdarr) - 1):\r\n if i == 0:\r\n time = tdarr[i].text\r\n elif i==2:\r\n zhonghe = tdarr[i].text\r\n break\r\n # content = content + tdarr[i].text + \",\"\r\n # content = content[:-1]\r\n content = time + \",\" + zhonghe\r\n if len(content) != 1:\r\n f.write(content)\r\n f.write('\\n')\r\n\r\nif __name__ == '__main__':\r\n getCompositeIndexData(8)\r\n","repo_name":"neil-yc/crawler","sub_path":"datacrawler/src/compositeIndexCrawler.py","file_name":"compositeIndexCrawler.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23955722750","text":"from cStringIO import StringIO\nfrom gettext import gettext as _\nimport copy\nimport httplib\nimport json\nimport logging\nimport os\nimport re\nimport traceback\nimport urlparse\n\nfrom nectar.downloaders.threaded import HTTPThreadedDownloader\nfrom nectar.listener import AggregatingEventListener\nfrom nectar.report import DownloadReport\nfrom nectar.request import DownloadRequest\nfrom pulp.server import exceptions as pulp_exceptions\n\nfrom pulp.plugins.util import misc\n\nfrom pulp_docker.common import constants, error_codes\nfrom pulp_docker.plugins import models\nfrom pulp_docker.plugins import auth_util\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass V1Repository(object):\n \"\"\"\n This class represents a Docker v1 repository.\n \"\"\"\n ANCESTRY_PATH = '/v1/images/%s/ancestry'\n DOCKER_TOKEN_HEADER = 'x-docker-token'\n DOCKER_ENDPOINT_HEADER = 'x-docker-endpoints'\n IMAGES_PATH = '/v1/repositories/%s/images'\n TAGS_PATH = '/v1/repositories/%s/tags'\n API_VERSION_CHECK_PATH = '/v1/_ping'\n\n def __init__(self, name, download_config, registry_url, working_dir):\n \"\"\"\n Initialize the V1Repository.\n\n :param name: name of a docker repository\n :type name: basestring\n :param download_config: download configuration object\n :type download_config: nectar.config.DownloaderConfig\n :param registry_url: URL for the docker registry\n :type registry_url: basestring\n :param working_dir: full path to the directory where files should\n be saved\n :type working_dir: basestring\n \"\"\"\n self.name = name\n self.download_config = download_config\n self.registry_url = registry_url\n self.listener = AggregatingEventListener()\n self.downloader = HTTPThreadedDownloader(self.download_config, self.listener)\n self.working_dir = working_dir\n self.token = None\n self.endpoint = None\n\n def _get_single_path(self, path):\n \"\"\"\n Retrieve a single path within the upstream registry, and return its\n body after deserializing it as json\n\n :param path: a full http path to retrieve that will be urljoin'd to the\n upstream registry url.\n :type path: basestring\n\n :return: whatever gets deserialized out of the response body's json\n \"\"\"\n # if talking to docker hub, we'll get an endpoint specified, and then we'll have to get\n # tags from that endpoint instead of talking to the original feed URL.\n if self.endpoint:\n # we assume the same scheme that the registry URL used\n registry_url_parts = urlparse.urlsplit(self.registry_url)\n parts = urlparse.SplitResult(scheme=registry_url_parts.scheme, netloc=self.endpoint,\n path=path, query=None, fragment=None)\n url = urlparse.urlunsplit(parts)\n else:\n url = urlparse.urljoin(self.registry_url, path)\n request = DownloadRequest(url, StringIO())\n if path.endswith('/images'):\n # this is required by the docker index and indicates that it should\n # return an auth token\n if request.headers is None:\n request.headers = {}\n request.headers[self.DOCKER_TOKEN_HEADER] = 'true'\n # endpoints require auth\n if self.endpoint:\n self.add_auth_header(request)\n\n report = self.downloader.download_one(request)\n if report.state == report.DOWNLOAD_FAILED:\n raise IOError(report.error_msg)\n\n self._parse_response_headers(report.headers)\n return json.loads(report.destination.getvalue())\n\n def _parse_response_headers(self, headers):\n \"\"\"\n Some responses can include header information that we need later. 
This\n grabs those values and stores them for later use.\n\n :param headers: dictionary-like object where keys are HTTP header names\n and values are their values.\n :type headers: dict\n \"\"\"\n # this is used for authorization on an endpoint\n if self.DOCKER_TOKEN_HEADER in headers:\n self.token = headers[self.DOCKER_TOKEN_HEADER]\n # this tells us what host to use when accessing image files\n if self.DOCKER_ENDPOINT_HEADER in headers:\n self.endpoint = headers[self.DOCKER_ENDPOINT_HEADER]\n\n def api_version_check(self):\n \"\"\"\n Make a call to the registry URL's /v1/_ping API call to determine if the registry supports\n API v1.\n\n :return: True if the v1 API is found, else False\n :rtype: bool\n \"\"\"\n _logger.debug('Determining if the registry URL can do v1 of the Docker API.')\n\n try:\n self._get_single_path(self.API_VERSION_CHECK_PATH)\n except IOError:\n return False\n\n return True\n\n def add_auth_header(self, request):\n \"\"\"\n Given a download request, add an Authorization header if we have an\n auth token available.\n\n :param request: a download request\n :type request: nectar.request.DownloadRequest\n \"\"\"\n if self.token:\n if request.headers is None:\n request.headers = {}\n # this emulates what docker itself does\n request.headers['Authorization'] = 'Token %s' % self.token\n\n def get_image_ids(self):\n \"\"\"\n Get a list of all images in the upstream repository. This is\n conceptually a little ambiguous, as there can be images in a repo that\n are neither tagged nor in the ancestry for a tagged image.\n\n :return: list of image IDs in the repo\n :rtype: list\n\n :raises pulp_exceptions.PulpCodedException: if fetching the IDs fails\n \"\"\"\n path = self.IMAGES_PATH % self.name\n\n _logger.debug('retrieving image ids from remote registry')\n try:\n raw_data = self._get_single_path(path)\n except IOError as e:\n _logger.debug(traceback.format_exc())\n raise pulp_exceptions.PulpCodedException(error_code=error_codes.DKR1007,\n repo=self.name,\n registry=self.registry_url,\n reason=str(e))\n\n return [item['id'] for item in raw_data]\n\n def get_image_url(self):\n \"\"\"\n Get a URL for the registry or the endpoint, for use in retrieving image\n files. 
The \"endpoint\" is a host name that might be returned in a header\n when retrieving repository data above.\n\n :return: a url that is either the provided registry url, or if an\n endpoint is known, that same url with the host replaced by\n the endpoint\n :rtype: basestring\n \"\"\"\n if self.endpoint:\n parts = list(urlparse.urlsplit(self.registry_url))\n parts[1] = self.endpoint\n return urlparse.urlunsplit(parts)\n else:\n return self.registry_url\n\n def get_tags(self):\n \"\"\"\n Get a dictionary of tags from the upstream repo.\n\n :return: a dictionary where keys are tag names, and values are either\n full image IDs or abbreviated image IDs.\n :rtype: dict\n \"\"\"\n repo_name = self.name\n # this is a quirk of the docker registry API.\n if '/' not in repo_name:\n repo_name = 'library/' + repo_name\n\n path = self.TAGS_PATH % repo_name\n\n _logger.debug('retrieving tags from remote registry')\n raw_data = self._get_single_path(path)\n # raw_data will sometimes be a list of dicts, and sometimes just a dict,\n # depending on what version of the API we're talking to.\n if isinstance(raw_data, list):\n return dict((tag['name'], tag['layer']) for tag in raw_data)\n return raw_data\n\n def get_ancestry(self, image_ids):\n \"\"\"\n Retrieve the \"ancestry\" file for each provided image ID, and save each\n in a directory whose name is the image ID.\n\n :param image_ids: list of image IDs for which the ancestry file\n should be retrieved\n :type image_ids: list\n\n :raises IOError: if a download fails\n \"\"\"\n requests = []\n for image_id in image_ids:\n path = self.ANCESTRY_PATH % image_id\n url = urlparse.urljoin(self.get_image_url(), path)\n destination = os.path.join(self.working_dir, image_id, 'ancestry')\n misc.mkdir(os.path.split(destination)[0])\n\n request = DownloadRequest(url, destination)\n self.add_auth_header(request)\n requests.append(request)\n\n _logger.debug('retrieving ancestry files from remote registry')\n self.downloader.download(requests)\n if len(self.listener.failed_reports):\n raise IOError(self.listener.failed_reports[0].error_msg)\n\n def create_download_request(self, image_id, file_name, destination_dir):\n \"\"\"\n Return a DownloadRequest instance for the given file name and image ID.\n It is desirable to download the actual layer files with a separate\n downloader (for progress tracking, etc), so we just create the download\n requests here and let them get processed elsewhere.\n\n This adds the Authorization header if a token is known for this\n repository.\n\n :param image_id: unique ID of a docker image\n :type image_id: basestring\n :param file_name: name of the file, one of \"ancestry\", \"json\",\n or \"layer\"\n :type file_name: basestring\n :param destination_dir: full path to the directory where file should\n be saved\n :type destination_dir: basestring\n\n :return: a download request instance\n :rtype: nectar.request.DownloadRequest\n \"\"\"\n url = self.get_image_url()\n req = DownloadRequest(urlparse.urljoin(url, '/v1/images/%s/%s' % (image_id, file_name)),\n os.path.join(destination_dir, file_name))\n self.add_auth_header(req)\n return req\n\n\nclass V2Repository(object):\n \"\"\"\n This class represents a Docker v2 repository.\n \"\"\"\n API_VERSION_CHECK_PATH = '/v2/'\n LAYER_PATH = '/v2/{name}/blobs/{digest}'\n MANIFEST_PATH = '/v2/{name}/manifests/{reference}'\n TAGS_PATH = '/v2/{name}/tags/list'\n\n def __init__(self, name, download_config, registry_url, working_dir):\n \"\"\"\n Initialize the V2Repository.\n\n :param name: name of a docker 
repository\n :type name: basestring\n :param download_config: download configuration object\n :type download_config: nectar.config.DownloaderConfig\n :param registry_url: URL for the docker registry\n :type registry_url: basestring\n :param working_dir: full path to the directory where files should\n be saved\n :type working_dir: basestring\n \"\"\"\n\n # Docker's registry aligns non-namespaced images to the library namespace.\n # if we have a docker registry image, and no namespace, add the library\n # namespace to the image name.\n\n if '/' not in name and re.search(r'registry[-,\\w]*.docker.io', registry_url, re.IGNORECASE):\n self.name = \"library/\" + name\n else:\n self.name = name\n\n self.download_config = download_config\n self.registry_url = registry_url\n\n # Use basic auth information for retrieving tokens from auth server and for downloading\n # with basic auth\n self.auth_downloader = HTTPThreadedDownloader(copy.deepcopy(self.download_config),\n AggregatingEventListener())\n self.download_config.basic_auth_username = None\n self.download_config.basic_auth_password = None\n self.downloader = HTTPThreadedDownloader(self.download_config, AggregatingEventListener())\n self.working_dir = working_dir\n self.token = None\n\n def api_version_check(self):\n \"\"\"\n Make a call to the registry URL's /v2/ API call to determine if the registry supports API\n v2.\n\n :return: True if the v2 API is found, else False\n :rtype: bool\n \"\"\"\n _logger.debug('Determining if the registry URL can do v2 of the Docker API.')\n\n try:\n headers, body = self._get_path(self.API_VERSION_CHECK_PATH)\n except IOError:\n return False\n\n try:\n version = headers['Docker-Distribution-API-Version']\n if version != \"registry/2.0\":\n return False\n _logger.debug(_('The docker registry is using API version: %(v)s') % {'v': version})\n except KeyError:\n # If the Docker-Distribution-API-Version header isn't present, we will assume that this\n # is a valid Docker 2.0 API server so that simple file-based webservers can serve as our\n # remote feed.\n pass\n\n return True\n\n def create_blob_download_request(self, digest):\n \"\"\"\n Return a DownloadRequest instance for the given blob digest.\n It is desirable to download the blob files with a separate\n downloader (for progress tracking, etc), so we just create the download\n requests here and let them get processed elsewhere.\n\n :param digest: digest of the docker blob you wish to download\n :type digest: basestring\n\n :return: a download request instance\n :rtype: nectar.request.DownloadRequest\n \"\"\"\n path = self.LAYER_PATH.format(name=self.name, digest=digest)\n url = urlparse.urljoin(self.registry_url, path)\n req = DownloadRequest(url, os.path.join(self.working_dir, digest))\n return req\n\n def get_manifest(self, reference, headers=True, tag=True):\n \"\"\"\n Get the manifest and its digest for the given reference.\n\n :param reference: The reference (tag or digest) of the Manifest you wish to retrieve.\n :type reference: basestring\n :param headers: True if headers with accepted media type should be sent in the request\n :type headers: bool\n :param tag: True if the manifest should be retrieved by tag\n :type tag: bool\n\n :return: A 2-tuple of the digest and the manifest, both basestrings\n :rtype: tuple\n \"\"\"\n manifests = []\n request_headers = {}\n content_type_header = 'content-type'\n path = self.MANIFEST_PATH.format(name=self.name, reference=reference)\n # we need to skip the check of returned mediatype in case we pull\n # the 
manifest by digest\n if headers:\n # set the headers for first request\n request_headers['Accept'] = ','.join((constants.MEDIATYPE_MANIFEST_S2,\n constants.MEDIATYPE_MANIFEST_LIST,\n constants.MEDIATYPE_MANIFEST_S1,\n constants.MEDIATYPE_SIGNED_MANIFEST_S1))\n response_headers, manifest = self._get_path(path, headers=request_headers)\n # we need to disable here the digest check because of wrong digests registry returns\n # https://github.com/docker/distribution/pull/2310\n # we will just calculate it without camparing it to the value that registry has in the\n # docker-content-digest response header\n digest = models.UnitMixin.calculate_digest(manifest)\n # add manifest and digest\n manifests.append((manifest, digest, response_headers.get(content_type_header)))\n\n # since in accept headers we have man_list and schema2 mediatype, registry would return\n # whether man list, schema2 or schema1.\n # if it is schema1 we do not need to make any other requests\n # if it is manifest list, we do not need to make any other requests, the converted type\n # for older clients will be requested later during the manifest list process time\n # if it is schema2 we need to ask schema1 for older clients.\n if tag and response_headers.get(content_type_header) == constants.MEDIATYPE_MANIFEST_S2:\n request_headers['Accept'] = ','.join((constants.MEDIATYPE_MANIFEST_S1,\n constants.MEDIATYPE_SIGNED_MANIFEST_S1))\n try:\n # for compatibility with older clients, try to fetch schema1 in case it is available\n response_headers, manifest = self._get_path(path, headers=request_headers)\n digest = self._digest_check(response_headers, manifest)\n\n # add manifest and digest\n manifests.append((manifest, digest, response_headers.get(content_type_header)))\n except IOError as e:\n if '404 Client Error' not in str(e):\n raise\n pass\n\n # returned list will be whether:\n # [(S2, digest, content_type), (S1, digest, content_type)]\n # or\n # [(list, digest, content_type)]\n # or\n # [(S1, digest, content_type)]\n # [(S2, digest, content_type)]\n # note the tuple has a new entry content_type which we need later to process\n # returned manifest mediatypes\n return manifests\n\n def _digest_check(self, headers, manifest):\n\n digest_header = 'docker-content-digest'\n if digest_header in headers:\n expected_digest = headers[digest_header]\n # The digest is formatted as algorithm:sum, so let's ask our hasher to use the same\n # algorithm as we received in the headers.\n digest = models.Manifest.calculate_digest(manifest, expected_digest.split(':')[0])\n if digest != expected_digest:\n msg = _('The Manifest digest does not match the expected value. 
The remote '\n 'feed announced a digest of {e}, but the downloaded digest was {d}.')\n msg = msg.format(e=expected_digest, d=digest)\n raise IOError(msg)\n else:\n digest = models.Manifest.calculate_digest(manifest)\n\n return digest\n\n def get_tags(self):\n \"\"\"\n Get a list of the available tags in the repository.\n\n :return: A list of basestrings of the available tags in the repository.\n :rtype: list\n \"\"\"\n path = self.TAGS_PATH.format(name=self.name)\n _logger.debug('retrieving tags from remote registry')\n try:\n headers, tags = self._get_path(path)\n except IOError as e:\n raise pulp_exceptions.PulpCodedException(error_code=error_codes.DKR1007,\n repo=self.name,\n registry=self.registry_url,\n reason=str(e))\n tag_list = json.loads(tags)['tags'] or []\n # check for the presence of the pagination link header\n link = headers.get('Link')\n while link:\n # according RFC5988 URI-reference can be relative or absolute\n _, _, path, params, query, fragm = urlparse.urlparse(link.split(';')[0].strip('>, <'))\n link = urlparse.urlunparse((None, None, path, params, query, fragm))\n headers, tags = self._get_path(link)\n tag_list.extend(json.loads(tags)['tags'])\n link = headers.get('Link')\n return tag_list\n\n def _get_path(self, path, headers=None):\n \"\"\"\n Retrieve a single path within the upstream registry, and return a 2-tuple of the headers and\n the response body.\n\n :param path: a full http path to retrieve that will be urljoin'd to the upstream registry\n url.\n :type path: basestring\n :param headers: headers sent in the request\n :type headers: dict\n\n :return: (headers, response body)\n :rtype: tuple\n \"\"\"\n url = urlparse.urljoin(self.registry_url, path)\n _logger.debug(_('Retrieving {0}'.format(url)))\n request = DownloadRequest(url, StringIO())\n request.headers = headers\n\n if self.token:\n request.headers = auth_util.update_token_auth_header(request.headers, self.token)\n\n report = self.downloader.download_one(request)\n\n # If the download was unauthorized, check report header, if basic auth is expected\n # retry with basic auth, otherwise attempt to get a token and try again\n if report.state == report.DOWNLOAD_FAILED:\n if report.error_report.get('response_code') == httplib.UNAUTHORIZED:\n auth_header = report.headers.get('www-authenticate')\n if auth_header is None:\n raise IOError(\"401 responses are expected to \"\n \"contain authentication information\")\n elif \"Basic\" in auth_header:\n _logger.debug(_('Download unauthorized, retrying with basic authentication'))\n report = self.auth_downloader.download_one(request)\n else:\n _logger.debug(_('Download unauthorized, attempting to retrieve a token.'))\n self.token = auth_util.request_token(self.auth_downloader, request,\n auth_header, self.name)\n if not isinstance(self.token, DownloadReport):\n request.headers = auth_util.update_token_auth_header(request.headers,\n self.token)\n report = self.downloader.download_one(request)\n if report.state == report.DOWNLOAD_FAILED:\n # this condition was added in case the registry would not allow to access v2 endpoint\n # but still token would be valid for other endpoints.\n # see https://pulp.plan.io/issues/2643\n if path == '/v2/' and report.error_report.get('response_code') == httplib.UNAUTHORIZED:\n pass\n else:\n self._raise_path_error(report)\n\n return report.headers, report.destination.getvalue()\n\n @staticmethod\n def _raise_path_error(report):\n \"\"\"\n Raise an exception with an appropriate error message.\n\n Specifically because docker hub responds 
with a 401 for repositories that don't exist, pulp\n cannot disambiguate Unauthorized vs. Not Found. This function tries to make an error message\n that is clear on that point.\n\n :param report: download report\n :type report: nectar.report.DownloadReport\n\n :raises IOError: always, with an appropriate message based on the report\n \"\"\"\n if report.error_report.get('response_code') == httplib.UNAUTHORIZED:\n # docker hub returns 401 for repos that don't exist, so we cannot disambiguate.\n raise IOError(_('401 Client Error: \\'Unauthorized or Not Found\\' for url {0}'.format(\n report.url)))\n else:\n code = report.error_report.get('response_code')\n if code >= 400 and code < 500:\n raise IOError('{0} Client Error: \\'{1}\\' for url: {2}'.format(\n code, report.error_msg, report.url))\n elif code >= 500 and code < 600:\n raise IOError('{0} Server Error: \\'{1}\\' for url: {2}'.format(\n code, report.error_msg, report.url))\n else:\n raise IOError('\\'{0}\\' for url {1}'.format(report.error_msg, report.url))\n","repo_name":"pulp/pulp_docker","sub_path":"plugins/pulp_docker/plugins/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":24055,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"2"}
+{"seq_id":"26393739600","text":"from numpy import full, nan\n\n\ndef apply_with_matrix(ma1, ma2, fu):\n n_ro1 = ma1.shape[0]\n\n n_ro2 = ma2.shape[0]\n\n fu_ro1_ro2 = full([n_ro1, n_ro2], nan)\n\n for ie1 in range(n_ro1):\n ro1 = ma1[ie1]\n\n for ie2 in range(n_ro2):\n fu_ro1_ro2[ie1, ie2] = fu(ro1, ma2[ie2])\n\n return fu_ro1_ro2\n","repo_name":"KwatMDPhD/CCAL.TODO","sub_path":"kwat/matrix/apply_with_matrix.py","file_name":"apply_with_matrix.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"ro","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"35786375023","text":"import os\nimport torch\nfrom collections import OrderedDict\n\n\nclass ModelsFactory(object):\n def __init__(self):\n pass\n\n @staticmethod\n def get_by_name(model_name, *args, **kwargs):\n model = None\n\n if model_name == \"imitator\":\n from .imitator import Imitator\n model = Imitator(*args, **kwargs)\n\n elif model_name == \"swapper\":\n from .imitator import Swapper\n model = Swapper(*args, **kwargs)\n\n elif model_name == \"viewer\":\n from .imitator import Viewer\n model = Viewer(*args, **kwargs)\n\n else:\n raise ValueError(f\"Model {model_name} not recognized.\")\n\n print(f\"Model {model.name} was created\")\n return model\n\n\nclass BaseModel(object):\n def __init__(self, opt):\n self._name = \"BaseModel\"\n\n self._opt = opt\n self._save_dir = opt.meta_data.checkpoints_dir\n\n @property\n def name(self):\n return self._name\n\n def load_network(self, network, network_label, epoch_label, need_module=False):\n load_filename = \"net_iter_%s_id_%s.pth\" % (epoch_label, network_label)\n load_path = os.path.join(self._save_dir, load_filename)\n\n self.load_params(network, load_path, need_module)\n\n def load_params(self, network, load_path, need_module=False):\n assert os.path.exists(\n load_path), \"Weights file not found. Have you trained a model!? We are not providing one %s\" % load_path\n\n def load(model, orig_state_dict):\n state_dict = OrderedDict()\n for k, v in orig_state_dict.items():\n # remove \"module\"\n name = k[7:] if \"module\" in k else k\n state_dict[name] = v\n\n # load params\n # model.load_state_dict(state_dict)\n model.load_state_dict(state_dict, strict=False)\n\n save_data = torch.load(load_path, map_location=\"cpu\")\n if need_module:\n # network.load_state_dict(save_data)\n network.load_state_dict(save_data, strict=False)\n else:\n load(network, save_data)\n\n print(\"Loading net: %s\" % load_path)\n\n\nclass BaseRunnerModel(BaseModel):\n\n def __init__(self, opt):\n super(BaseRunnerModel, self).__init__(opt)\n\n self._name = \"BaseRunnerModel\"\n\n def source_setup(self, *args, **kwargs):\n raise NotImplementedError\n\n def swap_params(self, *args, **kwargs):\n raise NotImplementedError\n\n def make_inputs_for_tsf(self, *args, **kwargs):\n raise NotImplementedError\n\n def post_update(self, *args, **kwargs):\n raise NotImplementedError\n\n","repo_name":"iPERDance/iPERCore","sub_path":"iPERCore/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":2412,"dataset":"github-code","pt":"2"}
+{"seq_id":"73603209646","text":"\"\"\"Learner component for CQL.\"\"\"\nimport functools\nimport time\nfrom typing import Iterator, NamedTuple, Optional\n\nimport jax\nimport jax.numpy as jnp\nimport jax.scipy as jsp\nimport numpy as np\nimport optax\nfrom acme import core\nfrom acme import types\nfrom acme.jax import networks as networks_lib\nfrom acme.utils import counting\nfrom acme.utils import loggers\n\n\nclass TrainingState(NamedTuple):\n \"\"\"Training state for CQL Learner.\"\"\"\n\n policy_params: networks_lib.Params\n critic_params: networks_lib.Params\n critic_target_params: networks_lib.Params\n policy_optimizer_state: optax.OptState\n critic_optimizer_state: optax.OptState\n alpha_optimizer_state: optax.OptState\n alpha_params: jnp.ndarray\n alpha_prime_optimizer_state: Optional[optax.OptState]\n alpha_prime_params: Optional[jnp.ndarray]\n key: networks_lib.PRNGKey\n steps: int\n\n\nclass CQLLearner(core.Learner):\n \"\"\"Conservative Q Learning (CQL) learner component.\n\n This corresponds to CQL(H) agent from [1], with importance sampling\n (min_q_version == 3) according to Appendix F in [1].\n The implementation is based on\n\n https://github.com/aviralkumar2907/CQL/blob/master/d4rl/rlkit/torch/sac/cql.py\n\n References:\n [1]: Aviral Kumar and Aurick Zhou and George Tucker and Sergey Levine,\n Conservative Q-Learning for Offline Reinforcement Learning,\n arXiv Pre-print, https://arxiv.org/abs/2006.04779\n \"\"\"\n\n def __init__(\n self,\n policy_network: networks_lib.FeedForwardNetwork,\n critic_network: networks_lib.FeedForwardNetwork,\n random_key: networks_lib.PRNGKey,\n dataset: Iterator[types.Transition],\n policy_optimizer: optax.GradientTransformation,\n critic_optimizer: optax.GradientTransformation,\n alpha_optimizer: optax.GradientTransformation,\n target_entropy: float,\n discount: float = 0.99,\n tau: float = 5e-3,\n init_alpha: float = 1.0,\n num_bc_steps: int = 0,\n softmax_temperature: float = 1.0,\n cql_alpha: float = 5.0,\n max_q_backup: bool = False,\n deterministic_backup: bool = True,\n num_cql_samples: int = 10,\n with_lagrange: bool = False,\n target_action_gap: float = 10.0,\n logger: Optional[loggers.Logger] = None,\n counter: Optional[counting.Counter] = None,\n ):\n \"\"\"Initialize the CQL Learner.\n\n Args:\n policy_network: policy network\n critic_network: critic network\n random_key: key for random number generation\n dataset: iterator for the training data\n policy_optimizer: optimizer for policy network\n critic_optimizer: optimizer for critic network\n alpha_optimizer: optimizer for SAC alpha \"temperature\"\n target_entropy: target entropy for automatic entropy tuning\n discount: discount for TD updates.\n tau: coefficient for smoothing target network update.\n init_alpha: Initial alpha.\n num_bc_steps: Number of steps to perform BC on policy update.\n softmax_temperature: temperature for the logsumexp.\n min_q_weight: the value of alpha, set to 5.0 or 10.0 if not using lagrange.\n When adaptive cql weight is used, this determines the minimum\n weight for the cql loss.\n max_q_backup: set this to true to use max_{a} backup.\n deterministic_backup: set this to true to use deterministic backup, i.e.,\n it will not backup the entropy in the Q function.\n num_cql_samples: number of random samples to use for max backup and\n importance sampling.\n with_lagrange: with to use the lagrangian formulation of CQL.\n target_action_gap: Threshold for the lagrangian.\n logger: logger object to write the metrics to.\n counter: counter used for keeping 
track of the number of steps.\n\n References:\n Aviral Kumar, Aurick Zhou, George Tucker, Sergey Levine,\n Conservative Q-Learning for Offline Reinforcement Learning\n https://arxiv.org/abs/2006.04779\n\n \"\"\"\n if with_lagrange:\n # For now, use the alpha optimizer hyperparams\n alpha_prime_optimizer = optax.adam(3e-4)\n else:\n alpha_prime_optimizer = None\n\n polyak_average = functools.partial(optax.incremental_update, step_size=tau)\n\n def sample_action_and_log_prob(\n policy_params: networks_lib.Params,\n key: networks_lib.PRNGKey,\n observation: networks_lib.Observation,\n sample_shape=(),\n ):\n action_dist = policy_network.apply(policy_params, observation)\n action = action_dist.sample(sample_shape, seed=key)\n log_prob = action_dist.log_prob(action)\n return action, log_prob\n\n def critic_loss_fn(\n critic_params: networks_lib.Params,\n alpha_prime_params: jnp.ndarray,\n critic_target_params: networks_lib.Params,\n policy_params: networks_lib.Params,\n key: networks_lib.PRNGKey,\n log_alpha: jnp.ndarray,\n transitions: types.Transition,\n ):\n # For CQL(H), the loss is\n # min_Q alpha' * [logsumexp(Q(s,a')) - Q(s,a)] + (Q(s, a) - Q(s', a''))^2\n # = alpha' * cql_loss + critic_loss\n # First compute the SAC critic loss\n alpha = jnp.exp(log_alpha)\n q1_pred, q2_pred = critic_network.apply(\n critic_params, transitions.observation, transitions.action\n )\n\n if not max_q_backup:\n next_action_key, key = jax.random.split(key)\n new_next_actions, next_log_pi = sample_action_and_log_prob(\n policy_params, next_action_key, transitions.next_observation\n )\n target_q1, target_q2 = critic_network.apply(\n critic_target_params,\n transitions.next_observation,\n new_next_actions,\n )\n target_q_values = jnp.minimum(target_q1, target_q2)\n if not deterministic_backup:\n target_q_values = target_q_values - alpha * next_log_pi\n else:\n next_action_key, key = jax.random.split(key)\n # TODO(yl): allow configuting number of actions\n sampled_next_actions, next_log_pi = sample_action_and_log_prob(\n policy_params,\n next_action_key,\n transitions.next_observation,\n sample_shape=(num_cql_samples,),\n )\n target_q1, target_q2 = jax.vmap(critic_network.apply, (None, None, 0))(\n critic_target_params,\n transitions.next_observation,\n sampled_next_actions,\n )\n target_q1 = jnp.max(target_q1, axis=0)\n target_q2 = jnp.max(target_q2, axis=0)\n target_q_values = jnp.min(target_q1, target_q2)\n\n q_target = (\n transitions.reward + transitions.discount * discount * target_q_values\n )\n assert len(q_target.shape) == 1\n q_target = jax.lax.stop_gradient(q_target)\n qf1_loss = jnp.mean(jnp.square(q1_pred - q_target))\n qf2_loss = jnp.mean(jnp.square(q2_pred - q_target))\n qf_loss = qf1_loss + qf2_loss\n\n # Next compute the cql_loss\n batch_size = transitions.action.shape[0]\n action_size = transitions.action.shape[-1]\n vmapped_critic_apply = jax.vmap(\n critic_network.apply, (None, None, 0), out_axes=0\n )\n # Compute the logsumexp(Q(s,a')) according to Appendix F\n # for the importance sampled version\n # Sample actions from uniform-at-random distribution\n # (N, B, A)\n uniform_key, policy_key, key = jax.random.split(key, 3)\n uniform_actions = jax.random.uniform(\n uniform_key,\n shape=(num_cql_samples, batch_size, action_size),\n dtype=transitions.action.dtype,\n maxval=1.0,\n minval=-1.0,\n )\n uniform_log_probs = jnp.log(0.5**action_size)\n # Compute the q values for the uniform actions\n # Sample actions from the policy\n q_uniform1, q_uniform2 = vmapped_critic_apply(\n critic_params, 
transitions.observation, uniform_actions\n )\n uniform_log_probs1 = q_uniform1 * softmax_temperature - uniform_log_probs\n uniform_log_probs2 = q_uniform2 * softmax_temperature - uniform_log_probs\n sampled_actions, sampled_actions_log_probs = sample_action_and_log_prob(\n policy_params, policy_key, transitions.observation, (num_cql_samples,)\n )\n q_estimate1, q_estimate2 = vmapped_critic_apply(\n critic_params, transitions.observation, sampled_actions\n )\n policy_log_probs1 = (\n q_estimate1 * softmax_temperature - sampled_actions_log_probs\n )\n policy_log_probs2 = (\n q_estimate2 * softmax_temperature - sampled_actions_log_probs\n )\n combined_log_probs1 = jnp.concatenate(\n [policy_log_probs1, uniform_log_probs1], axis=0\n )\n combined_log_probs2 = jnp.concatenate(\n [policy_log_probs2, uniform_log_probs2], axis=0\n )\n\n logsumexp = jsp.special.logsumexp\n logsumexp1 = (\n logsumexp(combined_log_probs1, axis=0) * 1.0 / softmax_temperature\n )\n logsumexp2 = (\n logsumexp(combined_log_probs2, axis=0) * 1.0 / softmax_temperature\n )\n cql_loss = jnp.mean((logsumexp1 - q1_pred) + (logsumexp2 - q2_pred))\n alpha_prime = jnp.clip(jnp.exp(alpha_prime_params), 0.0, 10000.0)\n metrics = {\n \"qf_loss\": qf_loss,\n \"cql_loss\": cql_loss,\n \"q1\": jnp.mean(q1_pred),\n \"q2\": jnp.mean(q2_pred),\n \"q1_uniform\": jnp.mean(q_uniform1),\n \"q2_uniform\": jnp.mean(q_uniform2),\n }\n return qf_loss + alpha_prime * cql_loss, metrics\n\n def actor_loss_fn(\n policy_params: networks_lib.Params,\n critic_params: networks_lib.Params,\n key: networks_lib.PRNGKey,\n alpha_params: jnp.ndarray,\n observation: jnp.ndarray,\n ):\n alpha = jnp.exp(alpha_params)\n action_dist = policy_network.apply(policy_params, observation)\n new_actions = action_dist.sample(seed=key)\n log_probs = action_dist.log_prob(new_actions)\n q1, q2 = critic_network.apply(critic_params, observation, new_actions)\n q_new_actions = jnp.minimum(q1, q2)\n entropy = -log_probs.mean()\n actor_loss = alpha * log_probs - q_new_actions\n return jnp.mean(actor_loss), {\"entropy\": entropy}\n\n def bc_actor_loss_fn(\n policy_params: networks_lib.Params,\n key: networks_lib.PRNGKey,\n alpha_params: jnp.ndarray,\n observations: jnp.ndarray,\n actions: jnp.ndarray,\n ):\n # This is the loss function for pre-training the policy\n action_dist = policy_network.apply(policy_params, observations)\n policy_log_prob = action_dist.log_prob(actions)\n new_actions = action_dist.sample(seed=key)\n log_pi = action_dist.log_prob(new_actions)\n policy_loss = (jnp.exp(alpha_params) * log_pi - policy_log_prob).mean()\n return policy_loss, {\"entropy\": -log_pi.mean()}\n\n def alpha_loss_fn(alpha_params: jnp.ndarray, entropy: jnp.ndarray):\n # Use log_alpha here for numerical stability\n return alpha_params * (entropy - target_entropy)\n\n def alpha_prime_loss_fn(alpha_prime_params: jnp.ndarray, cql_loss: jnp.ndarray):\n # -alpha' * (cql_q1_loss - tau) + alpha' * (cql_q2_loss - tau)\n # -alpha' * (cql_q1_loss + cql_q2_loss - 2 * tau)\n # -alpha' * (cql_loss - 2 * tau)\n alpha_prime = jnp.clip(jnp.exp(alpha_prime_params), 0.0, 10000.0)\n return -alpha_prime * (cql_loss - 2 * target_action_gap)\n\n bc_policy_grad_fn = jax.value_and_grad(bc_actor_loss_fn, has_aux=True)\n policy_grad_fn = jax.value_and_grad(actor_loss_fn, has_aux=True)\n\n @jax.jit\n def sgd_step(state: TrainingState, transitions: types.Transition):\n metrics = {}\n # Update critic\n critic_key, actor_key, key = jax.random.split(state.key, 3)\n (critic_loss, critic_metrics), critic_grads = 
jax.value_and_grad(\n critic_loss_fn, has_aux=True\n )(\n state.critic_params,\n state.alpha_prime_params,\n state.critic_target_params,\n state.policy_params,\n critic_key,\n state.alpha_params,\n transitions,\n )\n metrics.update({\"critic_loss\": critic_loss, **critic_metrics})\n critic_updates, critic_optimizer_state = critic_optimizer.update(\n critic_grads, state.critic_optimizer_state\n )\n critic_params = optax.apply_updates(state.critic_params, critic_updates)\n # Update policy\n (policy_loss, actor_metrics), policy_grads = jax.lax.cond(\n state.steps < num_bc_steps,\n lambda _: bc_policy_grad_fn(\n state.policy_params,\n actor_key,\n state.alpha_params,\n transitions.observation,\n transitions.action,\n ),\n lambda _: policy_grad_fn(\n state.policy_params,\n critic_params,\n actor_key,\n state.alpha_params,\n transitions.observation,\n ),\n operand=None,\n )\n policy_updates, policy_optimizer_state = policy_optimizer.update(\n policy_grads, state.policy_optimizer_state\n )\n policy_params = optax.apply_updates(state.policy_params, policy_updates)\n metrics.update({\"actor_loss\": policy_loss, **actor_metrics})\n\n # Update entropy alpha\n alpha_loss, grad = jax.value_and_grad(alpha_loss_fn)(\n state.alpha_params, actor_metrics[\"entropy\"]\n )\n alpha_update, alpha_optimizer_state = alpha_optimizer.update(\n grad, state.alpha_optimizer_state\n )\n alpha_params = optax.apply_updates(state.alpha_params, alpha_update)\n metrics.update({\"alpha_loss\": alpha_loss, \"alpha\": jnp.exp(alpha_params)})\n\n # Update adaptive alpha_prime\n if with_lagrange:\n alpha_prime_loss, alpha_prime_grads = jax.value_and_grad(\n alpha_prime_loss_fn\n )(state.alpha_prime_params, critic_metrics[\"cql_loss\"])\n # pytype: disable=attribute-error\n (\n alpha_prime_updates,\n alpha_prime_optimizer_state,\n ) = alpha_prime_optimizer.update(\n alpha_prime_grads, state.alpha_prime_optimizer_state\n )\n # pytype: enable=attribute-error\n alpha_prime_params = optax.apply_updates(\n state.alpha_prime_params, alpha_prime_updates\n )\n metrics.update(\n {\n \"alpha_prime_loss\": alpha_prime_loss,\n \"alpha_prime\": jnp.exp(alpha_prime_params),\n }\n )\n else:\n alpha_prime_params = state.alpha_prime_params\n alpha_prime_optimizer_state = None\n\n # Update target network params\n critic_target_params = polyak_average(\n critic_params, state.critic_target_params\n )\n steps = state.steps + 1\n state = TrainingState(\n policy_params=policy_params,\n critic_params=critic_params,\n critic_target_params=critic_target_params,\n policy_optimizer_state=policy_optimizer_state,\n critic_optimizer_state=critic_optimizer_state,\n alpha_optimizer_state=alpha_optimizer_state,\n alpha_params=alpha_params,\n alpha_prime_optimizer_state=alpha_prime_optimizer_state,\n alpha_prime_params=alpha_prime_params,\n key=key,\n steps=steps,\n )\n return state, metrics\n\n self._iterator = dataset\n self._logger = logger or loggers.make_default_logger(\n label=\"learner\", save_data=False\n )\n self._counter = counter or counting.Counter()\n\n self._sgd_step = sgd_step\n\n def make_initial_state(key):\n init_policy_key, init_critic_key, key = jax.random.split(random_key, 3)\n init_policy_params = policy_network.init(init_policy_key)\n init_critic_params = critic_network.init(init_critic_key)\n init_policy_optimizer_state = policy_optimizer.init(init_policy_params)\n init_critic_optimizer_state = critic_optimizer.init(init_critic_params)\n init_alpha_params = jnp.array(np.log(init_alpha), dtype=jnp.float32)\n init_alpha_optimizer_state = 
alpha_optimizer.init(init_alpha_params)\n\n init_alpha_prime_params = jnp.asarray(jnp.log(cql_alpha), dtype=jnp.float32)\n if alpha_prime_optimizer is not None:\n init_alpha_prime_optimizer_state = alpha_prime_optimizer.init(\n init_alpha_prime_params\n )\n else:\n init_alpha_prime_optimizer_state = None\n\n return TrainingState(\n policy_params=init_policy_params,\n critic_params=init_critic_params,\n critic_target_params=init_critic_params,\n policy_optimizer_state=init_policy_optimizer_state,\n critic_optimizer_state=init_critic_optimizer_state,\n alpha_optimizer_state=init_alpha_optimizer_state,\n alpha_prime_optimizer_state=init_alpha_prime_optimizer_state,\n alpha_params=init_alpha_params,\n alpha_prime_params=init_alpha_prime_params,\n key=key,\n steps=0,\n )\n\n self._state = make_initial_state(random_key)\n\n self._timestamp = None\n\n def step(self):\n # Get data from replay\n transitions = next(self._iterator)\n # Perform a single learner step\n self._state, metrics = self._sgd_step(self._state, transitions)\n\n # Compute elapsed time\n timestamp = time.time()\n elapsed_time = timestamp - self._timestamp if self._timestamp else 0\n self._timestamp = timestamp\n\n # Increment counts and record the current time\n counts = self._counter.increment(steps=1, walltime=elapsed_time)\n # Attempts to write the logs.\n self._logger.write({**metrics, **counts})\n\n def get_variables(self, names):\n variables = {\n \"policy\": self._state.policy_params,\n \"critic\": self._state.critic_params,\n }\n return [variables[name] for name in names]\n\n def save(self) -> TrainingState:\n return self._state\n\n def restore(self, state: TrainingState):\n self._state = state\n","repo_name":"ethanluoyc/magi","sub_path":"magi/agents/cql/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":20023,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"2"}
+{"seq_id":"20696489157","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('AreaOftalmologia', '0004_auto_20151203_1931'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='persona',\n old_name='seegundo_nombre',\n new_name='segundo_nombre',\n ),\n migrations.AlterField(\n model_name='tipo_examen',\n name='nombre',\n field=models.CharField(max_length=50),\n ),\n ]\n","repo_name":"Everlm/IPS","sub_path":"AreaOftalmologia/migrations/0005_auto_20151205_1041.py","file_name":"0005_auto_20151205_1041.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"41706710038","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def findDuplicate(self, nums: List[int]) -> int:\r\n nums_dict = dict()\r\n for num in nums:\r\n if num not in nums_dict:\r\n nums_dict[num] = True\r\n else:\r\n return num\r\n","repo_name":"sgonzalezr94/Neetcode-Challenges","sub_path":"Arrays_and_Hashing/1.containsDuplicate.py","file_name":"1.containsDuplicate.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5091850177","text":"# =============================================================================\r\n# \r\n'''CPSC-51100, Summer II 2019\r\nNAME: Amy Noyes\r\nPROGRAMMING ASSIGNMENT #2\r\n'''\r\n# =============================================================================\r\nprint(\"\\n\")\r\nprint(\"CPSC-51100, Summer II 2019\", \"NAME: Amy Noyes\", \\\r\n \"PROGRAMMING ASSIGNMENT #2\", sep=\"\\n\")\r\n \r\nprint(\"\\n\")\r\n\r\nimport os\r\n\r\nprint(\"Input File Directory\")\r\nprint(os.getcwd() + \"\\n\")\r\n\r\n# Read text file and convert strings to a list of floats\r\ndef get_datasets(): \r\n datasets_file = open('prog2-input-data.txt', 'r'); \r\n datasets = datasets_file.readlines() \r\n \r\n datasets = [float(dataset) for dataset in datasets]\r\n datasets_file.close()\r\n return datasets\r\n\r\n# User enters number of k clusters\r\nk = int(input('Enter the number of clusters: '))\r\nprint(\"\\n\")\r\n\r\n# Initializes centroids with the first k points as initial centroids \\\r\n# using a dict data structure\r\ndef initial_centroids(k, datasets):\r\n centroids = dict()\r\n for i in range(k):\r\n centroids[i] = datasets[i]\r\n return centroids\r\n\r\n# Initializes dict clusters\r\ndef initial_clusters(k):\r\n clusters = dict()\r\n for i in range(k):\r\n clusters[i] = []\r\n return clusters\r\n\r\n# Get closest cluster to the centroids\r\ndef get_closest(point, centroids):\r\n distances = [] \r\n# Lambda = anonymous function\r\n get_distance = lambda A, B: abs(A - B)\r\n for cluster, centroid in centroids.items():\r\n distance = (cluster, get_distance(point, centroid))\r\n distances.append(distance)\r\n closest_cluster = min(distances, key=lambda x: x[1])[0]\r\n return closest_cluster\r\n\r\n# Print each iteration\r\ndef print_clusters(iteration, clusters):\r\n print('Iteration', iteration)\r\n for cluster, datasets in clusters.items():\r\n print(cluster, datasets)\r\n print()\r\n\r\n# K-means clustering: get centroids, loop to get new centroids\r\ndef kmeans(k, datasets):\r\n centroids = initial_centroids(k, datasets)\r\n clusters = initial_clusters(k)\r\n clusters_copy = None\r\n get_centroid = lambda points: sum(points) / len(points)\r\n\r\n iteration = 0\r\n while clusters != clusters_copy:\r\n iteration += 1\r\n clusters_copy = clusters.copy()\r\n clusters = initial_clusters(k)\r\n for dataset in datasets:\r\n cluster = get_closest(dataset, centroids)\r\n clusters[cluster].append(dataset)\r\n for cluster in clusters.keys():\r\n centroids[cluster] = get_centroid(clusters[cluster])\r\n print_clusters(iteration, clusters)\r\n return clusters\r\n\r\n# Print output\r\ndef format_output(clusters):\r\n output = ''\r\n for cluster, datasets in clusters.items():\r\n for dataset in datasets:\r\n output += 'Point %s in cluster %s\\n' % (dataset, cluster)\r\n return output\r\n\r\ndatasets = get_datasets()\r\nclusters = kmeans(k, datasets)\r\n\r\n# Print final output\r\noutput = format_output(clusters)\r\nprint(output)\r\n\r\n# File output to Input File Directory shown by print(os.getcwd())\r\noutput_file = open('prog2-output-data.txt', 'w')\r\noutput_file.write(output)\r\noutput_file.close()\r\n","repo_name":"smgds/Python-Statistical-Programs","sub_path":"kMeans.py","file_name":"kMeans.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"25725080944","text":"import numpy as np\nimport random as rn\nfrom collections import deque\nfrom math import log\n\nclass Error(Exception):\n '''Base class for exceptions in this module.'''\n pass\n\nclass BoxAndEmptySpaceError(Error):\n\tdef __init__(self, message):\n\t\tself.message = message\n\t\nclass Room:\n\t\n\t\n\tdef __init__(self, height, width, box_num):\n\t\tself.boxes=[]\n\t\tself.target_tile_list=[]\n\t\tself.player_curpos=[]\n\t\tself.width=width\n\t\tself.height=height\n\t\tself.box_num=box_num\n\t\tself.room = np.full((height,width), 'W')\n\t\t\n\tdef print_room(self):\n\t\tprint(self.room)\n\n\tdef choose_random_dir(self, d):\n\t\t'''select a new direction and then check yes(35%) and no(65%). \n\t\tIf yes, change, else dont'''\n\t\td1=np.random.randint(1,4)\n\t\tb = [0,1]\n\t\tc=np.random.choice(b,1,p=[0.65,0.35])\n\t\tif(c):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d\n\t\t\t\n\tdef get_tile(self, x, y):\n\t\treturn self.room[y][x]\n\t\n\tdef set_tile(self, x, y, c):\n\t\tself.room[y][x]=c\n\t\n\tdef update_space(self, x, y, sym):\t\n\t\tif(x<=0 or x>=self.width-1 or y<=0 or y>=self.height-1):\n\t\t\treturn False\n\t\telse:\n\t\t\tself.set_tile(x,y,sym)\n\t\t\treturn True\n\t\t\n\tdef topology_gen(self, walk_steps):\n\t\tdirn=[1,2,3,4]\n\t\tx=np.random.randint(1,self.width-2) \n\t\ty=np.random.randint(1,self.height-2)\n\t\td=np.random.randint(1,4)\n\t\tself.update_space(x,y,'E')\n\t\tfor i in range(walk_steps):\n\t\t\tt=np.random.randint(1,5)\n\t\t\tif(t==1):\n\t\t\t\tself.update_space(x-1, y, 'E')\n\t\t\t\tself.update_space(x+1, y, 'E')\n\t\t\telif(t==2):\n\t\t\t\tself.update_space(x, y+1, 'E')\n\t\t\t\tself.update_space(x, y-1, 'E')\n\t\t\telif(t==3):\n\t\t\t\tself.update_space(x-1, y, 'E')\n\t\t\t\tself.update_space(x, y-1, 'E')\n\t\t\telif(t==4):\n\t\t\t\tself.update_space(x, y-1, 'E')\n\t\t\t\tself.update_space(x-1, y-1, 'E')\n\t\t\t\tself.update_space(x-1, y, 'E')\n\t\t\telif(t==5):\n\t\t\t\tself.update_space(x+1, y, 'E')\n\t\t\t\tself.update_space(x, y-1, 'E')\n\t\t\td=self.choose_random_dir(d)\n\t\t\tif(d==1):\n\t\t\t\tx=x-1\n\t\t\telif(d==2):\n\t\t\t\tx=x+1\n\t\t\telif(d==3):\n\t\t\t\ty=y-1\n\t\t\telif(d==4):\n\t\t\t\ty=y+1\n\t\t\tif(self.update_space(x,y,'E')):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif(d==1):\n\t\t\t\t\tx=x+1\n\t\t\t\telif(d==2):\n\t\t\t\t\tx=x-1\n\t\t\t\telif(d==3):\n\t\t\t\t\ty=y+1\n\t\t\t\telif(d==4):\n\t\t\t\t\ty=y-1\t\n\t\t\t\ti-=1\n\n\tdef position_configuration(self):\n\t\t#boxes config\t\n\t\tfor i in range(self.box_num):\n\t\t\twhile(True):\n\t\t\t\tx=np.random.randint(1,self.width-2) \n\t\t\t\ty=np.random.randint(1,self.height-2)\n\t\t\t\tif(self.get_tile(x,y)=='E'):\n\t\t\t\t\tself.target_tile_list.append((x,y))\n\t\t\t\t\tself.set_tile(x,y,'X') #setting this as X cuz initially B is on T. 
S\n\t\t\t\t\tself.boxes.append([x,y])\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t#player config\n\t\twhile(True):\n\t\t\tx=np.random.randint(1,self.width-2) \n\t\t\ty=np.random.randint(1,self.height-2)\n\t\t\tif(self.get_tile(x,y)=='E'):\n\t\t\t\tself.set_tile(x,y,'P')\n\t\t\t\tself.set_player_curpos(x,y)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue\n\t\n\tdef is_target_tile(self, x, y):\n\t\tfor i in range(self.box_num):\n\t\t\tif self.target_tile_list[i][0]==x and self.target_tile_list[i][1]==y:\n\t\t\t\treturn True\n\t\treturn False\n\t\n\tdef which_box(self, x, y):\n\t\tfor i in range(self.box_num):\n\t\t\tif self.boxes[i][0]==x and self.boxes[i][1]==y:\n\t\t\t\treturn i\n\t\traise Exception(\"NO BOX FOUND\")\n\t\t\t\t\n\tdef set_player_curpos(self,x,y):\n\t\tself.player_curpos=[]\n\t\tself.player_curpos.append(x)\n\t\tself.player_curpos.append(y)\n\t\n\tdef update_box_pos(self, i, x, y):\n\t\tself.boxes[i][0]=x\n\t\tself.boxes[i][1]=y\n\t\n\tdef make_move(self,x,y, m):\n\t\tif(m==1):\n\t\t\tif(self.get_tile(x-1,y)=='E'):\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\t\tself.set_tile(x-1,y,'P')\n\t\t\t\tself.set_player_curpos(x-1,y)\n\t\t\t\treturn True\n\t\telif(m==2):\n\t\t\tif(self.get_tile(x+1,y)=='E'):\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\t\tself.set_tile(x+1,y,'P')\n\t\t\t\tself.set_player_curpos(x+1,y)\n\t\t\t\treturn True\n\t\telif(m==3):\n\t\t\tif(self.get_tile(x,y+1)=='E'):\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\t\tself.set_tile(x,y+1,'P')\n\t\t\t\tself.set_player_curpos(x,y+1)\n\t\t\t\treturn True\n\t\telif(m==4):\n\t\t\tif(self.get_tile(x,y-1)=='E'):\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\t\tself.set_tile(x,y-1,'P')\n\t\t\t\tself.set_player_curpos(x,y-1)\n\t\t\t\treturn True\n\t\telif(m==5):\n\t\t\tif((self.get_tile(x+1,y)=='B' or self.get_tile(x+1,y)=='X') and self.get_tile(x-1,y)=='E'):\n\t\t\t\tbi=self.which_box(x+1,y)\n\t\t\t\tif(self.get_tile(x+1,y)=='X'):\n\t\t\t\t\tself.set_tile(x+1,y,'T')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x+1,y,'E')\n\t\t\t\tif self.is_target_tile(x,y):\n\t\t\t\t\tself.set_tile(x,y,'X')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x,y,'B')\n\t\t\t\tself.update_box_pos(bi, x,y)\n\t\t\t\tself.set_tile(x-1,y,'P')\n\t\t\t\tself.set_player_curpos(x-1,y)\n\t\t\t\treturn True\t\n\t\t\telse:\n\t\t\t\treturn False\t\n\t\telif(m==6):\n\t\t\tif((self.get_tile(x-1,y)=='B' or self.get_tile(x-1,y)=='X') and self.get_tile(x+1,y)=='E'):\n\t\t\t\tbi=self.which_box(x-1,y)\n\t\t\t\tif(self.get_tile(x-1,y)=='X'):\n\t\t\t\t\tself.set_tile(x-1,y,'T')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x-1,y,'E')\n\t\t\t\tif self.is_target_tile(x,y):\n\t\t\t\t\tself.set_tile(x,y,'X')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x,y,'B')\n\t\t\t\tself.update_box_pos(bi, x,y)\n\t\t\t\tself.set_tile(x+1,y,'P')\n\t\t\t\tself.set_player_curpos(x+1,y)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telif(m==7):#down\n\t\t\tif((self.get_tile(x,y-1)=='B' or self.get_tile(x,y-1)=='X') and self.get_tile(x,y+1)=='E'):\n\t\t\t\tbi=self.which_box(x,y-1)\n\t\t\t\tif(self.get_tile(x,y-1)=='X'):\n\t\t\t\t\tself.set_tile(x,y-1,'T')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x,y-1,'E')\n\t\t\t\tif self.is_target_tile(x,y):\n\t\t\t\t\tself.set_tile(x,y,'X')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x,y,'B')\n\t\t\t\tself.update_box_pos(bi, x,y)\n\t\t\t\tself.set_tile(x,y+1,'P')\n\t\t\t\tself.set_player_curpos(x,y+1)\n\t\t\t\treturn True\n\t\telif(m==8):\n\t\t\tif((self.get_tile(x,y+1)=='B' or self.get_tile(x,y+1)=='X') and 
self.get_tile(x,y-1)=='E'):\n\t\t\t\tbi=self.which_box(x,y+1)\n\t\t\t\tif(self.get_tile(x,y+1)=='X'):\n\t\t\t\t\tself.set_tile(x,y+1,'T')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x,y+1,'E')\n\t\t\t\tif self.is_target_tile(x,y):\n\t\t\t\t\tself.set_tile(x,y,'X')\n\t\t\t\telse:\n\t\t\t\t\tself.set_tile(x,y,'B')\n\t\t\t\tself.update_box_pos(bi, x,y)\n\t\t\t\tself.set_tile(x,y-1,'P')\n\t\t\t\tself.set_player_curpos(x,y-1)\n\t\t\t\treturn True\n\t\telif(m==-5):\n\t\t\tbi=self.which_box(x+1,y)\n\t\t\tif self.is_target_tile(x,y):\n\t\t\t\tself.set_tile(x,y,'T')\n\t\t\telse:\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\tself.set_tile(x+1,y,'P')\n\t\t\tif self.is_target_tile(x+2,y):\n\t\t\t\tself.set_tile(x+2,y,'X')\n\t\t\telse:\n\t\t\t\tself.set_tile(x+2,y,'B')\n\t\t\tself.set_player_curpos(x+1,y)\n\t\t\tself.update_box_pos(bi,x+2,y)\n\t\t\t\n\t\telif(m==-6):\n\t\t\tbi=self.which_box(x-1,y)\n\t\t\tif self.is_target_tile(x,y):\n\t\t\t\tself.set_tile(x,y,'T')\n\t\t\telse:\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\tself.set_tile(x-1,y,'P')\n\t\t\tif self.is_target_tile(x-2,y):\n\t\t\t\tself.set_tile(x-2,y,'X')\n\t\t\telse:\n\t\t\t\tself.set_tile(x-2,y,'B')\n\t\t\tself.set_player_curpos(x-1,y)\n\t\t\tself.update_box_pos(bi,x-2,y)\n\t\t\t\n\t\telif(m==-7):\n\t\t\tbi=self.which_box(x,y-1)\n\t\t\tif self.is_target_tile(x,y):\n\t\t\t\tself.set_tile(x,y,'T')\n\t\t\telse:\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\tself.set_tile(x,y-1,'P')\n\t\t\tif self.is_target_tile(x,y-2):\n\t\t\t\tself.set_tile(x,y-2,'X')\n\t\t\telse:\n\t\t\t\tself.set_tile(x,y-2,'B')\n\t\t\tself.set_player_curpos(x,y-1)\n\t\t\tself.update_box_pos(bi,x,y-2)\n\t\t\t\n\t\telif(m==-8):\n\t\t\tbi=self.which_box(x,y+1)\n\t\t\tif self.is_target_tile(x,y):\n\t\t\t\tself.set_tile(x,y,'T')\n\t\t\telse:\n\t\t\t\tself.set_tile(x,y,'E')\n\t\t\tself.set_tile(x,y+1,'P')\n\t\t\tif self.is_target_tile(x,y+2):\n\t\t\t\tself.set_tile(x,y+2,'X')\n\t\t\telse:\n\t\t\t\tself.set_tile(x,y+2,'B')\n\t\t\tself.set_player_curpos(x,y+1)\n\t\t\tself.update_box_pos(bi,x,y+2)\t\n\t\t\t\n\tdef reset_position_configuration(self):\n\t\tfor i in range(self.box_num):\n\t\t\tself.set_tile(self.boxes[i][0],self.boxes[i][1], 'E')\n\t\tfor i in range(self.box_num):\t\n\t\t\tself.set_tile(self.target_tile_list[i][0], self.target_tile_list[i][1], 'T')\n\t\tself.set_tile(self.player_curpos[0],self.player_curpos[1], 'E')\n\t\t \t\n\tdef set_position_configuration(self, c):\n\t\tj=0\n\t\tfor i in range(self.box_num):\n\t\t\tself.boxes[i][0]=c[j]\n\t\t\tj+=1\n\t\t\tself.boxes[i][1]=c[j]\n\t\t\tif self.is_target_tile(c[j-1], c[j]):\n\t\t\t\tself.set_tile(self.boxes[i][0],self.boxes[i][1], 'X')\n\t\t\telse:\n\t\t\t\tself.set_tile(self.boxes[i][0],self.boxes[i][1], 'B')\n\t\t\tj+=1\n\n\t\tself.player_curpos[0]=c[j]\n\t\tj+=1\n\t\tself.player_curpos[1]=c[j]\n\t\tself.set_tile(c[j-1],c[j],'P')\n\t\n\tdef create_config_obj(self):\n\t\tpos_conf=[]\n\t\tfor i in range(self.box_num):\n\t\t\tpos_conf.append(self.boxes[i][0])\n\t\t\tpos_conf.append(self.boxes[i][1])\n\t\tpos_conf.append(self.player_curpos[0])\n\t\tpos_conf.append(self.player_curpos[1])\n\t\treturn tuple(pos_conf) \t\t\t\t\n\nclass Tree:\n\tdef __init__(self):\n\t\tself.child=[]\n\t\tself.data=()\n\t\n\tdef create_child(self):\n\t\tself.child.append(Tree())\n\ndef create_config_tree(room): #it takes a single position config and creates a move tree for it. 
\n\tmoves=[1,2,3,4,5,6,7,8]\n\tdepth=0\n\texplored=set()\n\tconf_tree=Tree()\n\tconf_tree.data=room.create_config_obj()\n\texplored.add(room.create_config_obj())\n\trn.shuffle(moves)\n\tchild_q=deque()\n\tnodes_num=0\n\ti=0\n\tfor m in moves:\n\t\tnodes_num+=1\n\t\tmm=False\n\t\tmm=room.make_move(room.player_curpos[0],room.player_curpos[1],m) #mm: move made bool\n\t\tc=room.create_config_obj()\n\t\tif c not in explored: \n\t\t\texplored.add(c)\n\t\t\tconf_tree.create_child()\n\t\t\tconf_tree.child[i].data=c\n\t\t\tchild_q.append(conf_tree.child[i])\n\t\t\ti+=1\n\t\t#reverse the move made\t\n\t\tif m<5 and m%2==0 and mm:\n\t\t\tm-=1\n\t\t\troom.make_move(room.player_curpos[0],room.player_curpos[1],m)\n\t\telif m<5 and mm:\n\t\t\tm+=1\n\t\t\troom.make_move(room.player_curpos[0],room.player_curpos[1],m)\n\t\telif mm:\n\t\t\tm=m*-1\n\t\t\troom.make_move(room.player_curpos[0],room.player_curpos[1],m)\t\n\n\twhile((len(child_q)!=0) and (depth<=300)):\n\t\trn.shuffle(moves)\n\t\tch=child_q.popleft()\n\t\troom.reset_position_configuration()\n\t\troom.set_position_configuration(ch.data)\n\t\ti=0\n\t\tt=0\n\t\tdepth=log(nodes_num,8)\n\t\tfor m in moves:\n\t\t\tnodes_num+=1\n\t\t\tmm=False\n\t\t\tmm=room.make_move(room.player_curpos[0],room.player_curpos[1],m) #mm: move made bool\n\t\t\tc=room.create_config_obj()\n\t\t\tif c not in explored: \n\t\t\t\texplored.add(c)\n\t\t\t\tch.create_child()\n\t\t\t\tch.child[i].data=c\n\t\t\t\tchild_q.append(ch.child[i])\n\t\t\t\ti+=1\n\t\t\tif m<5 and m%2==0 and mm:\n\t\t\t\tm-=1\n\t\t\t\troom.make_move(room.player_curpos[0],room.player_curpos[1],m)\n\t\t\telif m<5 and mm:\n\t\t\t\tm+=1\n\t\t\t\troom.make_move(room.player_curpos[0],room.player_curpos[1],m)\n\t\t\telif mm:\n\t\t\t\tm=m*-1\n\t\t\t\troom.make_move(room.player_curpos[0],room.player_curpos[1],m)\n\t\t\n\treturn conf_tree\t\n\n#-------------score calculator-----------------------------------------\n\nroot_data=()\n\ndef calc_score( tup1, tup2, swaps, cur_box, num_b ):\n\tfor j in range(num_b):\n\t\tt=j\n\t\tif(j!=cur_box and (tup1[j+t] != tup2[j+t] or \n\t\t tup1[j+t+1] != tup2[j+t+1])):\n\t\t\tif cur_box==-1:\n\t\t\t\tcur_box=j\n\t\t\telse:\n\t\t\t\tcur_box=j\n\t\t\t\tswaps+=1\t\t\n\tmanh_d=0\n\tfor j in range(num_b*2):\n\t\tmanh_d+= abs(tup1[j]-tup2[j])\n\tscore_gen = swaps*(manh_d)\n\treturn score_gen, cur_box\n\n#for depth first traversal of a tree\ndef tree_dfs(conf_tree, max_score, swaps, cur_box, parent_d, num_b, max_config=(0,0,0,0)):\n\tif parent_d==-1:\n\t\tpass\n\telse:\n\t\tscore_gen, cur_box = calc_score(root_data, conf_tree.data, swaps, cur_box, num_b)\n\t\tif score_gen>max_score:\n\t\t\tmax_score=score_gen\n\t\t\tmax_config=conf_tree.data\t\n\tfor i in range(len(conf_tree.child)):\n\t\tparent_d=conf_tree.data\n\t\tmax_score, max_config = tree_dfs(conf_tree.child[i], max_score, swaps, cur_box, parent_d, num_b, max_config)\n\t\n\treturn max_score, max_config\t\t\n\t\n#calculates score for one tree\ndef score_controller(conf_tree):\n\tif conf_tree.child==[]:\n\t\treturn 0, 0\n\tglobal root_data\n\troot_data=conf_tree.data\n\tnum_b=int((len(conf_tree.data)-2)/2)\n\tscore=0\n\tmax_config=()\n\tscore, max_config = tree_dfs(conf_tree, 0, 0, -1, -1, num_b)\n\treturn score, max_config\n\ndef level_generator(width, height, num_box):\n\tfor i in range(10):\n\t\trm=Room(height, width, num_box)\n\t\trm.topology_gen(int(1.5*(height+width)))\n\t\trm.position_configuration()\n\t\ttre=create_config_tree(rm)\t\t\t\n\t\tscore, max_config = score_controller(tre)\n\t\tif 
score>0:\n\t\t\trm.reset_position_configuration()\n\t\t\trm.set_position_configuration(max_config)\n\t\t\treturn rm.room\n\t\tdel rm\n\t\tdel tre\n","repo_name":"krudutta/gym-sokoban","sub_path":"gym_sokoban/envs/level_generator.py","file_name":"level_generator.py","file_ext":"py","file_size_in_byte":11622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"}
+{"seq_id":"2157695047","text":"# Import libraries\nimport numpy as np # Numeric and matrix computation\nimport pandas as pd # Optional: good package for manipulating data\nimport sklearn as sk # Package with learning algorithms implemented\n\n\ndef test_profe():\n url = \"http://archive.ics.uci.edu/ml/machine-learning-databases/ionosphere/ionosphere.data\"\n df = pd.read_csv(url,header =None)\n print(type(df))\n print(df.head())\n\n # No preprocessing needed. Numerical and scaled data\n # Separate data from labels\n\n y=df[34].values\n # print(y)\n X=df.values[:,0:34]\n\n from sklearn.model_selection import cross_val_score\n #from sklearn.linear_model import LogisticRegression\n from sklearn.naive_bayes import GaussianNB\n from sklearn.ensemble import VotingClassifier\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.model_selection import GridSearchCV\n\n cv=50\n\n clf1 = GaussianNB()\n\n params = {'n_neighbors':list(range(1,30,2)), 'weights':('distance','uniform')}\n knc = KNeighborsClassifier()\n clf = GridSearchCV(knc, param_grid=params,cv=cv,n_jobs=-1) # If cv is integer, by default is Stratifyed\n clf.fit(X, y)\n print(\"Best Params fo Knn=\",clf.best_params_, \"Accuracy=\", clf.best_score_)\n parval=clf.best_params_\n clf2 = KNeighborsClassifier(n_neighbors=parval['n_neighbors'],weights=parval['weights'])\n\n clf3 = DecisionTreeClassifier(criterion='entropy')\n\n\n for clf, label in zip([clf1, clf2, clf3], ['Naive Bayes','Knn (3)', 'Dec. Tree', ]):\n scores = cross_val_score(clf, X, y, cv=cv, scoring='accuracy')\n print(\"Accuracy: %0.3f [%s]\" % (scores.mean(), label))\n\n\ndef test_naive():\n path = '../dataset_diabetes/diabetic_data_output.csv'\n df = pd.read_csv(path)\n #print (df.corr())\n def get_redundant_pairs(df):\n '''Get diagonal and lower triangular pairs of correlation matrix'''\n pairs_to_drop = set()\n cols = df.columns\n for i in range(0, df.shape[1]):\n for j in range(0, i + 1):\n pairs_to_drop.add((cols[i], cols[j]))\n return pairs_to_drop\n\n def get_top_abs_correlations(df, n=5):\n au_corr = df.corr().abs().unstack()\n labels_to_drop = get_redundant_pairs(df)\n au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)\n return au_corr[0:n]\n\n print(\"Top Absolute Correlations\")\n print(get_top_abs_correlations(df, 10))\n #print (df.values[:,[1,6,7,8,9]])\n # insulin\n # admission_type_id, discharge_disposition_id\n # admission_source_id\n # time_in_hospital\n # print(df[\"insulin\"].values)\n #\n # print(df.values[:,0:34])\n # print(type(df.values))\n # print(type(df.columns.values.tolist().index(\"insulin\")))\n # print(df.columns.values.tolist().index(\"insulin\"))\n # print(type(np.where(df.columns.values == \"insulin\")))\n # print(np.where(df.columns.values == \"insulin\"))\n\nif __name__ == \"__main__\":\n test_naive()","repo_name":"paulovick/MD-Projecte2-diabetes","sub_path":"test/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"39707593524","text":"# sync.py\nimport logging\nimport time\nfrom datetime import date, datetime, timedelta\nfrom json import JSONDecodeError\n\nfrom django.db.models import Model, Q, QuerySet\n\nfrom apps.teams.models import Team\nfrom apps.zp.fetch import ZPSession\nfrom apps.zp.models import (\n AllResults,\n Profile,\n Results,\n TeamPending,\n TeamResults,\n TeamRiders,\n)\n\n\ndef create_or_update_model(self, zp_id, api, data_set):\n # Create a dictionary with dynamic field names\n kwargs = {\n \"zp_id\": zp_id,\n api: data_set, # 'api' is the variable field name\n }\n # Unpack kwargs as arguments to get_or_create\n obj, created = self.model.objects.get_or_create(**kwargs)\n return obj, created\n\n\nclass FetchJsonRecords:\n \"\"\"\n This adds a new json dataset to the model for each zp_id. It does not update any existing records.\n This will most likely create a new record each time.\n \"\"\"\n\n def __init__(self, api: str, zp_id: int | list | str | QuerySet, model: Model):\n self.zps = ZPSession()\n self.try_count = 0\n self.api = api\n self.zp_id = zp_id\n self.model = model\n\n def fetch(self):\n if isinstance(self.zp_id, int | list | QuerySet): # queryset must be from .values_list(\"zp_id\", flat=True)\n zp_ids = set(self.zp_id)\n elif isinstance(self.zp_id, str) and self.zp_id == \"all\": # get all the Model objects\n zp_ids = set(self.model.objects.values_list(\"zp_id\", flat=True))\n logging.info(f\"zp_id count: {len(zp_ids)}\")\n else:\n raise ValueError(\"zp_id must be int, list, or 'all'\")\n\n for zp_id in zp_ids:\n logging.info(f\"Get {self.api} data: {zp_id}\")\n try:\n data_set = self.zps.get_api(id=zp_id, api=self.api)[self.api]\n if \"data\" in data_set:\n data_set = data_set[\"data\"]\n if len(data_set) > 0:\n obj, created = create_or_update_model(self, zp_id, self.api, data_set)\n if created:\n logging.info(f\"Created new {self.zp_id} : {zp_id}\")\n else:\n logging.info(f\"Updated {self.zp_id} : {zp_id}\")\n self.try_count = 0\n except JSONDecodeError as e:\n self.try_count += 1\n logging.warning(f\"Retry get {self.api} number {self.try_count} data: {zp_id}\")\n logging.warning(f\"{e}\")\n except Exception as e:\n self.try_count += 1\n logging.warning(f\"Failed to get data: {e}\")\n logging.warning(f\"Retry get {self.api} number {self.try_count} data: {zp_id}\")\n if self.try_count >= 4:\n logging.error(f\"to many retries: {self.api} last id: {zp_id}\")\n break\n time.sleep(5 + self.try_count * 5)\n\n\nclass FetchTeamResults(FetchJsonRecords):\n def __init__(self):\n super().__init__(api=\"team_results\", zp_id=Team.objects.values_list(\"zp_id\", flat=True), model=TeamResults)\n\n\nclass FetchTeamPending(FetchJsonRecords):\n def __init__(self):\n super().__init__(api=\"team_pending\", zp_id=Team.objects.values_list(\"zp_id\", flat=True), model=TeamPending)\n\n\nclass FetchTeamRiders(FetchJsonRecords):\n def __init__(self):\n super().__init__(api=\"team_riders\", zp_id=Team.objects.values_list(\"zp_id\", flat=True), model=TeamRiders)\n\n\nclass UpdateJsonRecords:\n \"\"\"\n This adds a UPDATES a json dataset in a model object.\n \"\"\"\n\n def __init__(self, api: str, zp_id: int | list | str | QuerySet, model: Model):\n self.zps = ZPSession()\n self.try_count = 0\n self.api = api\n self.zp_id = zp_id\n self.model = model\n\n def update(self):\n if isinstance(self.zp_id, int | list | QuerySet):\n zp_ids = list(self.zp_id)\n elif isinstance(self.zp_id, str) and self.zp_id == \"all\":\n zp_ids = set(self.model.objects.values_list(\"zp_id\", flat=True))\n 
logging.info(f\"zp_id count: {len(zp_ids)}\")\n else:\n raise ValueError(\"zp_id must be int, list, or 'all'\")\n for zp_id in zp_ids:\n logging.info(f\"Get {self.api} data: {zp_id}\")\n if self.try_count >= 4:\n logging.error(f\"To many errors: {self.api} last zp_id: {zp_id}\")\n break\n time.sleep(3 + self.try_count * 5)\n try:\n data_set = self.zps.get_api(id=zp_id, api=self.api)[self.api]\n if [\"data\"] == list(data_set.keys()):\n data_set = data_set[\"data\"]\n if self.api in [\n \"profile_profile\",\n ]:\n data_set = sorted(data_set, key=lambda x: int(x.get(\"event_date\", 0)), reverse=True)\n except JSONDecodeError:\n self.try_count += 1\n logging.warning(f\"JSONDecodeError: {self.api}, Retry count: {self.try_count} zp_id: {zp_id}\")\n # logging.warning(f\"{e}\")\n obj, created = self.model.objects.get_or_create(zp_id=zp_id)\n obj.error = \"JSONDecodeError\"\n obj.save()\n continue\n except Exception as e:\n self.try_count += 1\n logging.warning(f\"Failed to get data: {e}\")\n logging.warning(f\"Failded api: {self.api} retry count: {self.try_count} zp_id: {zp_id}\")\n obj, created = self.model.objects.get_or_create(zp_id=zp_id)\n obj.error = f\"fetch error: {str(e)}\"\n obj.save()\n continue\n\n try:\n # TODO: This is an exception for the profile field name\n api = \"profile\" if self.api == \"profile_profile\" else self.api\n obj, created = self.model.objects.get_or_create(zp_id=zp_id) # this must uniquly identify the object\n current_data = getattr(obj, api) if getattr(obj, api) else []\n if not created and len(data_set) >= len(current_data):\n logging.info(f\"Updated {self.model} for zp_id: {zp_id}\")\n setattr(obj, api, data_set)\n obj.error = \"\"\n if self.api == \"profile_profile\":\n obj.status[\"sorted\"] = True\n obj.save()\n elif created and len(data_set) > 0:\n logging.info(f\"Created {self.model} for zp_id: {zp_id}\")\n setattr(obj, api, data_set)\n if self.api == \"profile_profile\":\n obj.status[\"sorted\"] = True\n obj.error = \"\"\n obj.save()\n elif created and len(data_set) == 0:\n logging.warning(f\"Empty data set for zp_id: {zp_id}\")\n if self.api == \"profile_profile\":\n obj.status[\"sorted\"] = False\n obj.error = f\"Empty data set: {data_set}\"\n obj.save()\n elif len(data_set) < len(current_data):\n logging.warning(f\"Data set < existing data: {api}, zp_id: {zp_id}\")\n obj.error = \"Dataset < existing data\"\n obj.save()\n else:\n continue\n self.try_count += 0\n except Exception as e:\n self.try_count += 1\n logging.warning(f\"Failed: {self.api} count: {self.try_count} zp_id: {zp_id}\")\n logging.warning(f\": {e}\")\n obj.error = str(e)\n obj.save()\n\n\nclass UpdateProfiles(UpdateJsonRecords):\n \"\"\"See also management command and task\"\"\"\n\n # TODO: At some point we will have more then 100 inactive profiles that we keep trying to update. 
Then we need to add a filter for inactive profiles.\n def __init__(self):\n super().__init__(\n api=\"profile_profile\",\n zp_id=Profile.objects.filter(error=\"\", status__needs_update=True)\n .order_by(\"modified_at\")\n .values_list(\"zp_id\", flat=True)[:100],\n model=Profile,\n )\n\n\nclass UpdateProfileErrors(UpdateJsonRecords):\n def __init__(self):\n super().__init__(\n api=\"profile_profile\",\n zp_id=Profile.objects.filter(error__icontains=\"Empty data set\")\n .order_by(\"modified_at\")\n .values_list(\"zp_id\", flat=True)[:100],\n model=Profile,\n )\n\n\ndef update_last_event(self):\n logging.info(f\"Review {len(self.zp_ids)} profiles\")\n for zp_id in self.zp_ids:\n try:\n obj = Profile.objects.get(zp_id=zp_id)\n obj.status[\"last_event\"] = (\n datetime.today().date() - datetime.fromtimestamp(obj.profile[0][\"event_date\"]).date()\n ).days\n obj.save()\n except Exception as e:\n logging.error(f\"Failed to update last event: {zp_id}\\n {e}\")\n continue\n\n\nclass UpdateSelected(UpdateJsonRecords):\n def __init__(self, api, zp_id, model):\n self.api = api\n self.zp_id = zp_id\n self.model = Profile\n self.zps = ZPSession()\n self.try_count = 0\n\n\nclass FetchAllResults:\n \"\"\"\n Get list of resent event results from ZP and update the Results table.\n \"\"\"\n\n def __init__(self):\n self.zps = ZPSession()\n self.try_count = 0\n self.api = \"all_results\"\n self.model = AllResults\n\n def fetch(self):\n # Get the data\n try:\n data_set = self.zps.get_api(id=None, api=self.api)[self.api]\n data_set = data_set[\"data\"]\n except JSONDecodeError as e:\n logging.error(f\"JSONDecodeError: {self.api} \\n{e}\")\n return None\n except Exception as e:\n logging.error(f\"Unknown getting: {self.api} \\n{e}\")\n return None\n\n # Add the data to the model\n for event in data_set:\n try:\n obj, created = self.model.objects.get_or_create(zp_id=event[\"zid\"])\n if created:\n logging.info(f\"Created new {self.model} for zp_id: {event['zid']}\")\n obj.event = event\n obj.zp_id = event[\"zid\"]\n obj.save()\n else:\n logging.info(f\"Already have {self.model} for zp_id: {event['zid']}\")\n except Exception as e:\n logging.error(f\"Unknown creating: {self.api} \\n{e}\")\n\n\n#############################################################\n#### Inter table data migrations and Table field updates ####\n#############################################################\n\n\nclass ProfilesFromTeams:\n \"\"\"See also management command\"\"\"\n\n def update(self):\n logging.info(\"Move profiles from teams to profiles table\")\n # zp_team_riders = TeamRiders.objects.all()\n zp_team_riders = TeamRiders.objects.order_by(\"zp_id\", \"-modified_at\").distinct(\"zp_id\")\n for team in zp_team_riders:\n logging.info(f\"Adding profiles from team: {team.zp_id}\")\n for rider in team.team_riders:\n logging.info(f\"Get or creat zp Profile: {rider['zwid']}\")\n obj, created = Profile.objects.get_or_create(zp_id=int(rider[\"zwid\"]))\n logging.info(f\"Created? 
{created} rider Profile{rider['zwid']}\")\n\n\nclass ResultsFromProfiles:\n \"\"\"\n Migrate results from profiles to Results Table\n See also management command\n \"\"\"\n\n def update(self, days=60):\n logging.info(\"Move results from profiles to results table\")\n zp_profiles = Profile.objects.all()\n count = zp_profiles.count()\n for i, profile in enumerate(zp_profiles):\n logging.info(f\"Adding results from profile: {profile.zp_id}\")\n logging.info(f\"total profile: {count}, remaining{count - i}\")\n if profile.profile:\n if not isinstance(profile.profile[0], dict):\n logging.warning(f\"not a valid profile: {profile.zp_id}\")\n continue\n for result in profile.profile:\n try:\n event_date = datetime.fromtimestamp(result[\"event_date\"]).date()\n obj, created = Results.objects.get_or_create(\n zp_id=int(result[\"zid\"]), zwid=profile.zp_id, defaults={\"event_date\": event_date}\n )\n if created or not obj.tid:\n logging.info(f\"Created new result: (zid, zwid): {result['zid']}, {result['zwid']}\")\n obj.team = result.get(\"tname\", \"\")\n obj.tid = result.get(\"tid\", \"\")\n obj.name = result.get(\"name\", \"\")\n obj.event_title = result.get(\"event_title\", \"\")\n obj.results = result\n obj.save()\n else:\n logging.info(f\"Result exisits: (zid, zwid): {result['zid']}, {result['zwid']}\")\n if event_date > date.today() - timedelta(days=days):\n logging.info(\n f\"Updating result within {days} days: (zid, zwid): {result['zid']}, {result['zwid']}\"\n )\n obj.team = result.get(\"tname\", \"\")\n obj.tid = result.get(\"tid\", \"\")\n obj.name = result.get(\"name\", \"\")\n obj.event_title = result.get(\"event_title\", \"\")\n obj.results = result\n obj.save()\n\n except TypeError as e:\n logging.error(f\"Failed to get or create result:\\n {e}\")\n data = {\n c: result.get(c, \"_\")\n for c in [\"event_date\", \"zid\", \"zwid\", \"tname\", \"tid\", \"name\", \"event_title\"]\n }\n logging.error(f\"result:\\n {data}\")\n except Exception as e:\n logging.error(f\"Failed to get or create result: {e}\")\n data = {\n c: result.get(c, \"_\")\n for c in [\"event_date\", \"zid\", \"zwid\", \"tname\", \"tid\", \"name\", \"event_title\"]\n }\n logging.error(f\"result:\\n {data}\")\n\n\nclass SetLastEventProfile:\n \"\"\"\n Some profiles are very inactive so we want to update the profile less often.\n There is a Profile model prperty but it is faster if we set a filed that is the num,ber of days since last event.\n Then we can update less often.\n \"\"\"\n\n def update(self):\n logging.info(\"Set days since last event\")\n zp_profiles = Profile.objects.all()\n for profile in zp_profiles:\n if profile.profile:\n try:\n profile.status[\"last_event\"] = (\n date.today() - datetime.fromtimestamp(profile.profile[0][\"event_date\"]).date()\n ).days\n profile.save()\n except Exception as e:\n logging.warning(f\"Failed to set last event: {e}\")\n continue\n\n\ndef sort_json_event_date():\n \"\"\"See also management command\"\"\"\n logging.info(\"Sort the profile json field\")\n profiles = Profile.objects.filter(status__sorted=False)\n for p in profiles:\n try:\n if not p.profile:\n continue\n if not isinstance(p.profile[0], dict):\n logging.warning(f\"not a valid profile: {p.zp_id}\")\n continue\n p.profile = sorted(p.profile, key=lambda x: int(x.get(\"event_date\", 0)), reverse=True)\n p.status[\"sorted\"] = True\n p.save()\n except Exception as e:\n logging.warning(f\" issues with: {p.zp_id}\\n {e}\")\n continue\n\n\nclass FetchResults:\n def __init__(self):\n self.zps = ZPSession()\n self.try_count = 
0\n self.api_view = \"event_results_view\"\n self.api_zwift = \"event_results_zwift\"\n self.api_history = \"event_race_history\"\n self.model = Results\n\n def fetch(self):\n \"\"\"\n Create events from results\n \"\"\"\n logging.info(\"Create or update results\")\n # These are the results that need updating\n result_zp_ids = (\n Results.objects.filter(Q(zp_view__isnull=True) | Q(zp_zwift__isnull=True) | Q(race_history__isnull=True))\n .values_list(\"zp_id\", flat=True)\n .distinct()\n )\n # AllResults missing history data whiich has the date.\n all_results_zp_ids = (\n AllResults.objects.filter(race_history__isnull=False).values_list(\"zp_id\", flat=True).distinct()\n )\n try:\n history = self.zps.get_api(id=None, api=self.api_history)[self.api_history][\"data\"]\n history_zp_ids = {row[\"zid\"] for row in history}\n history = {row[\"zid\"]: row for row in history}\n except Exception as e:\n logging.error(f\"Failed to get history:\\n{e}\")\n raise\n # first lets get all the uknown events.from the history\n unknown_events = history_zp_ids - set(all_results_zp_ids)\n logging.info(f\"history_zp_ids, - all_results_zp_ids: {len(history_zp_ids)} - {len(all_results_zp_ids)}\")\n for zp_id in history_zp_ids:\n obj, created = AllResults.objects.get_or_create(zp_id=zp_id)\n obj.event_date = datetime.fromtimestamp(history[zp_id][\"tm\"]).date()\n obj.race_history = history[zp_id]\n obj.save()\n\n # Now we have made all unkown events we can get the results (view and zwift).\n # We need to get the results for all the events that are missing data.\n all_results_missing_data_zp_ids = AllResults.objects.filter(\n (Q(view__isnull=True) | Q(zwift__isnull=True)) & Q(event_date__gte=date.today() - timedelta(days=365))\n )\n errors = 0\n for count, result in enumerate(all_results_missing_data_zp_ids):\n if errors >= 4 or count >= 100:\n break\n if result.view is None:\n try:\n data_view = self.zps.get_api(id=result.zp_id, api=self.api_view)[self.api_view][\"data\"]\n result.view = data_view\n errors = 0\n except Exception as e:\n logging.warning(f\"Failed to get view data: {result.zp_id}\\n {e}\")\n errors += 1\n if result.zwift is None:\n try:\n data_zwift = self.zps.get_api(id=result.zp_id, api=self.api_zwift)[self.api_zwift][\"data\"]\n result.zwift = data_zwift\n errors = 0\n except Exception as e:\n logging.warning(f\"Failed to get zwift data: {result.zp_id}\\n {e}\")\n errors += 1\n result.save()\n time.sleep(5 + self.try_count * 5)\n","repo_name":"vincentdavis/VeloTeams","sub_path":"apps/zp/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":19158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"20216251639","text":"from typing import Sequence, Tuple, Mapping, Any\nfrom core import computation_graph\nimport time\n\n\n# TODO(@jakeval): Remove the debugging name attribute from the Node class.\n\n\ndef copy_node(node: computation_graph.Node) -> computation_graph.Node:\n \"\"\"Shallow copies a node without its edges.\n\n Args:\n node: The node to copy.\n\n Returns:\n A copied node identical to the original but without any edges.\n \"\"\"\n if isinstance(node, computation_graph.Artifact):\n new_node = computation_graph.Artifact(node._data)\n elif isinstance(node, computation_graph.Process):\n new_node = computation_graph.Process(node._transformation)\n new_node.returns_indices = node.returns_indices\n else:\n raise RuntimeError(\n f\"{node} is not an Artifact or a Process. Did you forget to call the optex decorator?\"\n )\n new_node.name = node.name\n return new_node\n\n\ndef get_composed_children(\n artifact: computation_graph.Artifact,\n) -> Sequence[Tuple[str, computation_graph.Process]]:\n \"\"\"Gets the innermost children of an Artifact, even if they are composed\n within another OptexProcess.\n\n Normally, the children of an Artifact are the processes that directly use\n that Artifact, even if those processes are just the compositions of other\n processes that use it. This function recurses over the composed processes\n using this Artifact, eliminating all composition functions and including\n only the leaf process children.\n\n Args:\n artifact: The artifact whose children to find.\n\n Returns:\n A list of (key, child) pairs where each child is a child node and each\n key is the role connecting that child to the original Artifact.\n \"\"\"\n composed_children = []\n top_level_children = set(zip(artifact.children, artifact.children_roles))\n while top_level_children:\n child, edge_key = top_level_children.pop()\n\n if not edge_key:\n continue\n\n if not child.child_processes:\n composed_children.append((edge_key, child))\n else:\n child_roles = []\n child_processes = []\n for child_process in child.child_processes:\n for child_parent, child_role in zip(\n child_process.parents, child_process.parent_roles\n ):\n if child_parent == artifact:\n child_roles.append(child_role)\n child_processes.append(child_process)\n continue\n top_level_children = top_level_children.union(\n zip(child_processes, child_roles)\n )\n\n return composed_children\n\n\ndef make_expanded_graph_copy(\n graph: computation_graph.Graph,\n) -> computation_graph.EdgeGraph:\n \"\"\"Makes a copy of the graph where all composition processes have been\n replaced with their subgraphs.\n\n Args:\n graph: The graph to copy.\n\n Returns:\n A new graph where composition processes have been replaced by the\n subgraphs they contain.\n \"\"\"\n explored_set = set()\n new_nodes = {}\n old_nodes = {}\n\n new_inputs: Sequence[computation_graph.Node] = []\n for old_input in graph.inputs:\n new_input = copy_node(old_input)\n new_inputs.append(new_input)\n new_nodes[old_input] = new_input\n old_nodes[new_input] = old_input\n\n open_set = set(new_inputs)\n while open_set:\n new_node = open_set.pop()\n if new_node in explored_set:\n continue\n new_node.agents.append(graph.name)\n old_node = old_nodes[new_node]\n\n if isinstance(old_node, computation_graph.Artifact):\n old_children = get_composed_children(old_node)\n elif isinstance(old_node, computation_graph.Process):\n old_children = zip(old_node.children_roles, old_node.children)\n\n for old_key, old_child in old_children:\n if old_child in new_nodes:\n new_child = 
new_nodes[old_child]\n else:\n new_child = copy_node(old_child)\n new_nodes[old_child] = new_child\n old_nodes[new_child] = old_child\n new_child.parents.append(new_node)\n new_child.parent_roles.append(old_key)\n new_node.children.append(new_child)\n new_node.children_roles.append(old_key)\n open_set.add(new_child)\n explored_set.add(new_node)\n\n # TODO(@jakeval): Why can graph.outputs not be a list? This is a bug.\n try:\n outputs = [new_nodes[old_output] for old_output in graph.outputs]\n except TypeError:\n outputs = [new_nodes[graph.outputs]]\n\n return computation_graph.EdgeGraph.from_output_artifacts(\n outputs, graph.name\n )\n\n\ndef topological_sort_graph(\n edges: Sequence[Tuple[computation_graph.Node, computation_graph.Node]]\n) -> Sequence[computation_graph.Node]:\n node_count = 0\n node_to_int = {}\n int_to_node = {}\n int_edges = []\n for _, parent_node, child_node in edges:\n if parent_node not in node_to_int:\n node_to_int[parent_node] = node_count\n int_to_node[node_count] = parent_node\n node_count += 1\n if child_node not in node_to_int:\n node_to_int[child_node] = node_count\n int_to_node[node_count] = child_node\n node_count += 1\n parent_int = node_to_int[parent_node]\n child_int = node_to_int[child_node]\n int_edges.append((parent_int, child_int))\n\n sorted_nodes = topologicalSort(node_count, int_edges)\n return [int_to_node[node_int] for node_int, _ in sorted_nodes]\n\n\ndef topologicalSort(totalVertices, prerequisites):\n ##make graph\n graph = {}\n for edge in prerequisites:\n if edge[0] not in graph:\n graph[edge[0]] = [edge[1]]\n else:\n graph[edge[0]].append(edge[1])\n\n n = totalVertices\n indegree = [0] * n\n answer = []\n for key in graph:\n for nbrs in graph[key]:\n indegree[nbrs] += 1\n queue = []\n for i in range(0, n):\n if indegree[i] == 0:\n queue.append((i, 0))\n\n while len(queue) > 0:\n rem = queue.pop(0)\n answer.append((rem[0], rem[1]))\n if rem[0] in graph:\n for child in graph.get(rem[0]):\n indegree[child] -= 1\n if indegree[child] == 0:\n queue.append((child, rem[1] + 1))\n\n if len(answer) != n:\n raise RuntimeError(\n \"Graph had cycles -- topological sort is impossible.\"\n )\n\n return answer\n\n\n# @TODO(@jakeval): This ignores roles\ndef get_merge_candidates(\n old_node: computation_graph.Node,\n old_to_new: Mapping[computation_graph.Node, computation_graph.Node],\n) -> Sequence[computation_graph.Node]:\n old_parents = old_node.parents\n new_parents = [old_to_new[old_parent] for old_parent in old_parents]\n\n children_sets = []\n for parent in new_parents:\n children_sets.append(set(parent.children))\n if children_sets:\n return children_sets[0].intersection(*children_sets[1:])\n else:\n return []\n\n\ndef can_merge(\n old_node: computation_graph.Node,\n new_node: computation_graph.Node,\n old_to_new: Mapping[computation_graph.Node, computation_graph.Node],\n):\n \"\"\"Returns true if the original node can be merged into an already-existing\n node in the new graph.\n\n Args:\n old_node: The node from the original graph.\n new_node: The merge candidate in the new graph.\n old_to_new: A mapping from original graph nodes to merged graph\n nodes.\"\"\"\n\n # Check that their types match\n if type(old_node) != type(new_node):\n return False\n\n # Check that process transformation functions match\n if isinstance(old_node, computation_graph.Process):\n if old_node._transformation != new_node._transformation:\n return False\n\n # Check that parents match\n old_to_new_parents = [\n (role, old_to_new[parent])\n for role, parent in 
zip(old_node.parent_roles, old_node.parents)\n ]\n if set(old_to_new_parents) == set(\n zip(new_node.parent_roles, new_node.parents)\n ):\n return True\n else:\n return False\n\n\ndef add_input_artifacts(inputs):\n old_to_new = {}\n for new_artifact in inputs:\n for graph_name, old_artifacts in inputs[new_artifact].items():\n for old_artifact in old_artifacts:\n new_artifact.agents.append(graph_name)\n old_to_new[old_artifact] = new_artifact\n\n return old_to_new\n\n\ndef merge_nodes(\n old_node: computation_graph.Node,\n merge_candidate: computation_graph.Node,\n old_to_new: Mapping[computation_graph.Node, computation_graph.Node],\n) -> Mapping[computation_graph.Node, computation_graph.Node]:\n old_to_new[old_node] = merge_candidate\n merge_candidate.agents += old_node.agents\n return old_to_new\n\n\ndef add_new_node(\n old_node: computation_graph.Node,\n old_to_new: Mapping[computation_graph.Node, computation_graph.Node],\n) -> Tuple[\n computation_graph.Node,\n Mapping[computation_graph.Node, computation_graph.Node],\n Mapping[computation_graph.Node, computation_graph.Node],\n]:\n new_node = copy_node(old_node)\n new_node.agents = old_node.agents\n old_to_new[old_node] = new_node\n return new_node, old_to_new\n\n\ndef add_edges(\n old_node: computation_graph.Node,\n new_node: computation_graph.Node,\n old_to_new: Mapping[computation_graph.Node, computation_graph.Node],\n) -> None:\n for role, old_parent in zip(old_node.parent_roles, old_node.parents):\n new_parent = old_to_new[old_parent]\n new_node.parents.append(new_parent)\n new_node.parent_roles.append(role)\n new_parent.children.append(new_node)\n new_parent.children_roles.append(role)\n\n\ndef add_nodes(\n old_nodes: Sequence[computation_graph.Node],\n old_to_new: Mapping[computation_graph.Node, computation_graph.Node],\n) -> Tuple[\n Mapping[computation_graph.Node, computation_graph.Node],\n Mapping[computation_graph.Node, computation_graph.Node],\n]:\n for old_node in old_nodes:\n if old_node in old_to_new: # the node was previously added\n continue\n\n merge_candidates = get_merge_candidates(old_node, old_to_new)\n\n # Try to merge the node in to the new graph.\n did_merge = False\n for merge_candidate in merge_candidates:\n if can_merge(old_node, merge_candidate, old_to_new):\n old_to_new = merge_nodes(old_node, merge_candidate, old_to_new)\n did_merge = True\n break\n\n # If it can't merge, add a new node to the new graph.\n if not did_merge:\n new_node, old_to_new = add_new_node(old_node, old_to_new)\n add_edges(old_node, new_node, old_to_new)\n\n return old_to_new\n\n\ndef merge_graphs(\n graphs: Sequence[computation_graph.EdgeGraph],\n inputs: Mapping[\n computation_graph.Artifact,\n Mapping[str, Sequence[computation_graph.Artifact]],\n ],\n name: str,\n):\n \"\"\" \"\"\"\n graph_nodes = dict(\n [(graph.name, topological_sort_graph(graph.edges)) for graph in graphs]\n )\n\n inputs_copy = {}\n for node, v in inputs.items():\n node_copy = copy_node(node)\n inputs_copy[node_copy] = v\n\n old_to_new = add_input_artifacts(inputs_copy)\n\n for graph_name, nodes in graph_nodes.items():\n old_to_new = add_nodes(nodes, old_to_new)\n\n inputs = {}\n all_inputs = set()\n for graph in graphs:\n new_inputs = [old_to_new[input] for input in graph.inputs]\n inputs[graph.name] = new_inputs\n all_inputs = all_inputs.union(new_inputs)\n inputs[\"all_inputs\"] = list(all_inputs)\n\n outputs = {}\n all_outputs = set()\n for graph in graphs:\n new_outputs = [old_to_new[output] for output in graph.outputs]\n outputs[graph.name] = new_outputs\n 
all_outputs = all_outputs.union(new_outputs)\n outputs[\"all_outputs\"] = list(all_outputs)\n\n return (\n computation_graph.EdgeGraph.from_output_artifacts(\n list(all_outputs), name=name\n ),\n inputs,\n outputs,\n )\n\n\ndef get_inputs(call_list):\n \"\"\"\n [(graph, transformation, role, value)]\n\n {\n merged_artifact: {\n 'graph_name': [input_artifacts]\n }\n }\n \"\"\"\n inputs = {}\n\n for graph, transformation, role, artifact in call_list:\n input_artifact = None\n for input in graph.inputs:\n for child_role, child in zip(input.children_roles, input.children):\n if (child._transformation == transformation.__wrapped__) and (\n child_role == role\n ):\n input_artifact = input\n break\n if input_artifact is not None:\n break\n if not input_artifact:\n raise RuntimeError(\n f\"Can't find the corresponding process for {transformation.__wrapped__}, {role}\"\n )\n\n if artifact in inputs:\n if graph.name in inputs[artifact]:\n inputs[artifact][graph.name].append(input_artifact)\n else:\n inputs[artifact][graph.name] = [input_artifact]\n else:\n inputs[artifact] = {graph.name: [input_artifact]}\n return inputs\n\n\n# TODO(@jakeval): Because values are cached per-artifact and not per-process,\n# processes which return multiple values must be recomputed.\ndef compute_artifact_ancestors(artifact, artifact_values):\n \"\"\"Recursively computes the value of an artifact and its ancestors given\n sufficient values for its ancestors.\n\n This is used to lazily compute an artifact given values for its root\n ancestors. The function will recursively evaluate all of the necessary\n ancestor values starting from the root ancestors until it computes the\n target artifact value.\n\n Args:\n artifact: The artifact whose value and ancestors to compute.\n artifact_values: The values of the artifact's root ancestors.\n\n Returns:\n The values of the artifact and all its ancestors.\n \"\"\"\n # artifacts only have 1 parent\n process = artifact.parents[0]\n process_args = {}\n for arg_name, parent_artifact in zip(\n process.parent_roles, process.parents\n ):\n if parent_artifact not in artifact_values:\n artifact_values = compute_artifact_ancestors(\n parent_artifact, artifact_values\n )\n process_args[arg_name] = artifact_values[parent_artifact]\n if process.returns_indices:\n index = process.returns_indices[artifact.parent_roles[0]]\n start_time = time.time()\n artifact_values[artifact] = process._transformation(**process_args)[\n index\n ]\n process.execution_time = time.time() - start_time\n else:\n start_time = time.time()\n artifact_values[artifact] = process._transformation(**process_args)\n process.execution_time = time.time() - start_time\n return artifact_values\n\n\ndef execute_graph(\n graph: computation_graph.Graph,\n inputs: Mapping[computation_graph.Artifact, Any],\n) -> Mapping[computation_graph.Artifact, Any]:\n \"\"\"Executes a statically-generated computation graph on some inputs.\n\n graph: The graph to execute.\n inputs: A mapping from input Artifact to the value it should take on in\n the computation.\n\n Returns:\n A mapping from output Artifact to the value it computes to.\n \"\"\"\n output_values = {}\n artifact_values = inputs.copy()\n\n for output in graph.outputs:\n artifact_values = compute_artifact_ancestors(output, artifact_values)\n output_values[output] = artifact_values[output]\n\n return output_values\n\n\ndef get_merged_inputs(graph_inputs, call_list):\n \"\"\"\n Given:\n {\n value: (graph_name, transformation, role)\n }\n\n Return:\n {\n value: 
merged_graph_input_artifact\n }\n \"\"\"\n inputs = {}\n\n for value, (graph_name, transformation, role) in call_list.items():\n input_artifact = None\n for input in graph_inputs[graph_name]:\n for child_role, child in zip(input.children_roles, input.children):\n if (child._transformation == transformation.__wrapped__) and (\n child_role == role\n ):\n input_artifact = input\n break\n if input_artifact is not None:\n break\n if not input_artifact:\n raise RuntimeError(\n f\"Can't find the corresponding process for {transformation.__wrapped__}, {role}\"\n )\n inputs[input_artifact] = value\n\n return inputs\n\n\ndef execute_merged_graph(\n graph: computation_graph.Graph,\n inputs: Mapping[computation_graph.Artifact, Any],\n) -> Mapping[computation_graph.Artifact, Any]:\n \"\"\"Executes a statically-generated computation graph on some inputs.\n\n Given:\n\n input_artifact, value\n\n input_artifact:\n - graph, role,\n -\n\n\n graph: The graph to execute.\n inputs: A mapping from input Artifact to the value it should take on in\n the computation.\n\n Returns:\n A mapping from output Artifact to the value it computes to.\n \"\"\"\n output_values = {}\n artifact_values = inputs.copy()\n\n for output in graph.outputs:\n artifact_values = compute_artifact_ancestors(output, artifact_values)\n output_values[output] = artifact_values[output]\n\n return output_values\n","repo_name":"jakeval/optex","sub_path":"core/graph_merge.py","file_name":"graph_merge.py","file_ext":"py","file_size_in_byte":17552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"3667336498","text":"#!/usr/bin/python\n\nimport java\nimport sys\n\nfrom javax.swing import *\nfrom java.awt import *\nfrom java.lang import *\n\nclass BoxLayoutDemo(java.lang.Runnable):\n def run(self):\n frame = JFrame(\n 'BoxLayoutDemo',\n defaultCloseOperation = JFrame.EXIT_ON_CLOSE\n )\n \n self.addTabs(frame.getContentPane())\n frame.size = (300, 175)\n frame.visible = 1\n\n def addTabs(self, container):\n align = [\n ['Left', Component.LEFT_ALIGNMENT ],\n ['Center', Component.CENTER_ALIGNMENT ],\n ['Right', Component.RIGHT_ALIGNMENT ],\n ]\n\n names = '1,2,3 being the third number'.split(',')\n tabs = JTabbedPane()\n for aName, aConst in align:\n tab = JPanel()\n tab.setLayout(BoxLayout(tab, BoxLayout.Y_AXIS))\n for name in names:\n tab.add( JButton(name, alignmentX = aConst))\n\n tabs.addTab(aName, tab)\n\n container.add(tabs)\n\nif __name__ == '__main__':\n EventQueue.invokeLater(BoxLayoutDemo())\n\n\n \n \n\n","repo_name":"ToroLiu/TTPrac_SwingForJython","sub_path":"scripts/ch5_boxLayout.py","file_name":"ch5_boxLayout.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"39661935977","text":"import time\nimport RPi.GPIO as GPIO\n\nclass Light(object):\n def __init__(self,x):\n self.number = x\n GPIO.setup(x,GPIO.OUT)\n self.pwmid = GPIO.PWM(x,1000)\n\ndef main():\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n lights = [18,23,24]\n brightness = 0 \n avgOf = 2\n leds = []\n # Sets up the lights to be turned on and off\n for count in range(len(lights)):\n leds.append(Light(lights[count]))\n for count in range(len(leds)):\n leds[count].pwmid.ChangeDutyCycle(brightness)\n stuff = []\n while True:\n dist = reading(0)\n stuff.append(dist)\n if(len(stuff)!=0 and len(stuff)%avgOf==0):\n brightness = blinkLights(stuff[-(avgOf):],leds,brightness,avgOf)\n print(brightness)\n\ndef reading(sensor):\n if(sensor==0):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(12,GPIO.OUT)\n GPIO.setup(25,GPIO.IN)\n time.sleep(0.3)\n GPIO.output(12, GPIO.LOW)\n GPIO.output(12, True)\n time.sleep(0.00001)\n GPIO.output(12, False)\n while GPIO.input(25) == 0:\n signaloff = time.time()\n while GPIO.input(25) == 1:\n signalon = time.time()\n timepassed = signalon - signaloff\n distance = timepassed * 17000\n return distance\n GPIO.cleanup()\n else:\n print (\"Incorrect usonic() function varible.\")\n\ndef blinkLights(stuff, a,bri,av):\n total = 0\n shit = 0\n for x in range(av):\n total+=stuff[x]\n average=total/av\n print(average)\n b=bri\n \n if(average<100):\n for l in range(100):\n if(b != 100):\n for x in range(len(a)):\n a[x].pwmid.start(b)\n time.sleep(.002)\n b+=1\n if(average>100 and b!=0):\n for l in range(100):\n for x in range(len(a)):\n a[x].pwmid.start(b)\n time.sleep(.002)\n b=b-1\n if(b==0):\n for x in range(len(a)):\n a[x].pwmid.stop()\n return b\n\ntry:\n main()\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n","repo_name":"UWCommuniTree/Motion-Lights","sub_path":"comunitree.py","file_name":"comunitree.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"27472128429","text":"\n\"\"\" Evaluation code based on VOC protocol\n\nOriginal author: Ellis Brown, Max deGroot for VOC dataset\nhttps://github.com/amdegroot/ssd.pytorch\n\nUpdated by Gurkirt Singh for ucf101-24 dataset\n\n\"\"\"\n\nimport os\nimport numpy as np\n\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\"\n VOC評価(ROADから抜粋)\n ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n # print('voc_ap() - use_07_metric:=' + str(use_07_metric))\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef compute_iou(cls_gt_boxes, box):\n \"\"\"\n IoUマッチング(ROADから抜粋)\n \"\"\"\n ious = np.zeros(cls_gt_boxes.shape[0])\n\n for m in range(ious.shape[0]):\n gtbox = cls_gt_boxes[m]\n\n xmin = max(gtbox[0], box[0])\n ymin = max(gtbox[1], box[1])\n xmax = min(gtbox[2], box[2])\n ymax = min(gtbox[3], box[3])\n iw = np.maximum(xmax - xmin, 0.)\n ih = np.maximum(ymax - ymin, 0.)\n if iw > 0 and ih > 0:\n intsc = iw*ih\n else:\n intsc = 0.0\n # print (intsc)\n union = (gtbox[2] - gtbox[0]) * (gtbox[3] - gtbox[1]) + \\\n (box[2] - box[0]) * (box[3] - box[1]) - intsc\n ious[m] = intsc/union\n\n return ious\n\n\ndef generate_det_boxes(det_contexts, classes):\n \"\"\"\n 検出データを処理しやすいように持ち直し\n \"\"\"\n num_frames = len(det_contexts)\n dataset = [[] for _ in range(num_frames)]\n for frame_idx, context in enumerate(det_contexts):\n for det in context['context']:\n action_id = det['action']['model_id']\n score = det['action']['score']\n box = det['location']\n bbox = np.asarray(box + score, dtype=np.float32)\n dataset[frame_idx].append({\n 'bbox': bbox,\n 'action_id': action_id\n })\n return dataset\n\n\ndef evaluate_actions(gt, det_contexts, classes, iou_thresh=0.5):\n \"\"\"\n 動画単位の評価結果を取得する\n \"\"\"\n num_positives = 0 # 正解の数\n num_frames = len(gt)\n num_classes = len(classes)\n det_boxes = generate_det_boxes(det_contexts, classes)\n\n ap_all = np.array([], dtype=np.float32)\n scores_of_cls = np.zeros((num_classes, num_frames * 220)) # 行動認識結果スコアログ\n istps = np.zeros((num_classes, num_frames * 220)) # True-Positiveログ\n det_counts_of_cls = np.zeros(num_classes, dtype=np.int) # 行動ごとの検出結果\n num_positives_of_cls = np.zeros(num_classes, dtype=np.int) # 行動ごとの正例数\n\n for nf in range(num_frames): # フレーム単位\n gt_data = gt[nf] # 当該フレームの正解データ\n boxes = sorted(det_boxes[nf], key=lambda x: -x['bbox'][-4]) # 当該フレームの検出結果をスコア順に並べる\n gt_actions_of_boxes = [x['action_id'] for x in gt_data] # 当該フレームの、各ボックスに割り当てられている正解ラベルセット(e.g. 
[[1, 2], [1], ...])\n gt_boxes = np.asarray([x['box'] for x in gt_data], dtype=np.float32) # 当該フレームのバウンディングボックス\n\n # 正例カウント\n for gt_ids in gt_actions_of_boxes:\n gt_indices = [classes[x] for x in gt_ids if x != -1] # exclude No Action\n num_positives_of_cls[gt_indices] += 1\n\n # 検出結果評価\n if len(boxes) > 0:\n for box_data in boxes: # 検出結果単位で繰り返し\n box = box_data['bbox'][:-4] # バウンディング\n score = box_data['bbox'][-4:] # スコア\n act_ids = box_data['action_id'] # 行動ID\n\n # 全てNoActionの場合は一個のみ。\n # それ以外でNoActionが含まれる場合は、除外する(他の行動が含まれているので必要ない)\n if act_ids == [-1, -1, -1, -1]:\n act_ids = [-1]\n score = [score[0]]\n else:\n act_ids = [i for i in act_ids if i != -1]\n score = [score[i]\n for i, act_id in enumerate(act_ids)\n if act_id != -1]\n\n # 更新する対象の行動セット(デフォルトは検出した行動のID)\n target_dict = {idx: classes[i] if i != -1 else -1\n for idx, i in enumerate(act_ids)} # 行動IDに紐づくインデックス\n\n is_positive_dict = {\n idx: False for idx, _ in enumerate(act_ids)}\n\n # 正解ラベルとマッチング\n if gt_boxes.shape[0] > 0: # マッチしていない正解データがある正解データがある\n iou = compute_iou(gt_boxes, box) # IoU計算\n maxid = np.argmax(iou) # 最もマッチしたバウンディングボックスを取り出す\n gt_ids = gt_actions_of_boxes[maxid]\n\n if iou[maxid] >= iou_thresh:\n # 正解データがNoActionならdetection,gt共に評価対象外とする\n if - 1 in gt_ids:\n gt_boxes = np.delete(gt_boxes, maxid, 0)\n del gt_actions_of_boxes[maxid]\n continue\n\n for top_i, act_id in enumerate(act_ids):\n is_positive_dict[top_i] = act_id in gt_ids\n\n # 検出の行動数は、okutama-action datasetの最大同時ラベル数に合わせて、\n # 常に4つの行動ラベルを出力している。\n # そのため正解の行動数よりも検出の行動数のほうが多くなるケースが出てくる。\n # それをdet_counts_of_clsに含めるのはよろしくないので、この場合は\n # 検出の行動数を正解の行動数と一致するよう削る\n # ではどれを削るか?\n # 正解とマッチしてないもののうち、スコアの低いものから削る。\n if len(gt_ids) < len(is_positive_dict):\n fp_list = [\n act_id for act_id, is_positive\n in list(is_positive_dict.items())\n if is_positive is False\n ]\n while (len(gt_ids) != len(is_positive_dict)):\n min_scored_fp_act_id = fp_list.pop(-1)\n del is_positive_dict[min_scored_fp_act_id]\n del target_dict[min_scored_fp_act_id]\n\n # 正解データからマッチしたデータを削除\n gt_boxes = np.delete(gt_boxes, maxid, 0)\n del gt_actions_of_boxes[maxid]\n\n # istp、score、det_count更新\n # 正解した場合: 正解に設定されているラベル全てを更新\n # 外した場合: 検出したものだけ検出結果更新\n for top_i, class_value in target_dict.items():\n if class_value == -1:\n continue\n det_count = det_counts_of_cls[class_value] # 当該行動の現在の検出件数\n scores_of_cls[class_value, det_count] = score[top_i]\n if is_positive_dict[top_i] is True:\n istps[class_value, det_count] = 1\n det_counts_of_cls[class_value] += 1\n\n # 評価実行\n eval_results = {}\n for cls_id, cls_ind in classes.items():\n det_count = det_counts_of_cls[cls_ind]\n num_positives = num_positives_of_cls[cls_ind] if num_positives_of_cls[cls_ind] > 0 else 1\n if num_positives_of_cls[cls_ind] == 0:\n print('no gt of action', cls_id)\n continue\n \n scores = scores_of_cls[cls_ind, :det_count]\n istp = istps[cls_ind, :det_count]\n argsort_scores = np.argsort(-scores)\n istp = istp[argsort_scores]\n fp = np.cumsum(istp == 0) # fp[-1] ==> 誤検出(行動が違う)\n tp = np.cumsum(istp == 1) # tp[-1] ==> 正解を正解とした\n fp = fp.astype(np.float64)\n tp = tp.astype(np.float64)\n recall = tp / float(num_positives) # compute recall\n # compute precision\n precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n cls_ap = voc_ap(recall, precision)\n eval_results[cls_id] = {\n 'num_positives': num_positives_of_cls[cls_ind],\n 'fp': fp,\n 'tp': tp,\n 'recall': recall,\n 'precision': precision,\n 'istp': istps[cls_ind, :det_count],\n 'scores': scores_of_cls[cls_ind, :det_count],\n 'cls_ap': cls_ap\n }\n ap_all = 
np.append(ap_all, cls_ap)\n return np.mean(ap_all), eval_results\n\n\ndef all_video_eval(classes, eval_results_of_file):\n num_classes = len(classes)\n scores_of_cls = [[] for _ in range(num_classes)]\n istp_of_cls = [[] for _ in range(num_classes)]\n ap_all = np.zeros(num_classes, dtype=np.float32)\n num_positives_of_cls = np.zeros(num_classes, dtype=np.int)\n for _, (_, eval_results) in eval_results_of_file.items():\n for cls_id, eval_data in eval_results.items():\n cls_idx = classes[cls_id]\n scores = eval_data['scores']\n istp = eval_data['istp']\n num_positives = eval_data['num_positives']\n num_positives_of_cls[cls_idx] += num_positives\n scores_of_cls[cls_idx].append(scores)\n istp_of_cls[cls_idx].append(istp)\n results = {}\n for cls_id, cls_ind in classes.items():\n scores = np.concatenate(scores_of_cls[cls_ind])\n istp = np.concatenate(istp_of_cls[cls_ind])\n num_positives = num_positives_of_cls[cls_ind]\n if num_positives < 1:\n num_positives = 1\n argsort_scores = np.argsort(-scores)\n istp = istp[argsort_scores]\n fp = np.cumsum(istp == 0) # fp[-1] ==> 誤検出(行動が違う)\n tp = np.cumsum(istp == 1) # tp[-1] ==> 正解を正解とした\n fp = fp.astype(np.float64)\n tp = tp.astype(np.float64)\n recall = tp / float(num_positives) # compute recall\n # compute precision\n precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n cls_ap = voc_ap(recall, precision)\n ap_all[cls_ind] = cls_ap\n results[cls_id] = {\n 'num_positives': num_positives_of_cls[cls_ind],\n 'fp': fp,\n 'tp': tp,\n 'recall': recall,\n 'precision': precision,\n 'istp': istp,\n 'scores': scores,\n 'cls_ap': cls_ap\n }\n return np.mean(ap_all), results\n","repo_name":"hitottiez/deepsort","sub_path":"tools/eval_utils/poc2_act_eval.py","file_name":"poc2_act_eval.py","file_ext":"py","file_size_in_byte":11896,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"25496804941","text":"import redis\n#连接\nr = redis.StrictRedis(host=\"crawler-platform02-redis-service.int.yidian-inc.com\",port=6379)\n\n#方法1:根据数据类型的不同,调用响应的方法\n#设置新增\nr.set(\"p1\",\"good\")\n#取值\nprint(r.get(\"p1\"))\n\n#方法2:pipline\n#缓冲多条命令,然后一次执行,减少服务器--客户端的TCP数据包\n# pipe = r.pipeline()\n# pipe.set(\"p2\",\"nice\")\n# pipe.set(\"p3\",\"cool\")\n# #保存至redis\n# pipe.execute()\n\nclass SunwenboRedis():\n def __init__(self,host=\"10.138.11.201\",port=6379):\n self.__redis = redis.StrictRedis(host=host,port=port,db=2)\n def set(self,key,value):\n self.__redis.set(key,value)\n def get(self,key):\n if self.__redis.exists(key):\n return self.__redis.get(key)\n else:\n return \"\"\n def delete(self,key):\n self.__redis.delete(key)\n def all(self):\n self.__redis.keys()\n return self.__redis.keys()\n\nabc = SunwenboRedis()\nabc.set(\"bbb\",\"222\")\nabc.set(\"abc\",\"222\")\nprint(abc.get(\"bbb\"))\n\nabc.delete(\"bbb\")\nprint(\"#########\")\n\nprint(abc.get(\"bbb\"))\nprint(abc.all())","repo_name":"sunwenbo/python","sub_path":"10.MongoDB 和Redis/4.Redis与python交互.py","file_name":"4.Redis与python交互.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"16940046667","text":"\"\"\"Get images URLS from the OSRS Wiki\"\"\"\n\nimport re\nimport hashlib\n\n###\n## Append _detail before .png for high quality image\n###\n\ndef wiki_image_url(filename):\n \"\"\"\n Get image URL from OSRS Wiki\n :param filename: Image filename found in mapping\n :return: Image URL\n \"\"\"\n filename = re.sub(' ', '_', filename)\n md5hash = hashlib.md5(filename.encode()).hexdigest()\n filename = re.sub('\\\\(', '%28', filename)\n filename = re.sub('\\\\)', '%29', filename)\n return f'https://oldschool.runescape.wiki/images/{md5hash[0:1]}/{md5hash[0:2]}/{filename}?7263b'\n","repo_name":"EliasTalcott/osrs-flipping","sub_path":"flaskr/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"11953601828","text":"import time\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nISOTIMEFORMAT='%Y%m%d'\ndef sentemail():\n caodate=str(time.strftime(ISOTIMEFORMAT, time.localtime()))\n host = 'smtp.163.com'\n # 设置发件服务器地址\n port = 465\n # 设置发件服务器端口号。注意,这里有SSL和非SSL两种形式\n sender = 'zq476668643@163.com'\n # 设置发件邮箱,一定要自己注册的邮箱\n pwd = 'zq242333'\n # 设置发件邮箱的密码,163邮箱的授权码,等会登陆会用到\n receiver0 = '476668643@qq.com'\n # 设置邮件接收人,可以是扣扣邮箱\n receiver1 = '2652842878@qq.com'\n body = '
'+caodate+'
zhongfs
'\n # 设置邮件正文,这里是支持HTML的\n msg = MIMEText(body, 'html')\n # 设置正文为符合邮件格式的HTML内容\n message = MIMEMultipart()\n message['subject'] = caodate+'下载附件通知'\n # 设置邮件标题\n message['from'] = sender\n # 设置发送人\n message['to'] = receiver0\n # 设置接收人\n message.attach(msg)\n # filename='xfurlwett-'+caodate+'.txt'\n filename = '1.txt'\n # 构造附件1,传送当前目录下的 filename 文件\n att1 = MIMEText(open(filename, 'rb').read(), 'base64', 'utf-8')\n att1[\"Content-Type\"] = 'application/octet-stream'\n # 这里的filename可以任意写,写什么名字,邮件中显示什么名字\n att1[\"Content-Disposition\"] = 'attachment; filename=\"'+filename+'\"'\n message.attach(att1)\n try:\n s = smtplib.SMTP_SSL(host, port) # 注意!如果是使用SSL端口,这里就要改为SMTP_SSL\n s.login(sender, pwd) # 登陆邮箱\n s.sendmail(sender, receiver0, message.as_string())# 发送邮件!\n #s.sendmail(sender, receiver1, msg.as_string())\n print ('Done.sent email success')\n except smtplib.SMTPException:\n print ('Error.sent email fail')\nif __name__ == '__main__':\n sentemail()","repo_name":"github3332422/case","sub_path":"email_send/mail_163_annex.py","file_name":"mail_163_annex.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"20524019838","text":"import threading\nimport os\nimport glob\nimport sqlite3\nimport requests\nimport requests.utils\nimport pickle\nimport re\nimport html.parser\nimport cgi\n\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, timedelta\nfrom socket import error as SocketError\nimport errno\nimport tart\n\nfrom readeryc import HNapi, readerutils\n\n\nclass App(tart.Application):\n\n \"\"\" The class that directly communicates with Tart and Cascades\n \"\"\"\n\n cache = [] # {'ident': None} # Keep track of current request\n SETTINGS_FILE = readerutils.SETTINGS_FILE\n COOKIE = readerutils.COOKIE\n HEADERS = readerutils.HEADERS\n\n def __init__(self):\n super().__init__(debug=False) # set True for some extra debug output\n self.settings = {\n 'openInBrowser': False,\n 'readerMode': False,\n 'loggedIn': False,\n 'username': '',\n 'legacyFetch': False,\n 'darkTheme': False\n }\n self.restore_data(self.settings, self.SETTINGS_FILE)\n self.sess = HNapi(self.settings['username'])\n print(\"restored: \", self.settings)\n\n def onUiReady(self):\n print(\"UI READY!!\")\n tart.send('restoreSettings', **self.settings)\n self.onRequestPage(\"news\", \"news\")\n # self.onRequestPage(\"ask\", \"ask\")\n # self.onRequestPage(\"newest\", \"newest\")\n\n def onSaveSettings(self, settings):\n self.settings.update(settings)\n self.save_data(self.settings, self.SETTINGS_FILE)\n\n# Handling requests\n def onRequestPage(self, source, sentBy, askPost=\"false\", deleteComments=\"false\", startIndex=0, author=\"\"):\n \"\"\" This is really ugly, but it handles all url requests with threading,\n it also prevents the same request from being made twice\n \"\"\"\n\n entryExists = False\n position = 0\n currReq = {'ident': (datetime.now(), source)}\n src = \"\"\n for i in self.cache:\n position = position + 1\n src = i['ident'][1]\n if src == source:\n print(\"Request in progress!!\")\n entryExists = True\n ts = i['ident'][0]\n # If the request is old, make the new one anyway\n if datetime.now() - ts > timedelta(minutes=5):\n break\n return # Otherwise quit\n\n print(\"Requests pending: \", len(self.cache))\n if len(self.cache) == 0:\n self.cache.append(currReq)\n entryExists = True\n\n if entryExists != True:\n print(\"Request doesn't exist\")\n # If we have 5 reqs going, remove the first one before adding\n if len(self.cache) > 5:\n self.cache.pop(0)\n self.cache.append(currReq) # Append it to cache\n t = threading.Thread(target=self.parseRequest, args=(\n source, sentBy, startIndex, askPost, author))\n\n else: # If the request does exist\n if len(self.cache) == 1: # If it is the only one we make the request (first request added)\n print(\"Only request?\")\n t = threading.Thread(target=self.parseRequest, args=(\n source, sentBy, startIndex, askPost, author))\n else: # If there are multiple requests\n print(\"Checking request\")\n if src == source:\n print(\"Request is the same!\")\n # Check if cache was made 5 mins ago\n if datetime.now() - ts > timedelta(minutes=5):\n print(\"Old enough, request OK\")\n t = threading.Thread(target=self.parseRequest, args=(\n source, sentBy, startIndex, askPost, author))\n else:\n return\n t.daemon = True\n t.start()\n\n def parseRequest(self, source, sentBy, startIndex, askPost, author):\n print(\"Parsing request for: \" + sentBy)\n if (sentBy in ['news', 'ask', 'newest', 'show']):\n self.storyRoutine(source, sentBy)\n elif (sentBy == 'commentPage'):\n self.commentsRoutine(source, askPost)\n elif (sentBy == 'searchPage'):\n self.searchRoutine(startIndex, 
[source, author])\n else:\n print(\"Error getting page...\")\n return\n print(\"request complete! Removing...\")\n self.cache.pop(-1)\n\n# GET functions\n def storyRoutine(self, source, sentBy):\n # try:\n stories, moreLink = self.sess.getStories(source)\n # except requests.exceptions.ConnectionError:\n # tart.send('{0}ListError'.format(sentBy),\n # text=\"Error getting stories\\nCheck your connection and try again!\")\n # return\n # except IndexError:\n # print(\"Expired link?\")\n # tart.send('{0}ListError'.format(sentBy),\n # text=\"Link expired\\nPlease refresh the page\")\n # return\n print(stories)\n for story in stories:\n tart.send('add{0}Stories'.format(sentBy),\n story=story, moreLink=moreLink, sentTo=sentBy)\n if (source == 'news'):\n tart.send('addCoverStories', stories=stories)\n\n def commentsRoutine(self, source, askPost):\n print(\"source sent:\" + source)\n\n try:\n text, comments = self.sess.getComments(\n source, askPost, self.settings['legacyFetch'])\n if (text != \"\"):\n text = readerutils.textReplace(text)\n\n tart.send('addText', text=text, hnid=source)\n if (comments == []):\n tart.send(\n 'commentError', text=\"No comments, check back later!\", hnid=source)\n for comment in comments:\n comment['text'] = readerutils.textReplace(comment['text'])\n comment['barColour'] = \"#\" + \\\n readerutils.getColour(comment[\"indent\"] // 40)\n tart.send('addComments', comment=comment, hnid=source)\n\n except requests.exceptions.ConnectionError:\n print(\"ERROR GETTING COMMENTS\")\n tart.send('addText', text='', hnid=source)\n tart.send(\n 'commentError', text=\"Error getting comments\\nCheck your connection and try again!\", hnid=source)\n except SocketError:\n print(\"ERROR GETTING COMMENTS\")\n tart.send('addText', text='', hnid=source)\n tart.send(\n 'commentError', text=\"Error getting comments\\nCheck your connection and try again!\", hnid=source)\n\n def searchRoutine(self, startIndex, source):\n print(\"Searching for: \" + str(source))\n try:\n result = self.sess.getSearchStories(startIndex, source)\n if result == []:\n tart.send(\n 'searchError', text=\"No results found!\")\n return\n for res in result:\n tart.send('addSearchStories', story=res)\n except requests.exceptions.ConnectionError:\n tart.send(\n 'searchError', text=\"Error getting stories\\nCheck your connection and try again!\")\n except SocketError:\n tart.send(\n 'searchError', text=\"Error getting stories\\nCheck your connection and try again!\")\n\n# POST functions\n def onRequestLogin(self, username, password):\n result = self.sess.login(username, password)\n tart.send('loginResult', result=result)\n\n def onGetProfile(self, username):\n info = self.sess.getProfile(username)\n print(info)\n if (info == False):\n os.remove(self.COOKIE)\n tart.send(\n 'logoutResult', text=\"Unable to get profile, forcing logout...\")\n return\n tart.send('profileRetrieved', email=info[3], about=info[2])\n\n def onSaveProfile(self, username, email, about):\n res = False\n try:\n res = self.sess.postProfile(username, email, about)\n except:\n tart.send(\n 'profileSaved', text=\"Unable to update profile, check connection and try again\")\n if (res == True):\n tart.send('profileSaved', text=\"Profile updated!\")\n else:\n tart.send(\n 'profileSaved', text=\"Unable to update profile, check connection and try again\")\n\n def onSendComment(self, source, text):\n res = self.sess.postComment(source, text)\n text = text.replace('*', '')\n if (res == True):\n tart.send('commentPosted', result=\"true\", comment=text)\n return\n 
tart.send('commentPosted', result=\"false\", comment=\"\")\n\n def onPostStory(self, title, url, text):\n res = self.sess.postStory(title, url, text)\n if (res == True):\n tart.send('storyPosted', result='true')\n else:\n tart.send('storyPosted', result='false')\n\n def onLogout(self):\n self.sess.logout()\n try:\n os.remove(self.COOKIE)\n except OSError:\n tart.send('logoutResult', text=\"logged out successfully!\")\n\n tart.send('logoutResult', text=\"logged out successfully!\")\n\n# Favouriting functions\n def onSaveArticle(self, article):\n conn = sqlite3.connect(\"data/favourites.db\")\n print(article)\n article = tuple(article)\n cursor = conn.cursor()\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS articles\n (title text, articleURL text, saveTime text,\n poster text, numComments text, isAsk text,\n domain text, points text, hnid text PRIMARY KEY)\n \"\"\")\n\n # insert to table\n try:\n cursor.execute(\n \"INSERT INTO articles VALUES (?,?,?,?,?,?,?,?,?)\", article)\n print(\"Article saved!\")\n # save data to database\n conn.commit()\n tart.send('saveResult', text=\"Article successfully favourited\")\n except sqlite3.IntegrityError:\n print(\"Article already saved!\")\n tart.send('saveResult', text=\"Article already favourited\")\n\n def onDeleteArticle(self, hnid, selected):\n conn = sqlite3.connect(\"data/favourites.db\")\n\n hnid = str(hnid)\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM articles WHERE hnid=?\", (hnid,))\n conn.commit()\n tart.send(\n 'deleteResult', text=\"Article removed from favourites\", itemToRemove=selected)\n\n def onLoadFavourites(self):\n conn = sqlite3.connect(\"data/favourites.db\")\n\n cursor = conn.cursor()\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS articles\n (title text, articleURL text, saveTime text,\n poster text, numComments text, isAsk text,\n domain text, points text, hnid text PRIMARY KEY)\n \"\"\")\n cursor.execute('SELECT * FROM articles')\n results = readerutils.get_rowdicts(cursor)\n tart.send('fillList', results=results)\n\n# Misc functions\n def onDeleteCache(self):\n print(\"PYTHON DELETING CACHE\")\n workingDir = os.getcwd() + '/data/cache/'\n cursor = self.conn.cursor()\n print(\"Dropping favourites table\")\n cursor.execute(\"\"\"DROP TABLE IF EXISTS articles\"\"\")\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS articles\n (title text, articleURL text, saveTime text,\n poster text, numComments text, isAsk text,\n domain text, points text, hnid text PRIMARY KEY)\n \"\"\")\n tart.send('cacheDeleted', text=\"Cache cleared!\")\n\n def onCopyHTML(self, content, meta):\n print(content)\n print(meta)\n soup = BeautifulSoup(content)\n from tart import clipboard\n c = clipboard.Clipboard()\n mimeType = 'text/plain'\n c.insert(mimeType, str(soup.text))\n tart.send('contentCopied', meta=meta)\n\n def onCopy(self, articleLink):\n from tart import clipboard\n c = clipboard.Clipboard()\n mimeType = 'text/plain'\n c.insert(mimeType, articleLink)\n tart.send('copyResult', text=articleLink + \" copied to clipboard!\")\n","repo_name":"krruzic/Reader-YC","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12497,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"22"}
+{"seq_id":"21258228525","text":"import string, random\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom details.models import Userdetails, Delivery, Item\nfrom details.forms import Add_detailsForm,Add_coordinatesForm\n\n\n# Create your views here.\ndef user_details(request, amount, template_name='user_registration.html'):\n\tform = Add_detailsForm(request.POST or None)\n\tctx = {}\n\tctx['form']=form\n\tform.fields['amount'].initial = amount\n\n\tif request.method == \"POST\":\n\t\tif form.is_valid():\n\t\t\tinstance = form.save()\n\t\t\t# return HttpResponseRedirect(instance.get_absolute_url())\n\t\t\treturn redirect(\"/location/%s\" % str(instance.slug))\n\t#else:\n\t\t#form = Add_detailsForm()\n\treturn render(request, template_name, ctx)\n\ndef user_location(request, slug, template_name='user_location.html'):\n\ttry:\n\t\tctx = {}\n\t\tuser_info = Userdetails.objects.get(slug=slug)\n\t\tform = Add_coordinatesForm(request.POST or None)\n\n\t\tfirst_name = user_info.first_name\n\t\tlast_name = user_info.last_name\n\t\temail = user_info.email\n\t\tctx['form']=form\n\n\t\tif form.is_valid():\n\t\t\tinstance = form.save(commit=False)\n\t\t\tinstance.save()\n\t\t\tdescription='none'\n\t\t\ttypes='MERCHANT'\n\t\t\treference='none'\n\t\t\ttotal_amount = float(user_info.amount) + float(request.POST['amount'])\n\t\t\treturn redirect('http://45.55.252.17/pickanddrop/pesapal-iframe.php?first_name=%s&last_name=%s&amount=%s&email=%s&description=%s&type=%s&reference=%s'%(first_name, \n\t\t\t\tlast_name,total_amount,email,description,types,reference))\n\texcept:\n\t\traise \n\n\treturn render(request, template_name, ctx)\n\ndef token_generator(request, template_name='token_generator.html'):\n\n\tdef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n\t\treturn ''.join(random.choice(chars) for _ in range(size))\n\ttoken = id_generator(7)\n\tctx = {}\n\tctx['token']=token\n\treturn render(request, template_name, ctx)\n","repo_name":"skafis/pickanddrop","sub_path":"details/class_views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"3104962584","text":"from django.contrib import admin\nfrom .models import RegisteredUser\n# Register your models here.\nadmin.site.site_header=\"Cov Testing Dashboard\"\nadmin.site.site_title=\"Dashboard\"\nadmin.site.index_title=\"Dashboard for Cov test report Shangri-la\"\n\n\nclass Dashboard(admin.ModelAdmin):\n list_display = ('name','email','age','gender','address','postcode','ttn','testResult')\n # change_list_template = \"sdsd\"\n list_filter = ('testResult','age','postcode',)\n # change_list_template = 'virusTestingApp/registeredUsers.html'\n\n # //http://127.0.0.1:8000/dashboard/virusTestingApp/registereduser/\n\nadmin.site.register(RegisteredUser,Dashboard)\n","repo_name":"sk814/angular_api_mobile_cw3_frontend","sub_path":"pro1/COVTesting/virusTestingApp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"69947656698","text":"# Import libraries \n\nimport numpy as np \n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns \n\nimport os\n\nimport warnings\n\n\n\n\nwarnings.filterwarnings(\"ignore\")\n# Set the size of the plots \n\nplt.rcParams[\"figure.figsize\"] = (18,8)\n\nsns.set(rc={'figure.figsize':(18,8)})\ndata = pd.read_csv(\"../input/pubg-finish-placement-prediction/train_V2.csv\")\n\nprint(\"Finished loading the data\")\ndata.shape\ndata.info()\ndata.head()\ndata.drop(columns=['rankPoints'], inplace=True)\n# Check to see what we are dealing with regarding missing and null values \n\ndata.isnull().values.any()\ndata.isnull().sum()\ndata.dropna(inplace=True)\n\ndata.isnull().values.any()\n# Check to see win percentage distribution \n\nsns.distplot(data['winPlacePerc']).set_title('Distribution of Winning Percentile');\nprint('Mean: {:.4f}, Median {:.4f}'.format(data['winPlacePerc'].mean(), data['winPlacePerc'].median()))\ndata['matchMean'] = data.groupby('matchId')['winPlacePerc'].transform('mean')\n\ndata['matchMedian'] = data.groupby('matchId')['winPlacePerc'].transform('median')\nsns.distplot(data['matchMean'], kde=False).set_title('Mean for Winning Percentile grouped by match');\nsns.distplot(data['matchMedian'], kde=False).set_title('Median for Winning Percentile grouped by match');\n# Get values\n\nprint('Mean: {:.4f}, Median {:.4f}'.format(data['matchMean'].mean(), data['matchMedian'].median()))\n# Can do this with matchType and then derive the team and match size\n\ndata['matchType'].unique()\nsns.countplot('matchType', data=data);\ndata['teamSize'] = data.groupby('groupId')['groupId'].transform('count')\n\ndata['maxTeamSize'] = data.groupby('matchId')['teamSize'].transform('max')\n\ndata['matchSize'] = data.groupby('matchId')['Id'].transform('nunique')\nsns.distplot(data['matchSize'], kde=False).set_title('Distribution of Players per Game');\n# Let's see the largest team size\n\ndata['maxTeamSize'].max()\nsns.distplot(data['teamSize'], kde=False);\ntypes = ['solo', 'solo-fpp', 'duo', 'duo-fpp', 'squad', 'squad-fpp']\n\ndata = data.loc[data['matchType'].isin(types)]\nsns.countplot('matchType', data=data);\nsns.distplot(data['matchSize'], kde=False).set_title('Distribution of Players per Game');sns.distplot(data['matchSize'], kde=False).set_title('Distribution of Players per Game');\ndata['matchSize'].min()\nsns.distplot(data['teamSize'], kde=False);\n# Also look at top 10% and bottom 10% of players \n\ntop_10 = data[data['winPlacePerc'] >= 0.9]\n\nbottom_10 = data[data['winPlacePerc'] <= 0.1]\ndata['boosts'].unique()\nsns.scatterplot(x=\"boosts\", y=\"winPlacePerc\", data=data, color='seagreen');\nsns.scatterplot(x=\"boosts\", y=\"winPlacePerc\", data=top_10, color='seagreen');\nsns.scatterplot(x=\"boosts\", y=\"winPlacePerc\", data=bottom_10, color='seagreen');\nsns.scatterplot(x=\"heals\", y=\"winPlacePerc\", data=data, color='seagreen');\nsns.scatterplot(x=\"heals\", y=\"winPlacePerc\", data=top_10, color='seagreen');\nsns.scatterplot(x=\"heals\", y=\"winPlacePerc\", data=bottom_10, color='seagreen');\ntop_10[['boosts', 'heals']].describe()\nbottom_10[['boosts', 'heals']].describe()\n# Count \n\nsns.countplot(data['kills'], color='red');\nsns.lineplot(x=\"kills\", y='winPlacePerc', data=data, color='red');\nsns.scatterplot(x=\"kills\", y=\"winPlacePerc\", data=data, color='red');\nsns.scatterplot(x=\"kills\", y=\"winPlacePerc\", data=top_10, color='red');\nsns.scatterplot(x=\"kills\", y=\"winPlacePerc\", data=bottom_10, 
color='red');\nzero_kills = data.copy()\n\nzero_kills = zero_kills[zero_kills['kills']==0]\n# Same reason as previous line\n\nsns.scatterplot(x=\"kills\", y='winPlacePerc', data=zero_kills);\nsns.lineplot(x=\"killPlace\", y='winPlacePerc', data=zero_kills);\ndata.head()\ndata[data['groupId'] == '4d4b580de459be'][['matchType', 'kills', 'killPlace', 'winPlacePerc']]\ndata[data['matchType'] == 'duo-fpp'].head()\ndata[data['groupId'] == '8e0a0ea95d3596'][['matchType', 'kills', 'killPlace', 'winPlacePerc']]\nsns.scatterplot(x=\"damageDealt\", y=\"winPlacePerc\", data=data);\nsns.scatterplot(x=\"damageDealt\", y=\"winPlacePerc\", data=top_10);\nsns.scatterplot(x=\"damageDealt\", y=\"winPlacePerc\", data=bottom_10);\nsns.scatterplot(x=\"matchDuration\", y=\"winPlacePerc\", data=data, color='yellow');\nsns.scatterplot(x=\"matchDuration\", y=\"winPlacePerc\", data=top_10, color='yellow');\nsns.scatterplot(x=\"matchDuration\", y=\"winPlacePerc\", data=bottom_10, color='yellow');\nsns.scatterplot(x=\"killPoints\", y=\"winPlacePerc\", data=data, color='orange');\nsns.scatterplot(x=\"killPoints\", y=\"winPlacePerc\", data=top_10, color='orange');\nsns.scatterplot(x=\"killPoints\", y=\"winPlacePerc\", data=bottom_10, color='orange');\nsns.lineplot(x=\"killPoints\", y='kills', data=data, color='orange');\nsns.lineplot(x=\"kills\", y='killPoints', data=data, color='orange');\nsns.lineplot(x=\"winPoints\", y='winPlacePerc', data=data, color='brown');\nsns.scatterplot(x=\"winPoints\", y=\"winPlacePerc\", data=data, color='brown');\nsns.scatterplot(x=\"winPoints\", y=\"winPlacePerc\", data=top_10, color='brown');\nsns.scatterplot(x=\"winPoints\", y=\"winPlacePerc\", data=bottom_10, color='brown');","repo_name":"aorursy/new-nb-5","sub_path":"mjenkins1_pubg-presentation-intro.py","file_name":"mjenkins1_pubg-presentation-intro.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"21378934204","text":"import asyncio\nfrom asyncio.events import AbstractEventLoop\nfrom signal import SIGINT, SIGTERM\n\nfrom app.configs.log import logger\n\n\ndef handler(sig):\n \"\"\"\n signal 처리를 위한 handler 함수\n :param sig: 프로세스에서 받은 시그널\n :return: None\n \"\"\"\n logger.info(f\"[sig handelr] recv signal : {sig}\")\n loop = asyncio.get_running_loop()\n\n for task in asyncio.all_tasks(loop=loop):\n task.cancel()\n\n logger.info(f\"[sig handelr] all tasks canceled\")\n\n loop.remove_signal_handler(SIGTERM)\n loop.add_signal_handler(SIGINT, lambda: None)\n\n\ndef set_signal(loop: AbstractEventLoop):\n \"\"\"\n 현재 동작중인 eventloop에 signal 처리 등록\n :param loop:\n :return:\n \"\"\"\n for sig in [SIGTERM, SIGINT]:\n loop.add_signal_handler(sig, handler, sig)\n","repo_name":"f-lab-edu/ComMoni","sub_path":"agent/app/sig/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"74467415095","text":"## Import Important Library\n\nimport unicodecsv\nfrom datetime import datetime\nfrom collections import Counter\nimport time\n\n## Filenames\nchicago = 'chicago.csv'\nnew_york_city = 'new_york_city.csv'\nwashington = 'washington.csv'\n\n## Date and Month Lists\nmonth_list = [('january', 1), ('february', 2), ('march', 3), ('april', 4), ('may', 5), ('june', 6)]\nday_list = [('monday', 1), ('tuesday', 2), ('wednesday', 3), ('thursday', 4), ('friday', 5), ('saturday', 6), ('sunday', 7)]\n\ndef read_csv(filename):\n '''Import CSV files and convert them to dictionaries\n\n Args:\n Bikshare csv Filename\n Returns:\n List of Dictionaries containing the bikeshare data\n '''\n with open(filename,'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)\n\ndef change_timeclass(date_time):\n '''Function to change str to datetime class\n\n Args:\n (str) each row in bikeshare data that will be converted to datetime\n Returns:\n (datetime) each row in bikeshare data\n '''\n return datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')\n\ndef change_intclass(integer):\n '''Function to change str to int class and empty str to None\n\n Args:\n (str) each row in bikeshare data that will be converted to integer\n Returns:\n (int) each row in bikeshare data\n '''\n if integer == '':\n return None\n else:\n return int(float(integer))\n\ndef fix_data_type(city_file):\n '''Function to fix data type for each column in bikeshare file\n\n Args:\n Bikeshare city file ; str data\n Returns:\n Bikeshare city file with correct type for each data (str, int, datetime)\n '''\n for row in city_file:\n '''Fixing data type'''\n row['Start Time'] = change_timeclass(row['Start Time'])\n row['End Time'] = change_timeclass(row['End Time'])\n row['Trip Duration'] = change_intclass(row['Trip Duration'])\n\ndef get_city():\n '''Asks the user for a city and returns the filename for that city's bike share data.\n\n Args:\n none.\n Returns:\n (str) Filename for city's bikeshare data.\n '''\n city = input('\\nHello! Let\\'s explore some US bikeshare data!\\n'\n 'Would you like to see data for Chicago, New York, or Washington?\\n')\n\n city_list = ['chicago', 'new york', 'washington']\n ## Handle Invalid Raw Input\n if city.lower() not in city_list:\n print('\\nInvalid Input! Please choose Chicago, New York, or Washington.\\n')\n return get_city()\n\n return city\n\ndef get_time_period():\n '''Asks the user for a time period and returns the specified filter.\n\n Args:\n none.\n Returns:\n (str) Time Period for filtering city's bikeshare data.\n '''\n time_period = input('\\nWould you like to filter the data by month, day, or not at'\n ' all? Type \"none\" for no time filter.\\n')\n\n period_list = ['month', 'day', 'none']\n ## Handle Invalid Raw Input\n if time_period.lower() not in period_list:\n print('\\nInvalid Input! Please choose month, day, or none.\\n')\n return get_time_period()\n\n return time_period\n\ndef get_what_month():\n '''Asks the user for month name and returns the specified filter.\n\n Args:\n none.\n Returns:\n (str) Month name for filtering city's bikeshare data.\n '''\n month_filter = input('\\nWhich month? January, February, March, April, May, June.\\n')\n\n month_list = ['january', 'february', 'march', 'april', 'may', 'june']\n ## Handle Invalid Raw Input\n if month_filter.lower() not in month_list:\n print('\\nInvalid Input! 
Please choose a month.\\n')\n return get_what_month()\n\n return month_filter\n\ndef get_what_day():\n '''Asks the user for day name and returns the specified filter.\n\n Args:\n none.\n Returns:\n (str) Day name for filtering city's bikeshare data.\n '''\n day_filter = input('\\nWhich day?\\n')\n\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n ## Handle Invalid Raw Input\n if day_filter.lower() not in day_list:\n print('\\nInvalid Input! Please choose a day.\\n')\n return get_what_day()\n\n return day_filter\n\ndef popular_month(city_file):\n '''Get Only Month from Start Time\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (str) Popular Month for given filter'''\n\n month_count = []\n for data in city_file:\n month_count.append(data['Start Time'].month)\n\n result = Counter(month_count).most_common()\n for month in month_list:\n if month[1] == result[0][0]:\n month_result = month[0]\n return month_result\n\ndef popular_day(city_file):\n '''Get Popular Day from Start Time\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (str) Popular Day for given filter '''\n\n day_count = []\n for data in city_file:\n day_count.append(data['Start Time'].isoweekday())\n\n result = Counter(day_count).most_common()\n for day in day_list:\n if day[1] == result[0][0]:\n day_result = day[0]\n return day_result\n\ndef popular_hour(city_file):\n '''Get Popular Hour from Start Time\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (int) Popular Hour for given filter '''\n\n hour_count = []\n for data in city_file:\n hour_count.append(data['Start Time'].hour)\n\n result = Counter(hour_count).most_common()\n return result[0][0]\n\ndef trip_duration(city_file):\n '''Get Statistic from Trip_Duration\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (int) Total Trip Duration and Average Trip Duration for given filter '''\n\n total_duration, trip_count = 0, 0\n for data in city_file:\n total_duration += data['Trip Duration']\n trip_count += 1\n\n average_duration = total_duration / trip_count\n return (total_duration, average_duration)\n\ndef popular_stations(city_file):\n '''Get Popular Start and End Station\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (str) Popular Start and End Station for given filter '''\n\n start_station, end_station = [], []\n for data in city_file:\n start_station.append(data['Start Station'])\n end_station.append(data['End Station'])\n\n start_result = Counter(start_station).most_common()\n end_result = Counter(end_station).most_common()\n\n return (start_result[0][0], end_result[0][0])\n\ndef popular_trip(city_file):\n '''Get Popular Trip\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (str) Popular Trip for given filter '''\n\n trip = []\n for data in city_file:\n trip.append((data['Start Station'], data['End Station']))\n\n trip_result = Counter(trip).most_common()\n return trip_result[0][0]\n\ndef users(city_file):\n '''Get Total Count of Each User Type\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (int) Total Count of Each User Type for given filter '''\n\n sub_count, cust_count = 0, 0\n for data in city_file:\n if data['User Type'] == 'Subscriber':\n sub_count += 1\n else:\n cust_count += 1\n\n return (sub_count, cust_count)\n\ndef gender(city_file):\n '''Get Total Count of Each Gender\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (int) Total Count of Each Gender for given filter '''\n\n male_count, female_count = 0, 0\n for data 
in city_file:\n if data['Gender'] == 'Male':\n male_count += 1\n elif data['Gender'] == 'Female':\n female_count += 1\n\n return (male_count, female_count)\n\ndef birth_years(city_file):\n '''Get Birth Years Statistic\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (int) Oldest User, Youngest User, Popular Birth Year for given filter '''\n\n birth_year = []\n for data in city_file:\n if type(data['Birth Year']) == int:\n birth_year.append(data['Birth Year'])\n\n year_result = Counter(birth_year).most_common()\n oldest_result = min(birth_year)\n youngest_result = max(birth_year)\n\n return (oldest_result, youngest_result, year_result[0][0])\n\ndef display_data(city_file,start_row,end_row):\n '''Displays five lines of data if the user specifies that they would like to.\n After displaying five lines, ask the user if they would like to see five more,\n continuing asking until they say stop.\n\n Args:\n (str) Rows of bikeshare data in list\n Returns:\n (str) Five rows from bikeshare data\n '''\n display = input('\\nWould you like to view individual trip data? '\n 'Type \\'yes\\' or \\'no\\'.\\n')\n\n ## Handle Invalid Raw Input\n if display.lower() == 'yes':\n print(city_file[start_row:end_row])\n start_row += 5\n end_row += 5\n display_data(city_file,start_row,end_row)\n elif display.lower() != 'yes' and display.lower() != 'no':\n print('\\nInvalid Input!')\n display_data(city_file,start_row,end_row)\n\ndef restart():\n '''Ask if the user want to restar the program or not\n\n Args:\n None\n Returns:\n None\n '''\n\n answer = input('\\nWould you like to restart? Type \\'yes\\' or \\'no\\'.\\n')\n\n ## Handle Invalid Raw Input\n if answer.lower() == 'yes':\n statistics()\n elif answer.lower() != 'yes' and answer.lower() != 'no':\n print('\\nInvalid Input\\n')\n restart()\n\ndef statistics():\n '''Calculates and prints out the descriptive statistics about a city and time period\n specified by the user via raw input.\n\n Args:\n none.\n Returns:\n none.\n '''\n # Filter by city (Chicago, New York, Washington)\n city = get_city().lower()\n\n # Open the correct CSV file base on city filter and fix the data type\n if city == 'chicago':\n city_data = read_csv(chicago)\n fix_data_type(city_data)\n for row in city_data:\n row['Birth Year'] = change_intclass(row['Birth Year'])\n\n elif city == 'new york':\n city_data = read_csv(new_york_city)\n fix_data_type(city_data)\n for row in city_data:\n row['Birth Year'] = change_intclass(row['Birth Year'])\n\n else:\n city_data = read_csv(washington)\n fix_data_type(city_data)\n\n # Filter by time period (month, day, none)\n time_period = get_time_period().lower()\n\n ## Statistic for \"none\" filter\n if time_period == 'none':\n\n print('Calculating the Statistic...\\n')\n\n start_time = time.time()\n\n # What is the most popular month for start time?\n pop_month = popular_month(city_data)\n print('\\nPopular Month is {}'.format(pop_month.title()))\n\n # What is the most popular day of week (Monday, Tuesday, etc.) 
for start time?\n pop_day = popular_day(city_data)\n print('\\nPopular Day is {}'.format(pop_day.title()))\n\n # What is the most popular hour of day for start time?\n pop_hour = popular_hour(city_data)\n print('\\nPopular Hour is {}'.format(pop_hour))\n\n # What is the total trip duration and average trip duration?\n trip_result = trip_duration(city_data)\n print('\\nTotal Trip Duration: {}'\n '\\nAverage Trip Duration: {}'.format(trip_result[0],trip_result[1]))\n\n # What is the most popular start station and most popular end station?\n station_result = popular_stations(city_data)\n print('\\nPopular Start Station: {}'\n '\\nPopular End Station: {}'.format(station_result[0],station_result[1]))\n\n # What is the most popular trip?\n most_trip = popular_trip(city_data)\n print('\\nPopular Trip is {}'.format(most_trip))\n\n # What are the counts of each user type?\n user_result = users(city_data)\n print('\\nSubscriber: {}'\n '\\nCustomer: {}'.format(user_result[0], user_result[1]))\n\n if city == 'chicago' or city == 'new york':\n\n # What are the counts of gender?\n gender_result = gender(city_data)\n print('\\nMale: {}'\n '\\nFemale: {}'.format(gender_result[0], gender_result[1]))\n\n # What are the earliest (i.e. oldest user), most recent (i.e. youngest user), and\n # most popular birth years?\n birthyear_result = birth_years(city_data)\n print('\\nOldest User: {}'\n '\\nYoungest User: {}'\n '\\nPopular Birth Year: {}'.format(birthyear_result[0], birthyear_result[1], birthyear_result[2]))\n\n print(\"\\nThat took %s seconds.\" % (time.time() - start_time))\n\n # Display five lines of data at a time if user specifies that they would like to\n start_row, end_row = 0, 4\n display_data(city_data,start_row,end_row)\n\n ## Statistic for \"month\" filter\n if time_period == 'month':\n\n # Filter by what month?\n month_filter = get_what_month().lower()\n\n # List of row based on selected month filter\n selected_data = []\n for month in month_list:\n if month[0] == month_filter:\n month_index = month[1]\n for data in city_data:\n if data['Start Time'].month == month_index:\n selected_data.append(data)\n\n print('\\nCalculating the Statistic...')\n\n start_time = time.time()\n\n # What is the most popular day of week (Monday, Tuesday, etc.) for start time?\n pop_day = popular_day(selected_data)\n print('\\nPopular Day is {}'.format(pop_day.title()))\n\n # What is the most popular hour of day for start time?\n pop_hour = popular_hour(selected_data)\n print('\\nPopular Hour is {}'.format(pop_hour))\n\n # What is the total trip duration and average trip duration?\n trip_result = trip_duration(selected_data)\n print('\\nTotal Trip Duration: {}'\n '\\nAverage Trip Duration: {}'.format(trip_result[0],trip_result[1]))\n\n # What is the most popular start station and most popular end station?\n station_result = popular_stations(selected_data)\n print('\\nPopular Start Station: {}'\n '\\nPopular End Station: {}'.format(station_result[0],station_result[1]))\n\n # What is the most popular trip?\n most_trip = popular_trip(selected_data)\n print('\\nPopular Trip is {}'.format(most_trip))\n\n # What are the counts of each user type?\n user_result = users(selected_data)\n print('\\nSubscriber: {}'\n '\\nCustomer: {}'.format(user_result[0], user_result[1]))\n\n if city == 'chicago' or city == 'new york':\n\n # What are the counts of gender?\n gender_result = gender(selected_data)\n print('\\nMale: {}'\n '\\nFemale: {}'.format(gender_result[0], gender_result[1]))\n\n # What are the earliest (i.e. 
oldest user), most recent (i.e. youngest user), and\n # most popular birth years?\n birthyear_result = birth_years(selected_data)\n print('\\nOldest User: {}'\n '\\nYoungest User: {}'\n '\\nPopular Birth Year: {}'.format(birthyear_result[0], birthyear_result[1], birthyear_result[2]))\n\n print(\"\\nThat took %s seconds.\" % (time.time() - start_time))\n\n # Display five lines of data at a time if user specifies that they would like to\n start_row, end_row = 0, 4\n display_data(selected_data,start_row,end_row)\n\n ## Statistic for \"day\" filter\n if time_period == 'day':\n\n # Filter by what day?\n day_filter = get_what_day().lower()\n\n # List of row based on selected day filter\n selected_data = []\n for day in day_list:\n if day[0] == day_filter:\n day_index = day[1]\n for data in city_data:\n if data['Start Time'].isoweekday() == day_index:\n selected_data.append(data)\n\n print('\\nCalculating the Statistic...')\n\n start_time = time.time()\n\n # What is the most popular hour of day for start time?\n pop_hour = popular_hour(selected_data)\n print('\\nPopular Hour is {}'.format(pop_hour))\n\n # What is the total trip duration and average trip duration?\n trip_result = trip_duration(selected_data)\n print('\\nTotal Trip Duration: {}'\n '\\nAverage Trip Duration: {}'.format(trip_result[0],trip_result[1]))\n\n # What is the most popular start station and most popular end station?\n station_result = popular_stations(selected_data)\n print('\\nPopular Start Station: {}'\n '\\nPopular End Station: {}'.format(station_result[0],station_result[1]))\n\n # What is the most popular trip?\n most_trip = popular_trip(selected_data)\n print('\\nPopular Trip is {}'.format(most_trip))\n\n # What are the counts of each user type?\n user_result = users(selected_data)\n print('\\nSubscriber: {}'\n '\\nCustomer: {}'.format(user_result[0], user_result[1]))\n\n if city == 'chicago' or city == 'new york':\n\n # What are the counts of gender?\n gender_result = gender(selected_data)\n print('\\nMale: {}'\n '\\nFemale: {}'.format(gender_result[0], gender_result[1]))\n\n # What are the earliest (i.e. oldest user), most recent (i.e. youngest user), and\n # most popular birth years?\n birthyear_result = birth_years(selected_data)\n print('\\nOldest User: {}'\n '\\nYoungest User: {}'\n '\\nPopular Birth Year: {}'.format(birthyear_result[0], birthyear_result[1], birthyear_result[2]))\n\n print(\"\\nThat took %s seconds.\" % (time.time() - start_time))\n\n # Display five lines of data at a time if user specifies that they would like to\n start_row, end_row = 0, 4\n display_data(selected_data,start_row,end_row)\n\n # Restart?\n restart()\n\nif __name__ == \"__main__\":\n\tstatistics()\n","repo_name":"henhal12/udacity-data-analysis-nanodegree-term-one","sub_path":"project2-bikeshare/bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":18106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"16000556185","text":"import os\nfrom datetime import timedelta\nfrom pathlib import Path\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\nSECRET_KEY = 'django-insecure-#c)nh44kcl3k6(ww^#vy^fp+(%1#g5%n-e8cs9##-_&7rmt=$n'\n\nDEBUG = os.getenv('DEBUG')\n\nALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS').split(',')\n\n\nINSTALLED_APPS = [\n 'frontend.apps.FrontendConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'drf_yasg',\n 'phonenumber_field'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'api/docs'),\n os.path.join(BASE_DIR, 'frontend/templates')\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"{levelname} {asctime} {message}\",\n \"style\": \"{\",\n },\n \"simple\": {\n \"format\": \"{levelname} {message}\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n },\n \"loggers\": {\n \"frontend\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': 10\n}\n\nSIMPLE_JWT = {\n 'ACCESS_TOKEN_LIFETIME': timedelta(days=10),\n 'AUTH_HEADER_TYPES': ('Bearer',),\n}\n\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nLOGIN_URL = 'frontend:signup'\nLOGOUT_REDIRECT_URL = 'frontend:main'\nAUTH_USER_MODEL = 'frontend.CustomUser'\n\nPHONENUMBER_DB_FORMAT = 'NATIONAL'\nPHONENUMBER_DEFAULT_REGION = 'RU'\n\nLANGUAGE_CODE = 'ru'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'api/docs'),\n os.path.join(BASE_DIR, 'frontend/static')\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\n\nDEFAULT_AUTO_FIELD = 
'django.db.models.BigAutoField'\n","repo_name":"clownvkkaschenko/ReferralSystem","sub_path":"referral_system/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"7079171187","text":"import graphene\nimport model.model as model\nimport model.repo as repo\n\ndocument_repo = None\n\n\ndef set_repos(_document_repo=None):\n global document_repo\n print(f'gqlschema.set_repos: {_document_repo}')\n document_repo = _document_repo\n\n\nclass Date(graphene.ObjectType):\n month = graphene.Int()\n year = graphene.Int()\n\n @classmethod\n def from_model(cls, date):\n return cls(month=date.month, year=date.year)\n\n\nclass ChildField(graphene.ObjectType):\n id = graphene.ID()\n name = graphene.String()\n date = graphene.Field(Date)\n\n @classmethod\n def from_model(cls, child_field):\n return cls(\n id=str(child_field.id),\n name=child_field.name,\n date=child_field.date\n )\n\n\nclass Document(graphene.ObjectType):\n id = graphene.ID()\n name = graphene.String()\n age = graphene.Int()\n child_field = graphene.List(ChildField)\n archived = graphene.Boolean()\n\n @classmethod\n def from_model(cls, document):\n return cls(\n id=document.id,\n name=document.name,\n age=document.age,\n child_field=[ChildField.from_model(s) for s in document.child_field],\n archived=document.archived\n )\n\n\nclass Query(graphene.ObjectType):\n documents = graphene.List(Document)\n document = graphene.Field(Document, id=graphene.ID())\n\n async def resolve_documents(self, args, context=None, info=None):\n global document_repo\n assert(document_repo is not None)\n\n results = []\n async for document in document_repo.find():\n results.append(Document.from_model(document))\n return results\n\n async def resolve_document(self, args, id, context=None, info=None):\n global document_repo\n assert (document_repo is not None)\n\n try:\n document = await document_repo.find_by_id(id)\n except repo.InvalidId as exc:\n return Errors([Error('id', ['invalid'])])\n\n if not document:\n return Errors([Error('id', ['not found'])])\n\n result = Document.from_model(document)\n\n return result\n\n\nclass Error(graphene.ObjectType):\n field = graphene.String()\n messages = graphene.List(graphene.String)\n\n\nclass Errors(graphene.ObjectType):\n errors = graphene.List(Error)\n\n @classmethod\n def from_exception(cls, exc):\n return cls(errors=[Error(field=field, messages=messages) for field, messages in exc.errors.items()])\n\n\nclass DocumentResponse(graphene.Union):\n class Meta:\n types = (Document, Errors)\n\n\nclass CreateDocumentInput(graphene.InputObjectType):\n name = graphene.String(required=True)\n age = graphene.Int(required=False)\n archived = graphene.String(required=False)\n\n\nclass CreateDocument(graphene.Mutation):\n class Arguments:\n document = CreateDocumentInput(required=True)\n\n Output = DocumentResponse\n\n async def mutate(self, info, document):\n global document_repo\n assert(document_repo is not None)\n\n try:\n result = await document_repo.create(name=document.name, age=document.age, archived=document.archived)\n except repo.RepoError as exc:\n return Errors.from_exception(exc)\n\n return Document.from_model(result)\n\n\nclass SetDocumentArchivedInput(graphene.InputObjectType):\n id = graphene.ID(required=True)\n archived = graphene.Boolean(required=True)\n\n\nclass SetDocumentArchived(graphene.Mutation):\n class Arguments:\n set_archived = SetDocumentArchivedInput(required=True)\n\n Output = DocumentResponse\n\n async def mutate(self, info, set_archived):\n global document_repo\n assert(document_repo is not None)\n\n try:\n document = await document_repo.find_by_id(set_archived.id)\n except repo.InvalidId as exc:\n return Errors([Error('id', ['invalid'])])\n\n if not 
document:\n return Errors([Error('id', ['not found'])])\n\n document.set_archived(set_archived.archived)\n result = await document_repo.save(document)\n \n return Document.from_model(result)\n\n\nclass DateInput(graphene.InputObjectType):\n month = graphene.Int(required=True)\n year = graphene.Int(required=True)\n\n def to_model(self):\n return model.Date(self.month, self.year)\n\n\nclass AddChildFieldInput(graphene.InputObjectType):\n document_id = graphene.ID(required=True)\n name = graphene.String()\n date = graphene.Field(DateInput, required=False)\n\n\nclass AddChildField(graphene.Mutation):\n class Arguments:\n add_child_field = AddChildFieldInput(required=True)\n\n Output = DocumentResponse\n\n async def mutate(self, info, add_child_field):\n global document_repo\n assert(document_repo is not None)\n\n try:\n document = await document_repo.find_by_id(add_child_field.document_id)\n except repo.InvalidId as exc:\n return Errors([Error('id', ['invalid'])])\n\n if not document:\n return Errors([Error('id', ['not found'])])\n\n document.add_child_field(\n model.ChildField(\n name=add_child_field.name,\n date=add_child_field.date.to_model(),\n )\n )\n\n result = await document_repo.save(document)\n\n return Document.from_model(result)\n\n\nclass RemoveChildFieldInput(graphene.InputObjectType):\n document_id = graphene.ID(required=True)\n child_field_id = graphene.ID(required=True)\n\n\nclass RemoveChildField(graphene.Mutation):\n class Arguments:\n remove_child_field = RemoveChildFieldInput(required=True)\n\n Output = DocumentResponse\n\n async def mutate(self, info, remove_child_field):\n global document_repo\n assert(document_repo is not None)\n\n try:\n document = await document_repo.find_by_id(remove_child_field.document_id)\n except repo.InvalidId as exc:\n return Errors([Error('id', ['invalid'])])\n\n if not document:\n return Errors([Error('id', ['not found'])])\n\n try:\n document.remove_child_field(remove_child_field.child_field_id)\n except KeyError as exc:\n return Errors([Error('contract_id', ['not found'])])\n\n result = await document_repo.save(document)\n\n return Document.from_model(result)\n\n\nclass EditChildFieldInput(graphene.InputObjectType):\n id = graphene.ID(required=True)\n name = graphene.String()\n date = graphene.Field(DateInput, required=False)\n\n\nclass EditChildFieldMutationInput(graphene.InputObjectType):\n document_id = graphene.ID(required=True)\n child_field = graphene.InputField(EditChildFieldInput, description=\"Child field to update\")\n\n\nclass EditChildField(graphene.Mutation):\n class Arguments:\n edit_child_field = EditChildFieldMutationInput(required=True)\n\n Output = DocumentResponse\n\n async def mutate(self, info, edit_child_field):\n global document_repo\n assert(document_repo is not None)\n\n try:\n document = await document_repo.find_by_id(edit_child_field.document_id)\n except repo.InvalidId as exc:\n return Errors([Error('id', ['invalid'])])\n\n if not document:\n return Errors([Error('id', ['not found'])])\n\n try:\n document.update_child_field(model.ChildField(\n id=edit_child_field.child_field.id,\n name=edit_child_field.child_field.name,\n date=edit_child_field.child_field.date.to_model()\n ))\n\n except KeyError as exc:\n return Errors([Error('contract_id', ['not found'])])\n\n result = await document_repo.save(document)\n\n return Document.from_model(result)\n\n\nclass Mutation(graphene.ObjectType):\n create_document = CreateDocument.Field()\n set_document_archived = SetDocumentArchived.Field()\n\n add_child_field = 
AddChildField.Field()\n remove_child_field = RemoveChildField.Field()\n edit_child_field = EditChildField.Field()\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)\n\n\n__all__ = [\n 'Date',\n 'DateInput',\n 'ChildField',\n 'Document',\n 'DocumentResponse',\n 'SetDocumentArchived',\n 'SetDocumentArchivedInput',\n 'CreateDocument',\n 'CreateDocumentInput',\n 'AddChildField',\n 'AddChildFieldInput',\n 'EditChildField',\n 'EditChildFieldInput',\n 'EditChildFieldMutationInput',\n 'RemoveChildField',\n 'RemoveChildFieldInput',\n 'Error',\n 'Errors',\n 'Query',\n 'Mutation',\n 'schema'\n]\n\n","repo_name":"ivanwilliam1/PythonAsyncAPIService","sub_path":"app/gqlschema.py","file_name":"gqlschema.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"}
+{"seq_id":"27970964811","text":"import os\nimport sys\nimport subprocess\nfrom time import ctime\nimport time\nimport socket\nfrom utils.common_utils import Common\n\nroot_dir = os.path.dirname(os.path.dirname(__file__))\n\n\nclass AppiumUtils(Common):\n\n def appium_start(self, _host, _port):\n \"\"\"\n start an appium server, if you want start any auto test from appium, you need\n start appium server first!\n :param host: the localhost number, eg:127.0.0.1\n :param port: the appium server lisenning port, eg:4723 but can't set 4724, be\n -cause bootstrap-port need set next one by port\n :return:\n \"\"\"\n print(\"=======Start appium server[port:%s] at: %s=======\" % (str(_port), ctime()))\n bp = str(_port + 1)\n cmd = \"appium -a \" + str(_host) + \" -p \" + str(_port) + \" -bp \" + bp\n print(\"Cmd:%s\" % cmd)\n log_path = os.path.join(root_dir, 'log', 'appium_' + str(_port) + '.log')\n print(\"Log path:%s\" % log_path)\n subprocess.Popen(cmd, shell=True, stdout=open(log_path, 'a'), stderr=subprocess.STDOUT)\n time.sleep(5)\n print(\"=======Start appium server[port:%s] success!!!=======\" % str(_port))\n\n def start_appium_server(self, _host, _port):\n if self.is_appium_port_idle(_host, _port):\n self.appium_start(_host, _port)\n return True\n else:\n print(\"=======Start appium server[host:%s/port:%s] fail!!!=======\" % (str(_host), str(_port)))\n return False\n\n def release_appium_server_port(self, _port):\n server_pid = self.get_netstat_pid_by_port(_port)\n if server_pid is not None:\n kill_cmd = \"kill \" + str(server_pid)\n os.popen(kill_cmd)\n print(\"Appium server: port=%d/pid=%d kill done ...\" % (_port, server_pid))\n else:\n print(\"Appium server port:%d is idle and available!\" % _port)\n\n def is_appium_port_idle(self, _host, _port):\n s_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s_client.connect((_host, _port))\n s_client.shutdown(socket.SHUT_RDWR)\n except OSError:\n print(\"Appium server port:%d is not be used!\" % _port)\n return True\n else:\n print(\"Appium server port:%d is using!\" % _port)\n return False\n\n def get_netstat_pid_by_port(self, _port):\n pid = None\n cmd = \"netstat -nlptu | awk '{print $4,$7}' | grep \" + str(_port)\n shell_dict = self.shell_cmd(cmd)\n tag = \":\" + str(_port)\n for line in shell_dict['std_out'].split('\\n'):\n if tag in line:\n pid = line.split(' ')[1].split('/')[0]\n return int(pid)\n\n def multi_start_appium_server(self):\n pass\n\n def multi_connect_device(self):\n pass\n\n\n","repo_name":"BoJunZeng/appium_demo","sub_path":"utils/appium_utils.py","file_name":"appium_utils.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"26481334275","text":"#This module helps communicate with raspberry pi for SOIL SENSORS, WATER FLOW SENSOR,\r\nimport RPi.GPIO as GPIO\r\nimport time, sys\r\nimport os\r\nimport requests\r\nimport json\r\nimport weather_api as wa\r\nimport datetime\r\n\r\n\r\nFLOW_SENSOR_GPIO = 13\r\n#MQTT_SERVER = \"192.168.1.220\"\r\n\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(FLOW_SENSOR_GPIO, GPIO.IN, pull_up_down = GPIO.PUD_UP)\r\n\r\nglobal count\r\ncount = 0\r\n\r\ndef countPulse(channel):\r\n\tglobal count\r\n\tif start_counter == 1:\r\n\t\tcount = count+1\r\n\r\nGPIO.add_event_detect(FLOW_SENSOR_GPIO, GPIO.FALLING, callback=countPulse)\r\n\r\nwhile True:\r\n\ttry:\r\n\r\n\t\tt = time.localtime()\r\n\t\tcurrent_time = time.strftime(\"%H:%M:%S\", t)\r\n\r\n\t\tnow = datetime.datetime.now()\r\n\t\tcurrent_date = now.strftime(\"%Y-%m-%d\")\r\n\r\n\r\n\t\tstart_counter = 1\r\n\t\ttime.sleep(1)\r\n\t\tstart_counter = 0\r\n\t\tflow = (count / 7.5) # Pulse frequency (Hz) =7.5Q, Q is flow rate in L/min.\r\n\t\tprint(\"The flow is: %.3f Liter/min\" % (flow))\r\n\t\tprint(\"Sending API Requests...\")\r\n\t\t#os.system (\"'curl -X PUT -d '{\"time\": current_time ,\"flow\": flow}' 'INSERT API LINK HERE'\")\r\n\r\n\t\t#publish.single(\"/Garden.Pi/WaterFlow\", flow, hostname=MQTT_SERVER)\r\n\t\t\r\n\r\n\t\theaders = {\r\n\t\t \t'Content-Type': 'application/x-www-form-urlencoded',\r\n\t\t }\r\n\r\n\t\tdata = {\"date\": current_date ,\"water_l\":\"270 m\",\"moisture\":\"20 %\",\"pump\":\"ON\",\"time\": current_time ,\"flow\": flow, \"weather\": wa.weather_patiala, \"temp\": wa.weather_patiala[\"main\"][\"temp\"], \"humidity\": wa.weather_patiala[\"main\"][\"humidity\"]}\r\n\t\tdata_json = json.dumps(data)\r\n\t\tresponse = requests.put('INSERT API LINK HERE', headers=headers, data=data_json)\r\n\t\tprint(\"Request Sent to Patiala Firebase API\")\r\n\r\n\t\tdata_chd = {\"date\": current_date ,\"time\": current_time ,\"flow\": flow, \"weather\": wa.weather_chandigarh, \"temp\": wa.weather_chandigarh[\"main\"][\"temp\"], \"humidity\": wa.weather_chandigarh[\"main\"][\"humidity\"]}\r\n\t\tdata_json_chd = json.dumps(data_chd)\r\n\t\tresponse = requests.put('INSERT API LINK HERE', headers=headers, data=data_json_chd)\r\n\t\tprint(\"Request Sent to Chandigarh Firebase API\")\r\n\r\n\r\n\r\n\t\tdata_amritsar = {\"date\": current_date ,\"time\": current_time ,\"flow\": flow, \"weather\": wa.weather_amritsar, \"temp\": wa.weather_amritsar[\"main\"][\"temp\"], \"humidity\": wa.weather_amritsar[\"main\"][\"humidity\"]}\r\n\t\tdata_json_amritsar = json.dumps(data_amritsar)\r\n\t\tresponse = requests.put('INSERT API LINK HERE', headers=headers, data=data_json_amritsar)\r\n\t\tprint(\"Request Sent to Amritsar Firebase API\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t# response = requests.put('INSERT API LINK HERE', headers=headers, data=data_json) old url\r\n\r\n\t\tcount = 0\r\n\t\ttime.sleep(60)\r\n\t\tcontinue\r\n\t\t\r\n\texcept KeyboardInterrupt:\r\n\t\tprint(\"KeyboardInterrupt has been caught.\")\r\n\t\tGPIO.cleanup() #shouldn't be used i guess\r\n\t\tsys.exit()\r\n","repo_name":"SUNS-TIET/SMARTswitch","sub_path":"raspi/raspberrypi_module.py","file_name":"raspberrypi_module.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"34880149021","text":"# coding: utf8\n\n_STORE = dict()\n\n\ndef simplecache(func):\n def _(self, *args, **kw):\n key = (\n func.__name__ +\n \"_\".join([str(x) for x in args]) + \"+\"\n \"_\".join(\"%s~%s\" % (k, v) for k, v in sorted(kw.items()))\n )\n res = _STORE.get(key)\n if res is None:\n res = func(self, *args, **kw)\n _STORE[key] = res\n return res\n return _\n","repo_name":"yeyuexia/gitlab_auto_report","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"17898765362","text":"# -*- coding utf-8 -*-\nfrom odoo import models, fields, _\n\n\nclass AccountAnalyticLine(models.Model):\n _inherit = \"account.analytic.line\"\n\n timesheet_type = fields.Many2one(comodel_name=\"project.timesheet.type\", string=\"Type\")\n progress = fields.Float(string=\"% Progress\",)\n #milestone_id = fields.Many2one(comodel_name=\"project.milestone\", string=\"Milestone\", related=\"task_id.milestone_id\", store=True)\n progress_manual = fields.Float(\"Progress manual\", related=\"task_id.progress_manual\", store=True)\n planned_hours = fields.Float(string=\"Planned Hours\", related=\"task_id.planned_hours\", store=True)\n","repo_name":"Nexta-Digital-Solutions/nexta-project-14","sub_path":"project_custom/models/account_analytic_line.py","file_name":"account_analytic_line.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"24830954543","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom Model.CCGameDBTWModel import *\n\n\nclass CCGameDBTWTest(unittest.TestCase):\n # 確認角色個數與欄位正確性\n def test_total_count(self):\n expected = 489\n actual = len(CCGameDBTWDataOwner().data)\n assert actual == expected, 'Total character count expect {0}, but actual {1}'.format(expected, actual)\n\n # 檢查我資料庫中的角色,是否與 CGDT 內的 ID 與 FullName 吻合\n def test_are_id_and_full_name_matched(self):\n data_owner = CCGameDBTWDataOwner()\n # 若 id>=6000,代表是國服的角色,不用檢查\n characters = DBAccessor.execute('select ID, FullName from Character where ID<6000').fetchall()\n for each_my_info in characters:\n the_id = each_my_info[0]\n name = each_my_info[1]\n\n cgdt_info = data_owner.find_character_by_id(the_id)\n assert cgdt_info is not None, 'Character ID={0} {1} did not match any id.'.format(\n the_id, name.encode('utf-8'))\n assert _compare_full_name(cgdt_info.full_name, name), 'Character ID={0} has FullName {1} and {2}.'.format(\n the_id, cgdt_info.full_name.encode('utf-8'), name.encode('utf-8'))\n\n\ndef _compare_full_name(cgdt_name, db_name):\n name = db_name.replace('v1', '') # 主人公的 FullName 有重複\n return cgdt_name == name\n","repo_name":"YiFanChen99/chain-chronicle","sub_path":"Test/CCGameDBTWTest.py","file_name":"CCGameDBTWTest.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"23420092660","text":"import pytest\n\nfrom inka.models.notes.basic_note import BasicNote\nfrom inka.models.notes.cloze_note import ClozeNote\n\n\n@pytest.fixture\ndef basic_note() -> BasicNote:\n return BasicNote(\"front content\", \"back content\", [\"tag1\", \"tag2\"], \"deck name\")\n\n\ndef test_search_query(basic_note):\n basic_note.front_html = \"
front content
\"\n expected = '\"
front content
\"'\n\n assert basic_note.search_query == expected\n\n\ndef test_convert_fields_to_html_when_function_passed(basic_note):\n new_text = \"new text\"\n\n basic_note.convert_fields_to_html(lambda text: new_text)\n\n assert basic_note.front_html == new_text\n assert basic_note.back_html == new_text\n\n\ndef test_update_fields_with_when_function_passed(basic_note):\n new_text = \"new text\"\n\n basic_note.update_fields_with(lambda text: new_text)\n\n assert basic_note.updated_front_md == new_text\n assert basic_note.updated_back_md == new_text\n\n\ndef test_get_raw_fields(basic_note):\n fields = basic_note.get_raw_fields()\n\n assert len(fields) == 2\n assert fields[0] == basic_note.raw_front_md\n assert fields[1] == basic_note.raw_back_md\n\n\ndef test_get_raw_question_field(basic_note):\n field = basic_note.get_raw_question_field()\n\n assert field == basic_note.raw_front_md\n\n\ndef test_get_html_fields(basic_note, config):\n front_name = \"myFront\"\n back_name = \"myBack\"\n config.update_option_value(\"anki\", \"front_field\", front_name)\n config.update_option_value(\"anki\", \"back_field\", back_name)\n expected = {front_name: basic_note.front_html, back_name: basic_note.back_html}\n\n assert basic_note.get_html_fields(config) == expected\n\n\ndef test_get_anki_note_type(basic_note, config):\n expected = \"my super type\"\n config.update_option_value(\"anki\", \"basic_type\", expected)\n\n assert basic_note.get_anki_note_type(config) == expected\n\n\ndef test_eq_when_same():\n first_note = BasicNote(\"front\", \"back\", [\"tag1\"], \"deck\")\n second_note = BasicNote(\"front\", \"back\", [\"tag1\"], \"deck\")\n\n assert first_note == second_note\n\n\n@pytest.mark.parametrize(\n \"second_note\",\n (\n BasicNote(\"oops\", \"back\", [\"tag1\"], \"deck\"),\n BasicNote(\"front\", \"oops\", [\"tag1\"], \"deck\"),\n BasicNote(\"front\", \"back\", [\"tag1\", \"tag2\"], \"deck\"),\n BasicNote(\"front\", \"back\", [\"tag1\"], \"my deck\"),\n None,\n ClozeNote(\"front\", [\"tag1\"], \"my deck\"),\n \"short string\",\n ),\n)\ndef test_eq_when_not_equal(second_note):\n first_note = BasicNote(\"front\", \"back\", [\"tag1\"], \"deck\")\n\n assert first_note != second_note\n","repo_name":"keiqu/inka","sub_path":"tests/test_basic_note.py","file_name":"test_basic_note.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"22"}
+{"seq_id":"18943642230","text":"from collections import defaultdict\nfrom sys import stdin, argv\nfrom urllib.parse import urlparse\nfrom urllib.request import urlopen\n\ndef tsv(items):\n items = sorted([(k, v) for (k, v) in items.items()], key=lambda x: x[1])\n for k, v in items:\n print(\"{}\\t{}\".format(k, v))\n\ndef try_visit(url):\n try:\n req = urlopen(url, timeout=5)\n return req.status < 400\n except:\n return False\n\ndef main(type):\n counts = defaultdict(lambda: 0)\n for line in stdin:\n url = urlparse(line)\n if type == 'paths':\n counts[url.path] += 1\n elif type == 'hosts':\n counts[url.netloc] += 1\n elif type == 'visit':\n counts[line] = try_visit(line)\n\n tsv(counts)\n\nif __name__ == '__main__':\n if len(argv) < 2 or argv[1] not in ['paths', 'hosts', 'visit']:\n print(\"Supply 'hosts' or 'paths' as the argument to count\")\n exit(1)\n main(argv[1])","repo_name":"sinkingpoint/blog-codes","sub_path":"google-urls/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"70007202297","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 12 23:29:37 2018\r\n\r\n@author: saikiran\r\n\"\"\"\r\n\r\nfrom mpi4py import MPI\r\nimport numpy as np\r\ndef divide_data(data,n):\r\n split_data = np.split(data,n)\r\n return split_data\r\n \r\n \r\ncomm = MPI.COMM_WORLD\r\nrank = comm.rank\r\nprint(\"my rank is:\", rank)\r\nstart_time = MPI.Wtime()\r\nprint(\"start time is:\",start_time)\r\nn=4\r\nnp.random.seed(0)\r\nvector2 = np.random.rand(1,4)\r\nv2=vector2\r\n\r\nsize = len(v2[0])\r\n\r\nfor i in range(n):\r\n vector1 = np.random.rand(4,4)\r\n vector1=np.ravel(vector1)\r\n v1 = divide_data(vector1,n)\r\n if rank==0:\r\n vec1 = np.reshape(v1[i],(int(size/n),size))\r\n \r\n if i==0:\r\n data=vec1*v2\r\n print(\"my vector product is:\",data)\r\n end_time = MPI.Wtime()\r\n print(\"end time is:\",end_time)\r\n print(\"total execution time is :\",end_time-start_time)\r\n \r\n destination_process= i+1\r\n if destination_process==n:\r\n print(\"Data has been sent to all processes succesfully\")\r\n else:\r\n comm.send(v1[i+1],dest=destination_process, tag=8)\r\n print(\"sending vector1 data {} data to process{}\" .format(v1[i+1],destination_process))\r\n final_vector=comm.recv(source = i+1)\r\n print(\"received data is\",final_vector)\r\n append_data = np.append(data,final_vector,axis=0)\r\n data = append_data\r\n print(\"my final_vector product data is :\",data)\r\n \r\n if rank==i+1:\r\n vector3 = comm.recv(source=0,tag=8)\r\n vector3 = np.reshape(vector3,(int(size/n),size))\r\n print(\"received vector1 data is\",vector3)\r\n data2 = np.multiply(vector3,v2)\r\n print(\"my vector product is:\", data2)\r\n destination_process = 0\r\n comm.send(data2, dest=destination_process)\r\n print(\"sending vector average data {} data to process{}\" .format(data2,destination_process))\r\n end_time = MPI.Wtime()\r\n print(\"end time is:\",end_time)\r\n print(\"total execution time is :\",end_time-start_time)\r\n \r\nif rank==n-1:\r\n print(\"vector multiplication using point to point is completed successfully\")","repo_name":"SaikiranGS/Distributed-programming","sub_path":"Distributed_vector_computation/vector_multiplication.py","file_name":"vector_multiplication.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"75090366456","text":"from codes import urequests\r\nfrom codes import mywifi\r\nimport json\r\nfrom machine import UART\r\n\r\nwifi = mywifi.WIFI()#ssid,password\r\n\r\n#\r\n\r\nuart = UART(2, 9600)\r\nurl=\"http://planesystem.xyz/control_led/\"\r\n\r\nret = {}\r\nret['led1'] = 10 #亮度1\r\nret['led2'] = 15 #亮度1\r\nret['led3'] = 20 #亮度1\r\n\r\n#上传灯信息\r\nr=urequests.post(url,data=json.dumps(ret))\r\nprint(r.json()[\"res\"])\r\nr.close()\r\n#获得时间计算后的当前开关信息(0表示白天,1表示黑天可以开灯)\r\nuart.write(str(r))\r\n#记得每次cl\r\n#\r\n#\r\n# r.close()","repo_name":"ywz978020607/History_mpy","sub_path":"Micropython_esp32_8266/esp32_自组网/版本二 esp自带wifi组网/codes/test_django.py","file_name":"test_django.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"zh","doc_type":"code","stars":36,"dataset":"github-code","pt":"22"}
+{"seq_id":"15771282107","text":"\"\"\"Serverless module.\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom . import (\n RunwayModule, format_npm_command_for_logging, generate_node_command,\n run_module_command, run_npm_install, warn_on_skipped_configs\n)\nfrom ..util import change_dir, which\n\nLOGGER = logging.getLogger('runway')\n\n\ndef gen_sls_config_files(stage, region):\n \"\"\"Generate possible SLS config files names.\"\"\"\n names = []\n for ext in ['yml', 'json']:\n # Give preference to explicit stage-region files\n names.append(\n os.path.join('env',\n \"%s-%s.%s\" % (stage, region, ext))\n )\n names.append(\"config-%s-%s.%s\" % (stage, region, ext))\n # Fallback to stage name only\n names.append(\n os.path.join('env',\n \"%s.%s\" % (stage, ext))\n )\n names.append(\"config-%s.%s\" % (stage, ext))\n return names\n\n\ndef get_sls_config_file(path, stage, region):\n \"\"\"Determine Serverless config file name.\"\"\"\n for name in gen_sls_config_files(stage, region):\n if os.path.isfile(os.path.join(path, name)):\n return name\n return \"config-%s.json\" % stage # fallback to generic json name\n\n\ndef run_sls_remove(sls_cmd, env_vars):\n \"\"\"Run sls remove command.\"\"\"\n sls_process = subprocess.Popen(sls_cmd,\n stdout=subprocess.PIPE,\n env=env_vars)\n stdoutdata, _stderrdata = sls_process.communicate()\n sls_return = sls_process.wait()\n print(stdoutdata)\n if sls_return != 0 and (sls_return == 1 and not (\n re.search(r\"Stack '.*' does not exist\", stdoutdata))):\n sys.exit(sls_return)\n\n\nclass Serverless(RunwayModule):\n \"\"\"Serverless Runway Module.\"\"\"\n\n def run_serverless(self, command='deploy'):\n \"\"\"Run Serverless.\"\"\"\n response = {'skipped_configs': False}\n sls_opts = [command]\n\n if not which('npm'):\n LOGGER.error('\"npm\" not found in path or is not executable; '\n 'please ensure it is installed correctly.')\n sys.exit(1)\n\n if 'CI' in self.context.env_vars and command != 'remove':\n sls_opts.append('--conceal') # Hide secrets from serverless output\n\n if 'DEBUG' in self.context.env_vars:\n sls_opts.append('-v') # Increase logging if requested\n\n sls_opts.extend(['-r', self.context.env_region])\n sls_opts.extend(['--stage', self.context.env_name])\n sls_env_file = get_sls_config_file(self.path,\n self.context.env_name,\n self.context.env_region)\n\n sls_cmd = generate_node_command(command='sls',\n command_opts=sls_opts,\n path=self.path)\n\n if (not self.options.get('environments') and os.path.isfile(os.path.join(self.path, sls_env_file))) or ( # noqa pylint: disable=line-too-long\n self.options.get('environments', {}).get(self.context.env_name)): # noqa\n if os.path.isfile(os.path.join(self.path, 'package.json')):\n with change_dir(self.path):\n run_npm_install(self.path, self.options, self.context)\n LOGGER.info(\"Running sls %s on %s (\\\"%s\\\")\",\n command,\n os.path.basename(self.path),\n format_npm_command_for_logging(sls_cmd))\n if command == 'remove':\n # Need to account for exit code 1 on any removals after\n # the first\n run_sls_remove(sls_cmd, self.context.env_vars)\n else:\n run_module_command(cmd_list=sls_cmd,\n env_vars=self.context.env_vars)\n else:\n LOGGER.warning(\n \"Skipping serverless %s of %s; no \\\"package.json\\\" \"\n \"file was found (need a package file specifying \"\n \"serverless in devDependencies)\",\n command,\n os.path.basename(self.path))\n else:\n response['skipped_configs'] = True\n LOGGER.info(\n \"Skipping serverless %s of %s; no 
config file for \"\n \"this stage/region found (looking for one of \\\"%s\\\")\",\n command,\n os.path.basename(self.path),\n ', '.join(gen_sls_config_files(self.context.env_name,\n self.context.env_region)))\n return response\n\n def plan(self):\n \"\"\"Skip sls planning.\"\"\"\n LOGGER.info('Planning not currently supported for Serverless')\n\n def deploy(self):\n \"\"\"Run sls deploy.\"\"\"\n result = self.run_serverless(command='deploy')\n warn_on_skipped_configs(result, self.context.env_name,\n self.context.env_vars)\n\n def destroy(self):\n \"\"\"Run serverless remove.\"\"\"\n result = self.run_serverless(command='remove')\n warn_on_skipped_configs(result, self.context.env_name,\n self.context.env_vars)\n","repo_name":"goedelsoup/runway","sub_path":"runway/module/serverless.py","file_name":"serverless.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"}
+{"seq_id":"39966390178","text":"# Modified from ScanNet evaluation script: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/3d_evaluation/evaluate_semantic_label.py\nimport logging\nimport numpy as np\n\nlog = logging.getLogger(__name__)\n\n\ndef evaluate_scan(pred_ids, gt_ids, confusion, id_to_label_map, ignore_id):\n\n VALID_CLASS_IDS = list(id_to_label_map.keys())\n\n # sanity checks\n if not pred_ids.shape == gt_ids.shape:\n raise RuntimeError(\"Ground truth and prediction sizes don't match\")\n\n for (gt_val, pred_val) in zip(gt_ids.flatten(), pred_ids.flatten()):\n if gt_val not in VALID_CLASS_IDS:\n continue\n if pred_val not in VALID_CLASS_IDS:\n pred_val = ignore_id\n confusion[gt_val][pred_val] += 1\n\n\ndef get_iou(label_id, confusion, id_to_label_map):\n\n VALID_CLASS_IDS = list(id_to_label_map.keys())\n\n if not label_id in VALID_CLASS_IDS:\n return float(\"nan\")\n # #true positives\n tp = np.longlong(confusion[label_id, label_id])\n # #false negatives\n fn = np.longlong(confusion[label_id, :].sum()) - tp\n # #false positives\n not_ignored = [l for l in VALID_CLASS_IDS if not l == label_id]\n fp = np.longlong(confusion[not_ignored, label_id].sum())\n\n denom = tp + fp + fn\n if denom == 0:\n return float(\"nan\")\n return (float(tp) / denom, tp, denom)\n\n\ndef write_result_file(confusion, ious, id_to_label_map):\n\n VALID_CLASS_IDS = list(id_to_label_map.keys())\n\n log.info(\"Semantic Segmentation results\")\n log.info(\"iou scores\")\n for i in range(len(VALID_CLASS_IDS)):\n label_id = VALID_CLASS_IDS[i]\n label_name = id_to_label_map[label_id]\n if type(ious[label_name]) == tuple:\n iou = ious[label_name][0]\n log.info(\"{0:<14s}({1:<2d}): {2:>5.3f}\".format(label_name, label_id, iou))\n log.info(\"confusion matrix\")\n log.info(\"\\t\\t\\t\")\n\n output_string = \"\"\n for i in range(len(VALID_CLASS_IDS)):\n # f.write('\\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))\n output_string += \"{0:<8d}\".format(VALID_CLASS_IDS[i])\n log.info(output_string)\n\n for r in range(len(VALID_CLASS_IDS)):\n log.info(\"{0:<14s}({1:<2d})\".format(id_to_label_map[r], VALID_CLASS_IDS[r]))\n\n output_string = \"\"\n for c in range(len(VALID_CLASS_IDS)):\n output_string += \"\\t{0:>5.3f}\".format(\n confusion[VALID_CLASS_IDS[r], VALID_CLASS_IDS[c]]\n )\n log.info(output_string)\n\n\ndef evaluate(matches, id_to_label_map, ignore_id, verbose=True):\n\n VALID_CLASS_IDS = list(id_to_label_map.keys())\n\n max_id = np.max(VALID_CLASS_IDS)\n confusion = np.zeros((max_id + 1, max_id + 1), dtype=np.ulonglong)\n\n if verbose:\n log.info(f\"evaluating {len(matches.keys()) } scans...\")\n\n for scene_name, compare in matches.items():\n evaluate_scan(\n compare[\"pred\"], compare[\"gt\"], confusion, id_to_label_map, ignore_id\n )\n\n class_ious = {}\n for i in range(len(VALID_CLASS_IDS)):\n label_id = VALID_CLASS_IDS[i]\n label_name = id_to_label_map[label_id]\n class_ious[label_name] = get_iou(label_id, confusion, id_to_label_map)\n\n if verbose:\n log.info(\"classes IoU\")\n log.info(\"----------------------------\")\n for i in range(len(VALID_CLASS_IDS)):\n label_id = VALID_CLASS_IDS[i]\n label_name = id_to_label_map[label_id]\n if type(class_ious[label_name]) == tuple:\n log.info(\n \"{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})\".format(\n label_name,\n class_ious[label_name][0],\n class_ious[label_name][1],\n class_ious[label_name][2],\n )\n )\n\n # Return mean IOU\n mean_iou = 0\n for i in range(len(VALID_CLASS_IDS)):\n label_id = VALID_CLASS_IDS[i]\n iou_output = 
get_iou(label_id, confusion, id_to_label_map)\n if type(iou_output) == tuple:\n mean_iou += iou_output[0]\n mean_iou /= len(VALID_CLASS_IDS)\n\n if verbose:\n log.info(\"----------------------------\")\n log.info(f\"mIOU {mean_iou:.3f}\")\n\n return mean_iou\n","repo_name":"jandaa/masters-thesis","sub_path":"src/util/eval_semantic.py","file_name":"eval_semantic.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"}
+{"seq_id":"13975588828","text":"#!/usr/bin/python3\n\nfrom pwn import *\n\nfilename = \"./chall\"\n\n#io = process(filename)\nio = remote(\"138.68.177.159\", 30537)\nelf = ELF(filename)\n\ncontext.clear(arch=\"amd64\")\n\n\noffset = 40\nbuffer = 0x601048 #bss address to write into using read\nsyscall = 0x40053b #syscall gadget\n\n\ndef main():\n\trop = ROP(elf)\n\trop.call(elf.sym.read, [0x0, buffer])\n\trop.call(syscall, [0x1, elf.got.__libc_start_main])\n\trop.main()\n\n\n\tpayload = b\"A\"*40 + rop.chain()\n\tio.sendline(payload)\n\t\n\tpayload = b\"\"\n\tio.sendline(payload) #control the value or RAX to write\n\t\n\tleaked_address = u64(io.recv(6).decode(\"latin-1\").ljust(8,\"\\x00\"))\n\tlog.info(\"Leaked address: %s \" % hex(leaked_address))\n\t\n\tbase_address = leaked_address - 0x21b10\n\t\n\tlog.info(\"Base address: %s \" % hex(base_address))\t\n\tsystem = base_address + 0x4f550\n\tbinsh = base_address + 0x1b3e1a\n\t\n\trop = ROP(elf)\n\trop.call(system, [binsh])\n\tio.sendline(b\"A\"*offset + rop.chain())\n\tio.interactive()\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n# CHTB{n0_0utput_n0_pr0bl3m_w1th_sr0p}\n","repo_name":"mutur4/ctf-writeups","sub_path":"CyberApocalypse/system dROP/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"}
+{"seq_id":"71782960056","text":"class Solution:\n def findShortestSubArray(self, nums: List[int]) -> int:\n hashmap = {}\n \n for i, num in enumerate(nums):\n if num in hashmap:\n count, arr = hashmap.get(num)\n hashmap[num] = (count + 1, arr + [i])\n else:\n hashmap[num] = (1, [i])\n \n curr_max = 0\n res = len(nums)\n \n for _, val in hashmap.items():\n count, arr = val\n if count > curr_max:\n curr_max = count\n res = arr[-1] - arr[0] + 1\n elif count == curr_max:\n cuur_max = count\n res = min(res, arr[-1] - arr[0] + 1)\n \n return res","repo_name":"jzhou45/leethub","sub_path":"0697-degree-of-an-array/0697-degree-of-an-array.py","file_name":"0697-degree-of-an-array.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"15020352555","text":"from common.common import *\nfrom compiler.semantic import Semantic\nfrom compiler.code_generator import Code\n\n\nclass Expression:\n \n # __errors__=[]\n __tokens__ = []\n __terminals__ = [\"ID\",\"FLOAT\",\"INTEGER\",\"LITERAL\"]\n __token__ = None\n __init__ = False\n __errors__ = []\n __type__ = None\n \n def __erro__():\n if not Expression.__init__:\n t = Expression.__token__\n Expression.__init__ = True\n if not t in Expression.__errors__:\n Expression.__tokens__.insert(0,t)\n error(t)\n\n def __get_token__():\n if not Expression.__init__:\n Expression.__token__ = None if len(Expression.__tokens__) == 0 else Expression.__tokens__.pop(0)\n if Expression.__token__:\n if Expression.__token__.type in Expression.__terminals__:\n Code.operating(Expression.__token__.token)\n \n def __T__():\n if Expression.__init__:\n return\n if Expression.__token__:\n t = Expression.__token__\n if t.token == \"(\" or t.type in Expression.__terminals__:\n if t.type == \"ID\":\n Semantic.initialized(Expression.__token__)\n if t.token != \"(\":\n Semantic.cast(t)\n Expression.__F__()\n Expression.__T2__()\n else:\n Expression.__erro__()\n\n def __E2__():\n if Expression.__init__:\n return\n Expression.__get_token__()\n if Expression.__token__:\n t = Expression.__token__\n\n if t.token == \"+\" or t.token == \"-\":\n Expression.__get_token__()\n Expression.__T__()\n Expression.__E2__()\n Code.operator(t.token)\n\n elif t.token != \")\":\n Expression.__erro__()\n\n def __T2__():\n if Expression.__init__:\n return\n Expression.__get_token__()\n if Expression.__token__:\n t = Expression.__token__\n\n if t.token == \"*\" or t.token == \"/\":\n Expression.__get_token__()\n Expression.__F__()\n Expression.__T2__()\n Code.operator(t.token)\n else:\n Expression.__tokens__.insert(0,t)\n \n def __F__():\n if Expression.__init__:\n return\n if Expression.__token__:\n t = Expression.__token__\n if t.token == \"(\":\n Expression.__E__()\n # Expression.__get_token__()\n if Expression.__token__:\n t = Expression.__token__\n if t.token != \")\":\n Expression.__erro__()\n\n def __E__():\n if Expression.__init__:\n return\n Expression.__get_token__()\n if Expression.__token__:\n t = Expression.__token__\n if t.token == \"(\" or t.type in Expression.__terminals__:\n Expression.__T__()\n Expression.__E2__()\n else:\n Expression.__erro__()\n \n def match(tokens,is_return = False):\n Expression.__tokens__ = []\n Semantic.init_cast()\n Code.start_op()\n if not isinstance(tokens,list):\n tokens = list([tokens])\n while len(tokens):\n token = tokens[0]\n if not token or token.token in [\",\",\";\"]:\n break\n tokens.pop(0)\n if token:\n Expression.__tokens__.append(token)\n\n if not len(Expression.__tokens__):\n if not is_return:\n error(f\"Esperado expressao\")\n else:\n while True:\n Expression.__init__ = False\n Expression.__E__()\n if not Expression.__init__:\n break\n \n Expression.__type__ = Semantic.get_cast()\n Code.finish_op()\n \n return tokens\n","repo_name":"marcotuliocarvalho/StarCompiler","sub_path":"compiler/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"8986459280","text":"from matplotlib.ticker import MaxNLocator\r\nimport itertools\r\n\r\ndef draw_rectangles(rectangleDataList):\r\n padding = 5\r\n maxX = max([max_x for (min_x,min_y,max_x,max_y) in rectangleDataList]) + padding\r\n minX = min([min_x for (min_x,min_y,max_x,max_y) in rectangleDataList]) - padding\r\n maxY = max([max_y for (min_x,min_y,max_x,max_y) in rectangleDataList]) + padding\r\n minY = min([min_y for (min_x,min_y,max_x,max_y) in rectangleDataList]) - padding\r\n\r\n # this plots all of the rectangles on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('Rectangles')\r\n ax.set_aspect(1) #normalizes the graph\r\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\r\n\r\n #the x and y limits for the graph are set by the largest x and y values generated and are restricted by radii\r\n plt.xlim(minX , maxX )\r\n plt.ylim(minY , maxY )\r\n plt.grid(True, which='both')\r\n\r\n for rectangle in rectangleDataList:\r\n min_x,min_y,max_x,max_y = rectangle\r\n width = max_x - min_x\r\n height = max_y - min_y\r\n\r\n # Plot library uses min left as the point\r\n pt_x = min_x\r\n pt_y = min_y\r\n\r\n # For annotation\r\n center_x = min_x + width/2.0\r\n center_y = min_y + height/2.0\r\n\r\n # Place the rectangle\r\n rectangleObj = plt.Rectangle(xy=(pt_x, pt_y), width=width, height=height, color='b', fill=False, linewidth=2)\r\n ax.add_artist(rectangleObj)\r\n annotate_string = 'Min: (' + str(min_x)+','+str(min_y)+')\\n'+'Max: ('+str(max_x)+','+str(max_y) + ')'\r\n label = ax.annotate(annotate_string, xy=(center_x, center_y), fontsize=9, ha=\"center\")\r\n\r\n plt.show()\r\n\r\ndraw_rectangles([list(itertools.chain(*ground_truth)), list(itertools.chain(*prediction))])","repo_name":"Mekrab/Computer-Vision-Toolbox","sub_path":"python_toolbox/draw_rectangles.py","file_name":"draw_rectangles.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"74983369335","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndef run(event,context):\n driver = webdriver.PhantomJS(service_args=['--ssl-protocol=any'])\n driver.implicitly_wait(10)\n driver.get('http://www.python.org/')\n assert \"Python\" in driver.title\n elem = driver.find_element_by_name(\"q\")\n elem.send_keys(\"pycon\")\n elem.send_keys(Keys.RETURN)\n assert \"No results found.\" not in driver.page_source\n print(driver.title)\n driver.quit() \n","repo_name":"jaklinger/nesta_dataflow","sub_path":"collect_data/utils/immerseuk/gtr/gtr_extrainfo_aws.py","file_name":"gtr_extrainfo_aws.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"22237229726","text":"# Conversor de segundos para horas e minutos\n\nprint(\"Insira os segundos a serem convertidos\")\n\nsegundos = input()\nhoras = float(segundos) // 3600\n\nsegundos_minutos = float(segundos) % 3600\nminutos = segundos_minutos // 60\n\nsegundos_restantes = segundos_minutos % 60\n\nprint(segundos, \"segundo equivalem a :\", int(horas), \"hora(s),\", int(minutos), \"minuto(s),\", int(segundos_restantes), \"segundos\")\n","repo_name":"vinaud/Exercicios-Python","sub_path":"Exercicios basicos/conversorSegundos.py","file_name":"conversorSegundos.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"10949085801","text":"# from exceptions import ApiException\nfrom client import Client\nimport json\n\nclass DiacriticRestorer(Client):\n \"\"\"Lexicon class\"\"\"\n\n def __init__(self, language='hr'):\n super(DiacriticRestorer, self).__init__()\n self.language = language\n\n def restore(self, text, format='json'):\n\n if not self._auth.hasToken():\n raise ValueError(\"Unauthorized\")\n\n if self.language is None:\n raise ValueError(\"Language not set\")\n\n if text is None:\n raise ValueError(\"Please specify the input text\")\n\n params = {\n 'text': text,\n 'format': format\n }\n\n return self.queryApi(\"{0}/restore\".format(self.language), params)\n","repo_name":"clarinsi/reldi-lib","sub_path":"reldi/restorer.py","file_name":"restorer.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"}
+{"seq_id":"39722288905","text":"# coding:utf-8\nfrom lxmaya_fnc import ma_fnc_abstract\n\nfrom lxutil_prd import utl_prd_commands\n\nfrom lxmaya_prd import ma_prd_objects\n\nfrom lxmaya.dcc.dcc_objects import _mya_dcc_obj_dags\n\n\nclass Method(ma_fnc_abstract.AbsMyaChecker):\n NAMING_PATTERN = '*_grp'\n def __init__(self, *args):\n super(Method, self).__init__(*args)\n # Group-name is Non-match *_grp\n def _check_method_0(self, *args):\n obj, check_index = args\n #\n is_error = not obj.get_is_naming_match(self.NAMING_PATTERN)\n #\n self.set_error_obj_update(is_error, obj, check_index)\n #\n outputs = []\n return outputs\n # noinspection PyMethodMayBeStatic\n def _repair_method_0(self, *args):\n obj = args[0]\n obj._set_path_update_()\n if obj.get_is_exists():\n new_name = '{}_grp'.format(obj.name)\n obj.set_rename(new_name)\n # empty\n def _check_method_1(self, *args):\n obj, check_index = args\n #\n is_error = not obj.get_all_shape_paths()\n #\n self.set_error_obj_update(is_error, obj, check_index)\n #\n outputs = []\n return outputs\n # noinspection PyMethodMayBeStatic\n def _repair_method_1(self, *args):\n obj = args[0]\n obj._set_path_update_()\n if obj.get_is_exists():\n obj.set_delete()\n\n def set_check_run(self):\n self.set_restore()\n #\n self.EXCEPT_DCC_PATHS = []\n scene = utl_prd_commands.get_current_scene()\n entity_obj = scene.get_current_entity_obj()\n step_obj = scene.get_current_step_obj()\n # exclude entity and step groups\n if step_obj is not None:\n shot_opt = ma_prd_objects.ObjOpt(entity_obj)\n for i in shot_opt.get_dcc_group_paths():\n self.EXCEPT_DCC_PATHS.append(i)\n step_op = ma_prd_objects.ObjOpt(step_obj)\n for i in step_op.get_dcc_group_paths():\n self.EXCEPT_DCC_PATHS.append(i)\n #\n groups = _mya_dcc_obj_dags.Groups().get_custom_nodes(reference=False, exclude_paths=self.EXCEPT_DCC_PATHS)\n self._set_objs_check_(groups)\n","repo_name":"no7hings/lxdcc_fnc","sub_path":"script/python/lxmaya_fnc/checker/utl/group_custom.py","file_name":"group_custom.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"21121864182","text":"from concurrent.futures import ProcessPoolExecutor\nimport os\nimport sys\n\nimport torch\n\nfrom graphembed.utils import latest_path_by_basename_numeric_order\n\nfrom .agg_grid_results import load_embedding\nfrom ..agg_angle_ratios import sample_angle_ratios_and_save, parse_args\nfrom ..utils import fullpath_list\n\n\ndef main():\n torch.set_default_dtype(torch.float64)\n with ProcessPoolExecutor(max_workers=args.n_cpus) as ppool:\n futures = []\n for ds_dir in fullpath_list(args.root_dir):\n ds_name = os.path.basename(ds_dir)\n for loss_fn_dir in fullpath_list(ds_dir):\n loss_fn = os.path.basename(loss_fn_dir)\n for n_factors_dir in fullpath_list(loss_fn_dir):\n try:\n n_factors = int(os.path.basename(n_factors_dir))\n except:\n continue # Ignore the Euclidean baseline.\n for run_dir in fullpath_list(n_factors_dir):\n # Load the embedding.\n pattern = os.path.join(run_dir, 'embedding_*.pth')\n path = latest_path_by_basename_numeric_order(pattern)\n emb = load_embedding(path)\n\n # Submit it for processing.\n f = ppool.submit(sample_angle_ratios_and_save, emb,\n run_dir)\n futures.append(f)\n # Wait for the results.\n for f in futures:\n f.result()\n\n\nif __name__ == '__main__':\n global args\n args = parse_args()\n sys.exit(main())\n","repo_name":"dalab/matrix-manifolds","sub_path":"graphembed/experiments/products/agg_angle_ratios.py","file_name":"agg_angle_ratios.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"22"}
+{"seq_id":"38568787226","text":"import sys\nimport math\nimport cv2 as cv\nimport numpy as np\n\ndef main(argv):\n main_images_path = '../images/'\n #default_file = main_images_path + 'Empty1.jpg'\n #default_file = main_images_path + 'Empty2.jpg'\n default_file = main_images_path + 'Empty3.png'\n #default_file = main_images_path + 'empty_lot.jpg'\n #default_file = main_images_path + 'parking_lot.png'\n\n filename = argv[0] if len(argv) > 0 else default_file\n # Loads an image\n src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)\n # Check if image is loaded fine\n if src is None:\n print('Error opening image!')\n print('Usage: hough_lines.py [image_name -- default ' + default_file + '] \\n')\n return -1\n\n dst = cv.Canny(src, 250, 350, None, 3)\n # here i can change the value of the min and max gradients depending on the brightness of the image\n cv.imshow (\"canny\", dst)\n # Copy edges to the images that will display the results in BGR\n cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)\n cdstP = np.copy(cdst)\n\n linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)\n\n\n if linesP is not None:\n for i in range(0, len(linesP)):\n l = linesP[i][0]\n cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 6, cv.LINE_AA)\n #print(l[0], \" \", l[1], \" \", l[2], \" \", l[3])\n# cv.LINE_AA >> gives anti-aliased line which looks great for curves.\n cv.imshow(\"Source\", src)\n cv.imshow(\"Detected Lines (in red) - Probabilistic Line Transform\", cdstP)\n\n cv.waitKey()\n return 0\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"AhmedSayedSk/parking-spaces-detction","sub_path":"code/line_detection.py","file_name":"line_detection.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"14473316842","text":"#References:\n#https://github.com/hpclab/rankeval/commit/0b5090325228afe197f0708cb158ada50b8f7b7a\nimport pandas as pd\nimport os\nfrom rankeval.dataset import Dataset\nimport lightgbm\nfrom rankeval.metrics import MAP\nfrom sklearn.datasets import load_svmlight_file\nfrom rankeval.model import RTEnsemble\nimport numpy as np\nimport configparser\nimport sys\n\ndef make_dir(path):\n try:\n os.mkdir(path)\n except OSError as error:\n print(path+\" - already exists.\")\n\ndef predicttt(trees, leaves, learning_rate, msn_train, msn_vali, msn_test):\n lgbm_train_dataset = lightgbm.Dataset(data=msn_train.X, label=msn_train.y, group=msn_train.get_query_sizes(), params={'verbose': -1}, free_raw_data=False)\n lgbm_vali_dataset = lightgbm.Dataset(data=msn_vali.X, label=msn_vali.y, group=msn_vali.get_query_sizes(), params={'verbose': -1}, free_raw_data=False)\n \n params = {\n 'boosting_type': 'gbdt',\n 'objective': 'lambdarank',\n 'metric': ['map'],\n 'map_at': [10],\n 'num_leaves': leaves,\n 'learning_rate': learning_rate,\n 'verbose': 1,\n 'use_missing': False\n }\n lgbm_model = lightgbm.train(\n params, \n lgbm_train_dataset, \n num_boost_round=trees,\n valid_sets=[lgbm_train_dataset, lgbm_vali_dataset],\n valid_names=['train', 'vali'],\n early_stopping_rounds=100,\n verbose_eval=10\n )\n \n\n filename = 'lgbm.model'\n rankeval_model = None\n try:\n lgbm_model.save_model(filename=filename)\n rankeval_model = RTEnsemble(filename, \n name=\"LightGBM model\", \n format=\"LightGBM\")\n finally:\n os.remove(filename)\n return rankeval_model\n\n########################################################\nconfig = configparser.ConfigParser()\nconfig.read('../config_'+sys.argv[1]+'.ini')\n\nmake_dir(\"../0_dataset/binary\")\n\nfor section in config.sections():\n max_label = int(config[section]['max_label'])\n ones = max_label\n new_labels = []\n for i in range(1,max_label+1):\n temp = []\n for j in range(1,i+1):\n temp.append(0)\n for k in range(ones,0,-1):\n temp.append(1)\n ones -=1\n new_labels.append(temp)\n \n \n dataset_name = config[section]['dataset_name']\n make_dir(\"../0_dataset/binary/\"+dataset_name+\"\")\n binary_output_path = config[section]['binary_output_path']\n \n binary = int(config[section]['binary'])\n num_folds = int(config[section]['num_folds'])\n num_features = int(config[section]['num_features'])\n trees = config[section]['trees'].split(\",\")\n leaves = config[section]['leaves'].split(\",\")\n learning_rate = config[section]['learning_rate'].split(\",\")\n \n output_path = binary_output_path\n filenamess = [\"train.txt\",\"vali.txt\",\"test.txt\"]\n \n input_path = \"../0_dataset/\"+dataset_name+\"/\"\n \n \n \n old_labels = list(range(0, max_label+1)) #[0,1,2]\n \n map_accuracy_array = [None]*max_label\n map_accuracy_array = [{\"dataset_name\": dataset_name, \"relevance_>=\": i+1, \"MAP@10\": 0, \"Fold1\": 0, \"Fold2\": 0, \"Fold3\": 0, \"Fold4\": 0, \"Fold5\": 0} for i,value in enumerate(map_accuracy_array)]\n \n for i in range(0,len(new_labels)):\n #print(new_labels[i])\n make_dir(\"../0_dataset/binary/\"+dataset_name+\"/\"+str(i+1))\n map_accuracy = 0\n for j in range(1,num_folds+1):\n make_dir(output_path+str(i+1)+\"/Fold\"+str(j))\n \n data = pd.read_csv(input_path+\"Fold\"+str(j)+'/'+filenamess[0], header=None, sep=\" \")\n data[0] = data[0].replace(old_labels, new_labels[i])\n data.to_csv(output_path+str(i+1)+\"/Fold\"+str(j)+\"/train.txt\", sep=' ', header=False, index=False)\n \n data = 
pd.read_csv(input_path+\"Fold\"+str(j)+'/'+filenamess[1], header=None, sep=\" \")\n data[0] = data[0].replace(old_labels, new_labels[i])\n data.to_csv(output_path+str(i+1)+\"/Fold\"+str(j)+\"/vali.txt\", sep=' ', header=False, index=False)\n \n data = pd.read_csv(input_path+\"Fold\"+str(j)+'/'+filenamess[2], header=None, sep=\" \")\n data[0] = data[0].replace(old_labels, new_labels[i])\n data.to_csv(output_path+str(i+1)+\"/Fold\"+str(j)+\"/test.txt\", sep=' ', header=False, index=False)\n \n msn_train = Dataset.load(output_path+str(i+1)+\"/Fold\"+str(j)+\"/train.txt\")\n msn_vali = Dataset.load(output_path+str(i+1)+\"/Fold\"+str(j)+\"/vali.txt\")\n msn_test = Dataset.load(output_path+str(i+1)+\"/Fold\"+str(j)+\"/test.txt\")\n \n msn_lgbm_lmart_1Ktrees_model = predicttt(int(trees[j-1]), int(leaves[j-1]), float(learning_rate[j-1]), msn_train, msn_vali, msn_test)\n \n y_pred_test = msn_lgbm_lmart_1Ktrees_model.score(msn_test)\n map = MAP(cutoff=10)\n map_10_mean_score = map.eval(msn_test, y_pred_test)[0]\n \n map_accuracy_array[i][\"Fold\"+str(j)] = map_10_mean_score\n map_accuracy += map_10_mean_score\n \n map_accuracy = map_accuracy / num_folds\n map_accuracy_array[i][\"MAP@10\"] = map_accuracy\n print(\"DONE: \"+str(new_labels[i]))\n print(str(map_accuracy_array))\n f = open(\"../output/\"+dataset_name+\"_binary_relevance_cutoff.txt\", \"w\")\n f.write(str(map_accuracy_array)+\"\\n\")\n f.close()\n\n\n\n","repo_name":"bajpaijalaj/master-thesis","sub_path":"1_preprocessing/find_cutoff_for_binary_relevance.py","file_name":"find_cutoff_for_binary_relevance.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"72796413175","text":"def solution(info, edges):\n visited = [0] * len(info)\n global max_sheep\n max_sheep = -1\n def dfs(sheep, wolf):\n global max_sheep\n \n if wolf >= sheep:\n return\n else:\n max_sheep = max(sheep, max_sheep)\n \n for i in range(len(edges)):\n parent = edges[i][0]\n child = edges[i][1]\n if visited[parent] and not visited[child]:\n visited[child] = 1\n if info[child] == 0:\n dfs(sheep+1, wolf)\n else:\n dfs(sheep, wolf+1)\n visited[child] = 0\n visited[0] = 1\n dfs(1, 0) \n return max_sheep","repo_name":"chan0139/algo_study","sub_path":"준비/programmers/양과늑대(lv3 dfs).py","file_name":"양과늑대(lv3 dfs).py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"33023336093","text":"import matplotlib.pyplot as plt\nfrom numpy.linalg import lstsq\nimport numpy as np\nimport scipy.stats\n\nimport pandas as pd\n\n\ndef fitar(seq, p=1):\n A = np.vstack([seq[i:-p + i] for i in range(p)]).T\n y = seq[p:]\n return lstsq(A, y, rcond=None)[0], A, y\n\ndef fitarma(seq, p=1, q=1):\n A = np.vstack([\n seq[i:i+p]\n for i in range(len(seq) - p)\n ])\n y = seq[p:]\n x = lstsq(A, y, rcond=None)[0]\n\n res = y - A @ x\n seq = y\n\n A_ar = np.vstack([\n seq[i:i+p]\n for i in range(max(p, q) - p, len(seq) - p)\n ])\n\n A_ma = np.vstack([\n res[i:i+p]\n for i in range(max(p, q) - q, len(seq) - q)\n ])\n\n A = np.hstack((A_ar, A_ma))\n y = seq[max(p, q):]\n\n x = lstsq(A, y, rcond=None)[0]\n return x, A, y\n\ndef sim_garch(As, Bs, n):\n k = max(len(As), len(Bs))\n eps = np.random.normal(size=n + 300 + k)\n ep2 = eps**2\n h = np.ones_like(eps)\n\n for i in range(k, n + 300 + k):\n h[i] = np.sqrt(\n Bs[0] +\n np.dot(Bs[1:], ep2[i - len(Bs) + 1:i][::-1]) +\n np.dot(As, h[i - len(As):i][::-1])\n )\n eps[i] *= h[i]\n ep2[i] = eps[i]**2\n\n return eps[-n:]\n\ndef mom2_garch(seq, p=1, q=1, c=3.7, g=True):\n seq = np.square(seq)\n\n k = 10 * (p + q)\n\n A = np.vstack([\n [1, *seq[i:i+k]]\n for i in range(len(seq) - k)\n ])\n y = seq[k:]\n\n ix = y < c\n Ap, yp = A[ix], y[ix]\n\n x = lstsq(Ap, yp, rcond=None)[0]\n\n if not g:\n return x, A, y\n\n sigma = A @ x\n seq = y\n\n A_ar = np.vstack([\n sigma[i:i+p]\n for i in range(max(p, q) - p, len(seq) - p)\n ])\n\n A_ma = np.vstack([\n [1, *seq[i:i+q]]\n for i in range(max(p, q) - q, len(seq) - q)\n ])\n\n A = np.hstack((A_ar, A_ma))\n y = y[max(p, q):]\n\n # ix = y < c\n # Ap, yp = A[ix], y[ix]\n Ap, yp = A, y\n\n x = lstsq(Ap, yp, rcond=None)[0]\n return x, A, y\n\n\nif __name__ == '__main__':\n from arch import arch_model\n from contextlib import redirect_stdout\n from io import StringIO\n\n As = np.asarray([0.1, 0.1, 0.1])\n Bs = np.asarray([0.1, 0.1])\n n = 1000\n\n b01 = []\n b02 = []\n\n for _ in range(300):\n eps = sim_garch(As, Bs, n)\n par1, *_ = mom2_garch(eps, p=1, q=1)\n b01.append(par1[0])\n\n # just shut up\n with redirect_stdout(StringIO()):\n g = arch_model(eps, p=1, q=1)\n par2 = g.fit(disp='off').params\n par2 = par2[[3, 1, 2]]\n b02.append(par2[0])\n\n plt.plot(eps)\n plt.show()\n\n print(np.mean(b01))\n plt.hist(b01, bins=50)\n plt.show()\n\n print(np.mean(b02))\n plt.hist(b02, bins=50)\n plt.show()\n\ndef _():\n from read import priceof\n from arsim import AR\n\n\n past = 200\n pos = np.random.randint(400)\n\n btcc = priceof('btcusdt').open.iloc[::60*24]\n btc = btcc.pct_change().to_numpy()[-past-pos:-pos]\n btc_p = btc #btcc.pct_change().to_numpy()[-pos:-pos+past]\n\n p, q = 10, 10\n pp, qq = 10, 10\n par, A, y = fitarma(btc, p=p, q=q)\n # _, Ap, yp = fitarma(btc_p, p=p, q=q)\n Ap, yp = A, y\n pred = A @ par\n predp = Ap @ par\n res = y - pred\n resp = yp - predp\n\n pgarch, B, u = mom2_garch(res, p=pp, q=qq)\n # _, Bp, up = mom2_garch(resp, p=pp, q=qq)\n Bp, up = B, u\n vol = np.sqrt(B @ pgarch)\n volp = np.sqrt(Bp @ pgarch)\n\n plt.plot(np.arange(len(btc_p)), btc_p, label='btc')\n plt.plot(np.arange(p + max(p, q), len(btc_p)), predp, label='predictions')\n plt.plot(np.arange(p + max(p, q) + pp + qq, len(btc_p)), volp, label='volatility')\n\n rvol = pd.Series(resp).rolling(20).std()\n plt.plot(np.arange(len(btc_p) - len(rvol), len(btc_p)), rvol, label='rolling standard deviation')\n plt.legend()\n\n 
plt.show()\n","repo_name":"ltricot/bachelor-thesis","sub_path":"arfit.py","file_name":"arfit.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"4362155513","text":"from chroma_core.lib.storage_plugin.api import attributes, alert_conditions\nfrom chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId\nfrom chroma_core.lib.storage_plugin.api.plugin import Plugin\nfrom chroma_core.lib.storage_plugin.api import resources\n\nversion = 1\n\n\nclass Controller(resources.ScannableResource):\n class Meta:\n identifier = GlobalId(\"address\")\n alert_conditions = [\n alert_conditions.ValueCondition(\n \"status\", warn_states=[\"FAILED\"], error_states=[\"BADLY_FAILED\"], message=\"Controller failure\"\n ),\n alert_conditions.UpperBoundCondition(\n \"temperature\", warn_bound=85, error_bound=95, message=\"High temperature warning\", id=\"temp_high\"\n ),\n alert_conditions.LowerBoundCondition(\n \"temperature\", warn_bound=0, message=\"Low temperature warning\", id=\"temp_low\"\n ),\n alert_conditions.ValueCondition(\n \"multi_status\", warn_states=[\"FAIL1\"], message=\"Failure 1\", id=\"multi_status_failure1\"\n ),\n alert_conditions.ValueCondition(\n \"multi_status\", warn_states=[\"FAIL2\"], message=\"Failure 2\", id=\"multi_status_failure2\"\n ),\n alert_conditions.ValueCondition(\n \"multi_status\", warn_states=[\"FAIL1\", \"FAIL2\"], message=\"Failure 1 or 2\", id=\"multi_status_failure12\"\n ),\n ]\n\n address = attributes.String()\n status = attributes.Enum(\"OK\", \"FAILED\", \"BADLY_FAILED\")\n multi_status = attributes.Enum(\"OK\", \"FAIL1\", \"FAIL2\")\n temperature = attributes.Integer(min_val=-274)\n\n\nclass Lun(resources.LogicalDrive):\n class Meta:\n identifier = ScopedId(\"lun_id\")\n\n lun_id = attributes.String()\n\n\nclass Presentation(resources.Resource):\n class Meta:\n identifier = ScopedId(\"lun_id\", \"host_id\")\n\n lun_id = attributes.String()\n path = attributes.String()\n host_id = attributes.Integer()\n\n\nclass TestPlugin(Plugin):\n pass\n","repo_name":"whamcloud/integrated-manager-for-lustre","sub_path":"tests/unit/chroma_core/lib/storage_plugin/alert_plugin.py","file_name":"alert_plugin.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"22"}
+{"seq_id":"37018685575","text":"__author__ = 'Daniel'\n\nfrom UserData import config\n\ndef get_integer_input(query=\"\", default=None):\n \"\"\"\n Takes a query and gets an input from the user\n :param query:\n :param default:\n :return:\n \"\"\"\n res = \"\"\n while not str.isnumeric(res):\n res = input(query)\n if res == \"\" and default is not None:\n return default\n return int(res)\n\ndef is_debug_mode():\n return config.getboolean(\"Debug\",\"Mode\")\n\n__version__ = \"0.0.2\"","repo_name":"daniellowtw/MentalMaths","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"40102609172","text":"from keyWordExtraction import RakeKeywordExtractor\nfrom nltk.corpus import wordnet as wn\nimport sys\nimport codecs\n#print all the synset element of an element\ndef lemmalist(word):\n syn_set = []\n for synset in wn.synsets(word):\n for item in synset.lemmas():\n syn_set.append(item.name())\n return syn_set\n\n\ndef getTagSet():\n\tf = codecs.open(\"raw_tags\", encoding='utf-8')\n\tlines = f.read()\n\tf.close()\n\tlines = lines.split(\"\\n\")\n\ttags = []\n\tfor line in lines:\n\t\tif len(line.split()) == 0:\n\t\t\tcontinue\n\t\trawTag = line.split()[:-2]\n\t\ttagPopularity = line.split()[-2].encode('utf-8')\n\t\ttagPopularity = int (tagPopularity.replace(',', ''))\n\t\ttag = \" \".join(rawTag)\n\t\ttags.append([tag.lower(),tagPopularity])\n\treturn tags\n\n\ndef keywords(text):\n\trake = RakeKeywordExtractor()\n\tkeywords = rake.extract(text, incl_scores=True)\n\treturn keywords\n\ndef coreTags(text):\n\tcoreTags = []\n\n\ttags = getTagSet()\n\trake = RakeKeywordExtractor()\n\tkeywords = rake.extract(text, incl_scores=True)\n\ttagsInArticles = set()\n\tfor keyword in keywords:\n\t\ttagsInArticles.add(keyword)\n\tfor keyword,score in tagsInArticles:\n\t\tfor tag,tagPopularity in tags:\n\t\t\tif (tag.lower() == keyword.lower()):\n\t\t\t\tcoreTags.append([tag, tagPopularity*score])\n\tcoreTags = sorted(coreTags, key=lambda x: x[1], reverse=True)\n\treturn coreTags\n\n\ndef additionalTags(text):\n\tadditionalTags = []\n\ttags = getTagSet()\n\trake = RakeKeywordExtractor()\n\tkeywords = rake.extract(text, incl_scores=True)\n\tsimilarWordDict = {}\n\ttagsInArticles = set()\n\n\tfor keyword,score in keywords:\n\t\tif len(keyword.split()) > 1:\n\t\t\tcontinue\n\t\ttagsInArticles.add(keyword)\n\t\tsimilarWords = lemmalist(keyword)\n\t\tif len(similarWords) != 0:\n\t\t\tfor word in similarWords:\n\t\t\t\tif ((word in tagsInArticles) == False):\n\t\t\t\t\tsimilarWordDict[word] = score\n\n\tfor keyword in similarWordDict.keys():\n\t\tfor tag,tagPopularity in tags:\n\t\t\tif (tag.lower() == keyword.lower()):\n\t\t\t\tadditionalTags.append([tag, tagPopularity*similarWordDict[keyword]])\n\tadditionalTags = sorted(additionalTags, key=lambda x: x[1], reverse=True)\n\treturn additionalTags[:20]\n\n\n#Demo\n# f = codecs.open(\"testFile\", encoding='utf-8')\n# content = f.read()\n# f.close()\n# print keywords(content)\n# print coreTags(content)\n# print additionalTags(content)\n","repo_name":"iaoshili/NLP_Project","sub_path":"tagRecommender.py","file_name":"tagRecommender.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}
+{"seq_id":"16343509576","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\n\nclass activityManager(models.Manager):\n\tdef activityValidator(self,postData):\n\t\tresponse = {\n\t\t\t'status': True\n\t\t}\n\t\terrors = []\n\t\tif len(postData['desc']) == 0:\n\t\t\terrors.append(\"Fill out description so people know what you are doing\")\n\t\tif len(postData['newCategory']) == 0:\n\t\t\tcategoryInput = category.objects.get(id=postData['categoryId'])\n\t\telse:\n\t\t\texisting = category.objects.filter(name=postData['newCategory'])\n\t\t\tif len(existing) > 0:\n\t\t\t\tresponse['status'] = False\n\t\t\t\terrors.append(\"This category exists already\") \n\t\t\telse:\n\t\t\t\tcategoryInput = category.objects.create(name=postData['newCategory'])\n\t\tif len(errors) == 0:\n\t\t\tresponse['activity'] = activity.objects.create(category=categoryInput,desc=postData['desc'],lat=postData['actLat'],lng=postData['actLng'])\n\t\tresponse['errors'] = errors\n\t\treturn response\t\t\n\nclass category(models.Model):\n\tname = models.CharField(max_length = 255)\n\tcreated_at = models.DateTimeField(auto_now_add = True)\n\tupdated_at = models.DateTimeField(auto_now = True)\n\nclass activity(models.Model):\n\tcategory = models.ForeignKey(category, related_name=\"activities\")\n\tdesc = models.CharField(max_length = 255)\n\tlat = models.DecimalField(max_digits=9, decimal_places=6)\n\tlng = models.DecimalField(max_digits=9, decimal_places=6)\n\tcreated_at = models.DateTimeField(auto_now_add = True)\n\tupdated_at = models.DateTimeField(auto_now = True)\n\tobjects = activityManager()","repo_name":"crgalloway/activityFinder","sub_path":"apps/activity/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"14457919281","text":"class Solution:\n def minimumFinishTime(self, tires: List[List[int]], changeTime: int, numLaps: int) -> int:\n # the worst case is: fi = 1, ri = 2, changeTime = 10 ** 5\n # at most 18 laps without changing tire \n one_tire = [0] * 19\n seconds = [0] * len(tires)\n for i in range(1, 19):\n for j in range(len(tires)):\n seconds[j] += tires[j][0] * tires[j][1] ** (i - 1)\n one_tire[i] = min(seconds)\n \n dp = [float('inf')] * (numLaps + 1)\n dp[0] = -changeTime\n \n for i in range(1, numLaps + 1):\n for j in range(1, min(i + 1, 19)):\n dp[i] = min(dp[i], dp[i - j] + changeTime + one_tire[j])\n return dp[-1]\n \n# m = len(tires) n = numLaps\n# Time O(m+n) Space O(m+n)\n","repo_name":"dianachenyu/AlgorithmPractice","sub_path":"2000-3000/2188. Minimum Time to Finish the Race.py","file_name":"2188. Minimum Time to Finish the Race.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"42465750053","text":"import time\nfrom dronekit import connect, VehicleMode, LocationGlobalRelative, Command, LocationGlobal\nfrom pymavlink import mavutil\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--connect', default='/dev/ttyUSB0')\nargs = parser.parse_args()\n\n# Connect to the Vehicle\nprint ('Connecting to vehicle on: %s' % args.connect)\nvehicle = connect(args.connect, baud=57600, wait_ready=True)\n\nvehicle.gimbal.rotate(0,0,0) #pitch ,roll,yaw\ntime.sleep(5)\nvehicle.gimbal.rotate(-90,0,0) # seting the gimbil to down\nprint(\"setting the gimbal down\")\ntime.sleep(5)\nvehicle.gimbal.rotate(90,0,0) #gimbal facing top\nprint(\"setting the gimbal up\")\ntime.sleep(5)\nvehicle.gimbal.rotate(0,0,90) #90 yaw to west\nprint(\"setting the gimbal to west\")\n\n\n","repo_name":"ramankumarrudr/Alphabt-Drones","sub_path":"gimbil_control.py","file_name":"gimbil_control.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"13654176874","text":"'''\r\nWrite code to reverse a C-Style String. (C-String means that \r\nabcd is represented as five characters, including the null character.)\r\n@author: chwang\r\n'''\r\n\r\ndef reverseCStyleString1(stringToReverse):\r\n newString = \"\"\r\n for char in reversed(stringToReverse):\r\n newString = newString + char\r\n return newString\r\n\r\ndef reverseCStyleString2(stringToReverse):\r\n newString = \"\"\r\n for char in stringToReverse[::-1]:\r\n newString = newString + char\r\n return newString\r\n\r\ndef reverseCStyleString3(stringToReverse):\r\n if len(stringToReverse) == 0:\r\n return \"\"\r\n newString = \"\"\r\n #the stop is omitted!\r\n for index in range(len(stringToReverse) - 1, -1, -1):\r\n newString = newString + stringToReverse[index]\r\n return newString\r\n\r\nif __name__ == \"__main__\":\r\n testString = \"Trick or treat\"\r\n print(reverseCStyleString1(testString))\r\n print(reverseCStyleString2(testString))\r\n print(reverseCStyleString3(testString)) \r\n\r\n","repo_name":"oreoero/cc150-python","sub_path":"chapter1/reverse_c_style_string.py","file_name":"reverse_c_style_string.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"}
+{"seq_id":"21381056411","text":"#!/usr/bin/env python3\n\"\"\"Example of plotting slices of a field with yt and matplotlib manipulation of the plot\"\"\"\n\n\n# ========================================================================\n#\n# Imports\n#\n# ========================================================================\nimport numpy as np\nimport yt\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib.ticker import SymmetricalLogLocator\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\n# ========================================================================\n#\n# Function definitions\n#\n# ========================================================================\ndef plot_ds(fdir, field=\"x_velocity\"):\n\n # Load the data\n ds = yt.load(fdir, unit_system=\"mks\")\n\n # Setup\n L = (ds.domain_right_edge - ds.domain_left_edge).d\n width = L[0]\n res = 512\n zlocs = np.array([0.0525, 0.0775, 0.1025, 0.1275, 0.1525])\n fname = \"slices.pdf\"\n\n with PdfPages(fname) as pdf:\n plt.close(\"all\")\n plt.rc(\"text\", usetex=True)\n linthresh = 1e-3\n\n # Get a slice in x\n slc = yt.SlicePlot(ds, \"x\", fields=[field])\n frb = slc.data_source.to_frb(width, res)\n x_slc = np.array(frb[field])\n\n fig0 = plt.figure(0)\n ax0 = fig0.add_subplot(111)\n im = ax0.imshow(\n x_slc,\n origin=\"lower\",\n extent=[\n ds.domain_left_edge.d[0],\n ds.domain_right_edge.d[0],\n ds.domain_left_edge.d[2],\n ds.domain_right_edge.d[2],\n ],\n aspect=\"equal\",\n cmap=\"Spectral_r\",\n norm=colors.SymLogNorm(\n linthresh=linthresh, linscale=0.5, vmin=x_slc.min(), vmax=x_slc.max()\n ),\n )\n cbar = plt.colorbar(\n im, ax=ax0, ticks=SymmetricalLogLocator(linthresh=linthresh, base=10)\n )\n cbar.ax.set_title(r\"$u$\")\n\n for zloc in zlocs:\n ax0.plot(\n [ds.domain_left_edge.d[0], ds.domain_right_edge.d[0]],\n [zloc, zloc],\n color=\"w\",\n lw=1,\n ls=\"--\",\n )\n\n ax0.set_xlabel(r\"$y~[\\mathrm{m}]$\", fontsize=22, fontweight=\"bold\")\n ax0.set_ylabel(r\"$z~[\\mathrm{m}]$\", fontsize=22, fontweight=\"bold\")\n plt.setp(ax0.get_xmajorticklabels(), fontsize=18)\n plt.setp(ax0.get_ymajorticklabels(), fontsize=18)\n fig0.subplots_adjust(bottom=0.15)\n fig0.subplots_adjust(left=0.17)\n pdf.savefig(dpi=300)\n\n # Get slices in z\n for k, zloc in enumerate(zlocs):\n slc = yt.SlicePlot(ds, \"z\", fields=[field], center=[0, 0, zloc])\n frb = slc.data_source.to_frb(width, res)\n z_slc = np.array(frb[field])\n\n fig0 = plt.figure(k + 1)\n ax0 = fig0.add_subplot(111)\n im = ax0.imshow(\n z_slc,\n origin=\"lower\",\n extent=[\n ds.domain_left_edge.d[0],\n ds.domain_right_edge.d[0],\n ds.domain_left_edge.d[1],\n ds.domain_right_edge.d[1],\n ],\n aspect=\"equal\",\n cmap=\"Spectral_r\",\n norm=colors.SymLogNorm(\n linthresh=linthresh,\n linscale=0.5,\n vmin=x_slc.min(),\n vmax=x_slc.max(),\n ),\n )\n cbar = plt.colorbar(\n im, ax=ax0, ticks=SymmetricalLogLocator(linthresh=linthresh, base=10)\n )\n cbar.ax.set_title(r\"$u$\")\n\n ax0.set_xlabel(r\"$x~[\\mathrm{m}]$\", fontsize=22, fontweight=\"bold\")\n ax0.set_ylabel(r\"$y~[\\mathrm{m}]$\", fontsize=22, fontweight=\"bold\")\n plt.setp(ax0.get_xmajorticklabels(), fontsize=18)\n plt.setp(ax0.get_ymajorticklabels(), fontsize=18)\n fig0.subplots_adjust(bottom=0.15)\n fig0.subplots_adjust(left=0.17)\n 
pdf.savefig(dpi=300)\n","repo_name":"AMReX-Combustion/PelePlot","sub_path":"slicer.py","file_name":"slicer.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"41578517910","text":"from blip_session import BlipSession\n\nAUTH_KEY = '{{bot_key}}'\nSEARCHED_VALUE = '{{searched_value}}'\nbs = BlipSession(AUTH_KEY, '{{organization}}') #Organization is optional#\nsearched_key = '{{searched_key}}'\n\ndef get_contact_crb(skip, take):\n return {\n \"method\" : \"get\",\n \"uri\" : f\"/contacts?$skip={skip}&$take={take}\"\n }\n\nskip = 0\ntake = 100\nresponse = bs.force_command(get_contact_crb(skip, take))\ncontact_found = False\nwhile skip < response['resource']['total']:\n for contact in response['resource']['items']:\n if 'extras' in contact and searched_key in contact['extras'] and contact['extras'][searched_key] == SEARCHED_VALUE:\n print(f\"Valor encontrado em {contact['identity']}\")\n contact_found = True\n if contact_found:\n break\n skip += 100\n print(f'{skip} contatos analisados!')\n response = bs.force_command(get_contact_crb(skip, take))\n\nif contact_found == False:\n print('Valor não encontrado nos contatos')","repo_name":"louisbaggins/Blip-helper-script","sub_path":"search_contact_extras.py","file_name":"search_contact_extras.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"22235943872","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 21 21:17:55 2021\r\n\r\n@author: divyam\r\n\"\"\"\r\n\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport copy\r\n\r\nimg1 = cv.imread(\"im0.png\")\r\nimg2 = cv.imread(\"im1.png\")\r\nimg1 = cv.resize(img1, (600,400))\r\nimg2 = cv.resize(img2, (600,400))\r\n\r\n#cv.imshow(\"image\",cap1)\r\n#cv.waitKey(0)\r\n#cv.destroyAllWindows\r\n\r\n############### feature matching ###############\r\norb = cv.ORB_create()\r\nkp1, des1 = orb.detectAndCompute(img1, None)\r\nkp2, des2 = orb.detectAndCompute(img2, None)\r\n\r\n# Brute Force Matching\r\nbf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\r\nmatches = bf.match(des1, des2)\r\nmatches = sorted(matches, key = lambda x:x.distance)\r\n\r\nsize = len(matches)\r\nprint(size)\r\nmatch = matches[:int(size/10)]\r\n\r\n# Initialize lists\r\nlist_kp1 = []\r\nlist_kp2 = []\r\n\r\nfor mat in match:\r\n\r\n # Get the matching keypoints for each of the images\r\n img1_idx = mat.queryIdx\r\n img2_idx = mat.trainIdx\r\n\r\n # x - columns\r\n # y - rows\r\n # Get the coordinates\r\n (x1, y1) = kp1[img1_idx].pt\r\n (x2, y2) = kp2[img2_idx].pt\r\n\r\n # Append to each list\r\n list_kp1.append((x1, y1))\r\n list_kp2.append((x2, y2))\r\n\r\n#matching_result = cv.drawMatches(img1, kp1, img2, kp2, match, None, flags=2)\r\n#cv.imshow(\"matching results\", matching_result)\r\n#cv.waitKey(0)\r\n#cv.destroyAllWindows\r\n##################################################\r\n\r\n########### getting feature index ################\r\ndef get_8_rand(): # get 8 random index to fit in Af = 0\r\n key_X1 = []\r\n \r\n rand_ind_list = []\r\n for i in range(8):\r\n# if i not in rand_ind_list:\r\n rand_ind_list.append(random.randint(0,int(size/10)-1))\r\n \r\n for ind in rand_ind_list:\r\n key_X1.append(ind)\r\n return key_X1\r\n\r\ndef get_index(key_x):\r\n index1 = []\r\n index2 = []\r\n for i in key_x:\r\n index1.append((int(list_kp1[i][0]),int(list_kp1[i][1])))\r\n index2.append((int(list_kp2[i][0]),int(list_kp2[i][1])))\r\n return index1, index2\r\n###################################################\r\n \r\n########## 8 point algorithms ##################\r\n \r\n # -----------------A_matrix = [x2*x1, x2*y1, x2, y2*x1, y2*y1, y2, x1, y1, 1]\r\n#A_matrix = []\r\n#key_x = get_8_rand()\r\n#for iter in key_x:\r\n# A_matrix.append([list_kp2[iter][0]*list_kp1[iter][0],list_kp2[iter][0]*list_kp1[iter][1],list_kp2[iter][0],list_kp2[iter][1]*list_kp1[iter][0],list_kp2[iter][1]*list_kp1[iter][1],list_kp2[iter][1],list_kp1[iter][0],list_kp1[iter][1],1])\r\n#U,sig,V_T = np.linalg.svd(A_matrix)\r\n#K_H = V_T[-1,:]/V_T[-1,-1]\r\n#F = K_H.reshape(3,3)\r\n\r\n\r\n################## RANSAC ###############\r\ninitial_F = 10000\r\nF_mat = None\r\nfor i in range(2000):\r\n A_matrix = []\r\n key_x = get_8_rand()\r\n for iter in key_x: # making A matrix for Af = 0\r\n A_matrix.append([list_kp2[iter][0]*list_kp1[iter][0],list_kp2[iter][0]*list_kp1[iter][1],list_kp2[iter][0],list_kp2[iter][1]*list_kp1[iter][0],list_kp2[iter][1]*list_kp1[iter][1],list_kp2[iter][1],list_kp1[iter][0],list_kp1[iter][1],1])\r\n U,sig,V_T = np.linalg.svd(A_matrix)\r\n K_H = V_T[-1,:]/V_T[-1,-1]\r\n F = K_H.reshape(3,3)\r\n list_kp1_new = list(list_kp1[0])\r\n list_kp2_new = list(list_kp2[0])\r\n list_kp1_new.append(1)\r\n list_kp2_new.append(1)\r\n X1 = np.array(list_kp1_new)\r\n X2 = np.array(list_kp2_new)\r\n ans_F = np.matmul(np.transpose(X2),np.matmul(F,X1)) # 
Fundamental matrix equation xT*F*x = 0\r\n if ans_F < 0:\r\n continue\r\n if abs(ans_F) < initial_F:\r\n initial_F = abs(ans_F)\r\n index1, index2 = get_index(key_x)\r\n F_mat = F\r\n\r\n#### Enforcing Ransac #####\r\nU_f , sig_f, V_t_f = np.linalg.svd(F_mat)\r\nsig_f[-1] = 0\r\nF_mat = np.matmul(U_f,np.matmul(np.diag(sig_f),V_t_f))\r\n###########################\r\n\r\n#F_mat = np.array([[ 2.56502805e-19,2.54139887e-17,-5.07574184e-15],[-4.80711816e-31,-2.43696301e-18,1.00000000e+00],\r\n# [9.53904157e-29,-1.00000000e+00,7.53666453e-17]]) ## seeded\r\nprint(initial_F)\r\nprint(index1)\r\nindex1 = np.array(index1)\r\nindex2 = np.array(index2)\r\n \r\n################## Esential Matrix #####################\r\n## dataset 1 \r\nK1 = np.array([[5299.313, 0, 1263.818], [0, 5299.313, 977.763], [0, 0, 1]])\r\nK2 = np.array([[5299.313, 0, 1438.004], [0, 5299.313, 977.763], [0, 0, 1]])\r\n\r\n## dataset 2 \r\n#K1 = np.array([[4396.869, 0, 1353.072], [0, 4396.869, 989.702], [0, 0, 1]])\r\n#K2 = np.array([[4396.869, 0, 1538.86], [0, 4396.869, 989.702], [0, 0, 1]])\r\n\r\n## dataset 3 \r\n#K1 = np.array([[5806.559, 0, 1429.219], [0, 5806.559 , 993.403], [0, 0, 1]])\r\n#K2 = np.array([[5806.559, 0, 1538.86], [0, 5806.559, 993.403], [0, 0, 1]])\r\n\r\nE = np.matmul(np.transpose(K2),np.matmul(F_mat,K1))\r\n\r\n\r\n############ Estimating pose using Esential Matrix ##########\r\ndef get_pose(E, K): ## E is essential matrix and K is intrinsic parameter\r\n print(\"--Getting rotational and translational matrices\")\r\n U, S, VT = np.linalg.svd(E)\r\n W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\r\n R = np.matmul(np.matmul(U, np.linalg.inv(W)), VT)\r\n T = np.matmul(np.matmul(np.matmul(U, W), S), U.T)\r\n print(\"Rotation\\n\", R)\r\n print(\"Translation\\n\", T)\r\n return R, T\r\n\r\nprint(\"Pose of Camera one \")\r\nprint(\"########\")\r\nR, T = get_pose(E,K1)\r\nprint(\"Pose of Camera two \")\r\nprint(\"########\")\r\nR, T = get_pose(E,K2)\r\n\r\n############ Rectification ####################\r\nh1, w1, c = img1.shape\r\nh2, w2, c = img2.shape\r\n_, H1, H2 = cv.stereoRectifyUncalibrated(np.float32(index1), np.float32(index2), F_mat, imgSize=(w1, h1))\r\nprint(\"H1/n \", H1)\r\nprint (\"H2/n \", H2)\r\n\r\nimg1_rectified = cv.warpPerspective(img1, H1, (w1, h1))\r\nimg2_rectified = cv.warpPerspective(img2, H2, (w2, h2))\r\nimg_rec1 = copy.deepcopy(img1_rectified)\r\nimg_rec2 = copy.deepcopy(img2_rectified)\r\n\r\nX_1 = np.linalg.inv(H2) \r\nF_new = np.matmul(np.transpose(X_1),np.matmul(F_mat,np.linalg.inv(H1)))\r\nprint(\"Rectified F/n :\",F_new)\r\n############ fixing initial points #################\r\n\r\nnew_kp1 = []\r\nnew_kp2 = []\r\n\r\nfor i in range(len(list_kp1)): # transforming keypoints to new warped image\r\n X1 = np.array([list_kp1[i][0],list_kp1[i][1],1])\r\n X2 = np.array([list_kp2[i][0],list_kp2[i][1],1])\r\n new_pt1 = np.dot(H1,np.transpose(X1))\r\n new_pt2 = np.dot(H2,np.transpose(X2))\r\n new_kp1.append((int(new_pt1[0]/new_pt1[2]),int(new_pt1[1]/new_pt1[2])))\r\n new_kp2.append((int(new_pt2[0]/new_pt2[2]),int(new_pt2[1]/new_pt2[2])))\r\n\r\ncount = 0 \r\nfor mat in match: # setting up new keypoints according to feature transform\r\n\r\n # Get the matching keypoints for each of the images\r\n img1_idx = mat.queryIdx\r\n img2_idx = mat.trainIdx\r\n\r\n # x - columns\r\n # y - rows\r\n # Get the coordinates\r\n kp1[img1_idx].pt = new_kp1[count]\r\n kp2[img2_idx].pt = new_kp2[count]\r\n\r\n count += 1\r\n \r\n\r\n#################### Epipolar lines ######################\r\ndef 
drawlines(img1,img2,lines,pts1,pts2): # for drawing epipolar lines\r\n ''' img1 - image on which we draw the epilines for the points in img2\r\n lines - corresponding epilines '''\r\n r,c, h = img1.shape\r\n for r,pt1,pt2 in zip(lines,pts1,pts2):\r\n color = tuple(np.random.randint(100,200,3).tolist())\r\n x0,y0 = map(int, [0, -r[2]/r[1] ])\r\n x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])\r\n img1 = cv.line(img1, (x0,y0), (x1,y1), (0,0,255),2)\r\n img1 = cv.circle(img1,tuple(pt1),5,color,-1)\r\n img2 = cv.circle(img2,tuple(pt2),5,color,-1)\r\n return img1,img2\r\n\r\nnew_kp2 = np.array(new_kp2)\r\nnew_kp1 = np.array(new_kp1)\r\nlines1 = cv.computeCorrespondEpilines(new_kp2.reshape(-1,1,2), 2,F_new)\r\nlines2 = cv.computeCorrespondEpilines(new_kp1.reshape(-1,1,2), 2,F_new)\r\nlines1 = lines1.reshape(-1,3)\r\nlines2 = lines2.reshape(-1,3)\r\nimg3,img4 = drawlines(img1_rectified,img2_rectified,lines1,new_kp1,new_kp2)\r\nimg5,img6 = drawlines(img2_rectified,img1_rectified,lines2,new_kp2,new_kp1)\r\ncv.imshow(\"image\", img5)\r\ncv.imshow(\"image2\", img6)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows\r\n \r\n################ correspondance (SSD) ########################\r\nwindow = (7,7)\r\n\r\n\r\nd = 50\r\nimg_gray1 = cv.cvtColor(img_rec1, cv.COLOR_BGR2GRAY)\r\nimg_gray2 = cv.cvtColor(img_rec2, cv.COLOR_BGR2GRAY)\r\n\r\ndef check_diff(img1,img2,x,y,count):\r\n sum = 0\r\n for i in range(window[0]):\r\n for j in range(window[1]):\r\n diff = (img1[x+i][y+j] - img2[x+i][y+j+count])**2\r\n sum = sum + diff\r\n# min_diff = min(diff_list)\r\n# ind = diff_list.index(min_diff)\r\n# index = (int(str(ind)[0]),ind%10)\r\n return sum\r\n#\r\ndef diff_disp(img1,img2,x,y,count):\r\n \r\n winblock1 = img1[x:x + window[0],y:x + window[1]]\r\n winblock2 = img2[x:x + window[0],y + count:y + count + window[1]]\r\n diff = (winblock2.sum() - winblock1.sum())\r\n #print(np.sum(winblock1))\r\n return diff\r\n\r\nh, w = img_gray1.shape\r\ndisparity_image = np.zeros(img_gray1.shape)\r\nh = h - 10\r\nw = w - 50 \r\nt = math.ceil(window[0]/2)\r\nfor i in range(h):\r\n for j in range(w):\r\n diff_list = []\r\n for k in range(50):\r\n sum = diff_disp(img_gray1,img_gray2,i,j,k)\r\n diff_list.append(sum)\r\n disparity_image[i+t][j+t] = diff_list.index(min(diff_list))\r\n\r\nplt.imshow(disparity_image)\r\ncolormap = plt.get_cmap('jet')\r\nheatmap = (colormap(disparity_image) * 2**16).astype(np.uint16)[:,:,:3]\r\nheatmap = cv.cvtColor(heatmap, cv.COLOR_RGB2BGR)\r\n\r\n#########################################################################\r\n\r\n###### inbuilt disparity function\r\n#win_size = 2\r\n#min_disp = -4\r\n#max_disp = 9\r\n#num_disp = max_disp - min_disp # Needs to be divisible by 16\r\n#stereo = cv.StereoSGBM_create(\r\n# minDisparity=min_disp,\r\n# numDisparities=num_disp,\r\n# blockSize=5,\r\n# uniquenessRatio=5,\r\n# speckleWindowSize=5,\r\n# speckleRange=5,\r\n# disp12MaxDiff=2,\r\n# P1=8 * 3 * win_size ** 2,\r\n# P2=32 * 3 * win_size ** 2,\r\n#)\r\n#disparity_SGBM = stereo.compute(img_rec1, img_rec2)\r\n#plt.imshow(disparity_SGBM, \"gray\")\r\n#plt.colorbar()\r\n#plt.show()\r\n########################################################################\r\n\r\n###################### Depth Map #######################################\r\ndef normalize(matrix):\r\n maxvalue = matrix.max() \r\n minvalue = matrix.min() \r\n span = maxvalue - minvalue\r\n \r\n matrix = (matrix - minvalue)/span\r\n matrix = matrix*255 \r\n matrix = matrix.astype(np.uint8)\r\n \r\n return matrix\r\n\r\ndisp = 
disparity_image.astype(np.float32)\r\ndisp[disp == 0] = 0.01\r\ndepth = 1./(5*np.copy(disp)) # 1/5 because image was resized by 0.2\r\nB = 177.288\r\nf = 5299.313\r\ndepth = depth*B*f\r\ndepth = normalize(depth)\r\ndepthmap = cv.applyColorMap(depth, cv.COLORMAP_JET)\r\n\r\n########################################################################\r\n\r\n#################### Results ##########################################\r\nplt.imshow(disparity_image)\r\nmatching_result = cv.drawMatches(img1_rectified, kp1, img2_rectified, kp2, match, None, flags=2)\r\ncv.imshow(\"matching results\", matching_result)\r\ncv.imshow(\"disparity_image\", disparity_image)\r\ncv.imshow('heatmap', heatmap)\r\ncv.imshow('depthmap', depthmap)\r\ncv.imshow('depth', depth)\r\n#cv.imshow(\"gray\", img_gray1)\r\n#cv.imshow(\"image\", img5)\r\n#cv.imshow(\"image2\", img6)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows\r\n\r\n","repo_name":"divi9626/Stereo-Vision","sub_path":"stereo_depth.py","file_name":"stereo_depth.py","file_ext":"py","file_size_in_byte":11451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"6701091572","text":"import socket\nimport time\nfrom modules import directives\nfrom modules import headers\nfrom modules import ip_utils\nfrom modules import listeners\nfrom collections import defaultdict\nfrom contextlib import closing\nfrom itertools import repeat\nfrom multiprocessing import Pool\nfrom os import getpid\nfrom typing import Set, Tuple\n\n\ndef ping(addresses: Set[str]) -> Set[Tuple[str, float, headers.ip]]:\n \"\"\"\n Send an ICMP ECHO REQUEST to each address\n in the set addresses. Then return a set which\n contains all the addresses which replied and\n which have the correct ID.\n \"\"\"\n with closing(\n socket.socket(\n socket.AF_INET,\n socket.SOCK_RAW,\n socket.IPPROTO_ICMP\n )\n ) as ping_sock:\n # get the local ip address\n addresses = {\n ip\n for ip in addresses\n if (\n not ip.endswith(\".0\")\n and not ip.endswith(\".255\")\n )\n }\n\n # initialise a process pool\n p = Pool(1)\n # get the local process id for use in creating packets.\n ID = getpid() & 0xFFFF\n # run the listeners.ping function asynchronously\n replied = p.apply_async(listeners.ping, (ID, 5))\n time.sleep(0.01)\n for address in zip(addresses, repeat(1)):\n try:\n packet = ip_utils.make_icmp_packet(ID)\n ping_sock.sendto(packet, address)\n except PermissionError:\n ip_utils.eprint(\"raw sockets require root priveleges, exiting\")\n exit()\n p.close()\n p.join()\n # close and join the process pool to so that all the values\n # have been returned and the pool closed\n return replied.get()\n\n\ndef connect(address: str, ports: Set[int]) -> Set[int]:\n \"\"\"\n This is the most basic kind of scan\n it simply connects to every specififed port\n and identifies whether they are open.\n \"\"\"\n import socket\n from contextlib import closing\n open_ports: Set[int] = set()\n for port in ports:\n # loop through each port in the list of ports to scan\n try:\n with closing(\n socket.socket(\n socket.AF_INET,\n socket.SOCK_STREAM\n )\n ) as s:\n # open an IPV4 TCP socket\n s.connect((address, port))\n # attempt to connect the newly created socket to the target\n # address and port\n open_ports.add(port)\n # if the connection was successful then add the port to the\n # list of open ports\n except (ConnectionRefusedError, OSError) as e:\n pass\n return open_ports\n\n\ndef tcp(dest_ip: str, portlist: Set[int]) -> listeners.PORTS:\n src_port = ip_utils.get_free_port()\n # request a local port to connect from\n if \"127.0.0.1\" == dest_ip:\n local_ip = \"127.0.0.1\"\n else:\n local_ip = ip_utils.get_local_ip()\n p = Pool(1)\n listener = p.apply_async(listeners.tcp, ((local_ip, src_port), 5))\n time.sleep(0.01)\n # start the TCP ACK listener in the background\n for port in portlist:\n # flag = 2 for syn scan\n packet = ip_utils.make_tcp_packet(\n src_port,\n port,\n local_ip,\n dest_ip,\n 2\n )\n with closing(\n socket.socket(\n socket.AF_INET,\n socket.SOCK_RAW,\n socket.IPPROTO_TCP\n )\n ) as s:\n s.sendto(packet, (dest_ip, port))\n # send the packet to its destination\n p.close()\n p.join()\n ports = listener.get()\n ports[\"FILTERED\"] = portlist - ports[\"OPEN\"] - ports[\"CLOSED\"]\n if local_ip == \"127.0.0.1\":\n ports[\"OPEN\"] -= set([src_port])\n\n return ports\n\n\ndef udp(\n dest_ip: str,\n ports_to_scan: Set[int]\n) -> listeners.PORTS:\n \"\"\"\n Takes in a destination IP address in either dot or long form and\n a list of ports to scan. 
Sends UDP packets to each port specified\n in portlist and uses the listeners to mark them as open, open|filtered,\n filtered, closed they are marked open|filtered if no response is\n recieved at all.\n \"\"\"\n\n local_port = ip_utils.get_free_port()\n # get port number\n ports: listeners.PORTS = defaultdict(set)\n ports[\"REMAINING\"] = ports_to_scan\n p = Pool(1)\n udp_listen = p.apply_async(listeners.udp, (dest_ip, 4))\n time.sleep(0.01)\n # start the UDP listener\n with closing(\n socket.socket(\n socket.AF_INET,\n socket.SOCK_RAW,\n socket.IPPROTO_UDP\n )\n ) as s:\n for _ in range(2):\n # repeat 3 times because UDP scanning comes\n # with a high chance of packet loss\n for dest_port in ports[\"REMAINING\"]:\n try:\n packet = ip_utils.make_udp_packet(\n local_port,\n dest_port\n )\n # create the UDP packet to send\n s.sendto(packet, (dest_ip, dest_port))\n # send the packet to the currently scanning address\n except socket.error:\n packet_bytes = \" \".join(map(hex, packet))\n print(\n \"The socket modules sendto method with the following\",\n \"argument resulting in a socket error.\",\n f\"\\npacket: [{packet_bytes}]\\n\",\n \"address: [{dest_ip, dest_port}])\"\n )\n\n p.close()\n p.join()\n\n ports[\"OPEN\"].update(udp_listen.get())\n # if we are on localhost remove the scanning port\n if dest_ip == \"127.0.0.1\":\n ports[\"OPEN\"] -= set([local_port])\n ports[\"REMAINING\"] -= ports[\"OPEN\"]\n # only scan the ports which we know are not open\n with closing(\n socket.socket(\n socket.AF_INET,\n socket.SOCK_RAW,\n socket.IPPROTO_UDP\n )\n ) as s:\n for dest_port in ports[\"REMAINING\"]:\n try:\n packet = ip_utils.make_udp_packet(\n local_port,\n dest_port\n )\n # make a new UDP packet\n p = Pool(1)\n icmp_listen = p.apply_async(\n listeners.icmp_unreachable,\n (dest_ip,),\n )\n # start the ICMP listener\n time.sleep(0.01)\n s.sendto(packet, (dest_ip, dest_port))\n # send packet\n p.close()\n p.join()\n icmp_code = icmp_listen.get()\n # receive ICMP code from the ICMP listener\n if icmp_code in {0, 1, 2, 9, 10, 13}:\n ports[\"FILTERED\"].add(dest_port)\n elif icmp_code == 3:\n ports[\"CLOSED\"].add(dest_port)\n except socket.error:\n packet_bytes = \" \".join(map(\"{:02x}\".format, packet))\n ip_utils.eprint(\n \"The socket modules sendto method with the following\",\n \"argument resulting in a socket error.\",\n f\"\\npacket: [{packet_bytes}]\\n\",\n \"address: [{dest_ip, dest_port}])\"\n )\n # this creates a new set which contains all the elements that\n # are in the list of ports to be scanned but have not yet\n # been classified\n ports[\"OPEN|FILTERED\"] = (\n ports[\"REMAINING\"]\n - ports[\"OPEN\"]\n - ports[\"FILTERED\"]\n - ports[\"CLOSED\"]\n )\n del(ports[\"REMAINING\"])\n # set comprehension to update the list of open filtered ports\n return ports\n\n\ndef version_detect_scan(\n target: directives.Target,\n probes: directives.PROBE_CONTAINER\n) -> directives.Target:\n for probe_dict in probes.values():\n for proto in probe_dict:\n target = probe_dict[proto].scan(target)\n return target\n","repo_name":"tritoke/networkScanner","sub_path":"Code/modules/scanners.py","file_name":"scanners.py","file_ext":"py","file_size_in_byte":8172,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"7594112566","text":"from controller.game_controller import GameController\nfrom controller.reports_controller import ReportsController\nfrom view.view import View\n\n\nclass MainController:\n \"\"\"The Main controller ask the user whether he/she wants to play a tournament or request a report.\"\"\"\n\n def __init__(self):\n \"\"\"The init method import the game controller and the report controller. The game controller is used to play\n a tournament. The report controller is used to request a report.\n \"\"\"\n self.game_controller = GameController()\n self.reports_controller = ReportsController()\n\n def start(self):\n \"\"\"The start method is the method that is called in the execution file of the application. When the application\n is executed, we ask the user whether he/she wants to play a tournament or to request a report. If the user\n choose 1, then a new tournament start. If he/she choose 2, the application will display the report menu.\n \"\"\"\n View.display_initial_menu()\n choice = View.get_choice(\"Choice: \")\n if choice == \"1\":\n self.game_controller.run_game()\n elif choice == \"2\":\n self.reports_controller.request_report()\n else:\n View.display_text(\"Invalid choice\")\n","repo_name":"Kmenguete/OpenClassroom_Project4","sub_path":"controller/main_controller.py","file_name":"main_controller.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31430492914","text":"#!/usr/bin/env python3\n\"\"\"\nAutomated controller for lights\n\nTimers are stored in the file pointed to by LIGHT_TIMERS\n\n\nUse cron or equivalent to have this program automatically run at startup\n\"\"\"\nfrom lightStripLib import Room\nfrom datetime import datetime\nfrom time import sleep\nimport sys\nimport subprocess\n\nTIMER_FILE=\"light.transition\" #\"light_timers.csv\"\n\n\n\ndef get_timers(timer_file):\n \"\"\"\n Return a list of timers\n\n Timers read from LIGHT_TIMERS file\n Raw file should read:\n TIME,first element in the scene],second element in the scene, etc\n where '|' is used to separate individual components of each scene element\n\n for example:\n TIME,HUE|SATURATION|BRIGHTNESS|DURATION|TRANSITION,HUE|SATURATION|BRIGHT...\n\n On failure, the function returns `timers` in its current state\n \"\"\"\n timers = [] # list of timers\n # each timer is a tuple consisting of (TIME, TRANSITION, ACTIVATED, LIGHTS)\n # where the time is HHMM in 24 hour time and TRANSITION is a list of tuples\n # each transition tuple consists of (HUE, SATURATION, BRIGHTNESS, DURATION, TRANSITION)\n # where HUE, SATURATION, and BRIGHTNESS are floats and DURATION, TRANSITION are integers\n # representing the DURATION and TRANSITION length of each part of the scene in miliseconds\n with open(timer_file, 'r') as timer_file:\n for raw_timer in timer_file:\n raw_input = raw_timer.split(',')\n time = 0000\n try:\n time = int(raw_input.pop(0))\n except Exception:\n print(\"failed to parse time:\", time)\n return timers\n lights = []\n try:\n lights = [l for l in raw_input.pop(0).split('|') if l]\n except Exception:\n print(\"failed to parse lights\")\n return timers\n scene_elements = []\n while raw_input:\n scene_element = raw_input.pop(0).split('|')\n try:\n scene_elements.append((\n float(scene_element[0]),\n float(scene_element[1]),\n float(scene_element[2]),\n int(scene_element[3]),\n int(scene_element[4])))\n except Exception:\n print(\"failed to parse scene element:\", scene_element)\n return timers\n timers.append((time, scene_elements, False, lights)) # time to activate, elements, bool if the timer has been activated today, lights to be activated\n return timers\n\ndef main():\n \"\"\"\n Main driver for program\n\n\n \"\"\"\n # get hash\n current_hash = subprocess.run(\n ['md5sum', TIMER_FILE], \n stdout=subprocess.PIPE).stdout.decode('utf-8')\n \n timers = get_timers(TIMER_FILE) # get all the timers\n # TODO: sort the timers so the earliest timer is first and the latest timer is last\n room = Room()\n if not room.setup(): # get all the lights\n sys.exit(1)\n \n print(timers)\n\n while True: # make sure the timer never stops running\n if not timers:\n # if there are no timers, we are just going to stop the program\n sys.exit(1)\n current_time = int(datetime.now().strftime('%H%M'))\n if current_time % 5 == 0:\n print(f\"{current_time} - timers: {len(timers)}\")\n for t in timers:\n time, transition, activated, lights = t\n print(f\"\\t{time} : {'done' if activated else 'waiting'}\")\n\n\n for index, timer in enumerate(timers):\n time, transition, activated, lights = timer\n if abs(current_time - time) <= 1 and not activated:\n print(f\"controller ran: {transition} at {time}\")\n # run the transition\n if lights:\n print(f\"only transitioning lights: {lights}\")\n for light in lights:\n room.light_transition(light, transition)\n else:\n print(\"ran transition on all lights\")\n room.room_transition(transition)\n activated = True # set the timer to activated\n 
elif current_time <= 1:\n activated = False\n\n timers[index] = (time, transition, activated, lights)\n sleep(60) # wait a minute\n\n # check for any new timers only if the timer file has changed\n new_hash = subprocess.run(\n ['md5sum', TIMER_FILE.encode('utf-8')], \n stdout=subprocess.PIPE).stdout.decode('utf-8')\n if current_hash != new_hash:\n print(\"checking for timers because timer file got modified\")\n timers = get_timers(TIMER_FILE)\n \n current_hash = new_hash\n # and repeat the process\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BCaven/elgato-light-controller","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"15861528886","text":"# Pre-Repackaging Queries\nimport os\n\ndef do_pre_repackaging(test, year, query, renamed):\n\n aid_year = str(int(year) - 1) + \"-\" + str(year)\n\n if test:\n directory = os.path.realpath(os.path.join('C:\\Testing Bob/Pell Repackaging', aid_year))\n else:\n directory = os.path.realpath(os.path.join('O:/Systems/QUERIES/Pell Repackaging', aid_year))\n\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n move_directory = \"Pell Reports\"\n\n # FORMAT: return (query, renamed, archive_directory, UOSFA_folder)\n # query: The original file name\n # renamed: The name the file should have after being moved\n # archive_directory: The folder the file will be copied to\n # UOSFA_folder: The name of the folder the renamed file should be moved to\n # - (eg. \"Budget Reports\", \"SAP Reports\", \"Unknown Reports\") \n # - put \"None\" if it shouldn't be moved to a folder in 'O:/UOSFA Reports/'\n\n if query.startswith(\"UUFA_PP_RPKG_AGGREGATE_LIMITS\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_AWD_AY_NO_BDGT\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_AWD_STRM_INACTIVE\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_AWRD_LOCK\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_COA_DOUBLE\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_LTHT_PELL_COA\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_RPKG_NO_BUDGET\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_RPKG_SNAPSHOT\"):\n return (query, renamed, directory, move_directory)\n\n if query.startswith(\"UUFA_PP_RPKG_TOTAL_WDRN_DRP\"):\n return (query, renamed, directory, move_directory)\n \n if query.startswith(\"UUFA_READY_REPACKAGE\"):\n return (query, renamed, directory, move_directory)\n\n return \"Empty\" #Leave as last line","repo_name":"Leyalic/Bob3.0","sub_path":"Files/PrePackaging_Queries.py","file_name":"PrePackaging_Queries.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23050685694","text":"import random\nimport math\nfrom utiles.utiles import obtener_color\nfrom funciones.configuraciones import establecer_configuraciones, configuraciones\nfrom funciones.escoger_modo import escoger_modo\nfrom funciones.leer_archivo import unificar_archivos\nfrom funciones.cronometro import fecha_actual, iniciar_cronometro, detener_cronometro, tiempo_transcurrido\nfrom funciones.actualizar_coincidencias import actualizar_coincidencias\nfrom funciones.normalizar_arriesgo import normalizar_palabra\nfrom funciones.validar_arriesgo import validar_arriesgo\nfrom funciones.presentar import presentar\nfrom funciones.actualizar_puntaje import actualizar_puntaje\nfrom funciones.resultado import resultado\nfrom funciones.continuar_jugando import continuar_jugando\nfrom funciones.guardar_partidas import guardar_partidas\n\n\ndef main(modo_juego, usuario_1, usuario_2=\"\"):\n # Condiciones iniciales del juego\n iniciar_partida = True\n establecer_configuraciones()\n configuracion = configuraciones()\n LONGITUD_PALABRA = configuracion[\"LONGITUD_PALABRA_SECRETA\"][0]\n LIMITE_PARTIDAS = configuracion[\"MAXIMO_PARTIDAS\"][0]\n LIMITE_INTENTOS = 5\n partida = 0\n tabla = {}\n cola_turnos, PRIMERO, SEGUNDO = escoger_modo(modo_juego, usuario_1, usuario_2)\n aciertos_totales1 = aciertos_totales2 = 0\n intentos_totales1 = intentos_totales2 = 0\n diccionario = unificar_archivos(LONGITUD_PALABRA)\n fecha = fecha_actual()\n tiempo_fin = 0\n if configuracion[\"REINICIAR_ARCHIVO_PARTIDAS\"][0]:\n metodo = \"w\"\n else:\n metodo = \"a\"\n\n while iniciar_partida and partida < LIMITE_PARTIDAS:\n # Condiciones iniciales de cada partida\n palabra_a_adivinar = random.choice(list(diccionario)).upper()\n intentos = 0\n arriesgo = \"\"\n coincidencias = []\n tiempo_inicio = iniciar_cronometro()\n\n # La lista coincidencias tiene una longitud dinámica\n for incognita in range(LONGITUD_PALABRA):\n coincidencias.append(\"?\")\n coincidencias_parciales = []\n\n # Validación de arriesgos e intentos durante cada partida\n while intentos <= LIMITE_INTENTOS and arriesgo != palabra_a_adivinar:\n actualizar_coincidencias(LIMITE_INTENTOS, coincidencias_parciales, LONGITUD_PALABRA)\n if intentos < LIMITE_INTENTOS:\n arriesgo = normalizar_palabra(input(f\"{cola_turnos[PRIMERO]}, tu arriesgo: \").upper())\n arriesgo_valido = validar_arriesgo(arriesgo, LONGITUD_PALABRA, obtener_color)\n if arriesgo_valido:\n resultado_parcial, coincidencias = presentar(palabra_a_adivinar, arriesgo, obtener_color, coincidencias)\n coincidencias_parciales.append(resultado_parcial)\n print(f\"{resultado_parcial}\\n\")\n if modo_juego == '2' and arriesgo != palabra_a_adivinar and intentos != LIMITE_INTENTOS - 1:\n # Cambio de turno agregando nombre del jugador actual y removiendolo del primer lugar\n cola_turnos.append(cola_turnos[PRIMERO])\n cola_turnos.pop(PRIMERO)\n intentos += 1\n print(f\"Palabra a adivinar: {' '.join(coincidencias)}\")\n else:\n print(f\"Palabra a adivinar: {palabra_a_adivinar}\")\n intentos += 1\n\n tabla, puntos = actualizar_puntaje(tabla, intentos, cola_turnos[0], cola_turnos[1])\n if modo_juego == '1':\n intentos_totales1 += intentos\n else:\n if cola_turnos[0] == usuario_1:\n intentos_totales1 += math.ceil(intentos/2)\n intentos_totales2 += math.floor(intentos/2)\n else:\n intentos_totales2 += math.ceil(intentos/2)\n intentos_totales1 += math.floor(intentos/2)\n tiempo_fin = detener_cronometro()\n tiempo = tiempo_transcurrido(tiempo_inicio, tiempo_fin)\n acertado = resultado(arriesgo, 
palabra_a_adivinar, modo_juego, tiempo)\n if acertado and cola_turnos[0] == usuario_1:\n aciertos_totales1 += 1\n elif acertado:\n aciertos_totales2 += 1\n print(f\"{cola_turnos[PRIMERO]}, obtuviste {puntos} puntos. Tenes acumulados {tabla.get(cola_turnos[PRIMERO])} puntos en total.\")\n if modo_juego == '2':\n print(f\"{cola_turnos[SEGUNDO]}, obtuviste {-puntos} puntos. Tenes acumulados {tabla.get(cola_turnos[SEGUNDO])} puntos en total.\" if puntos != -100 else f\"{cola_turnos[SEGUNDO]}, obtuviste {int(puntos/2)} puntos. Tenes acumulados {tabla.get(cola_turnos[SEGUNDO])} puntos en total.\")\n partida += 1\n if partida < LIMITE_PARTIDAS:\n iniciar_partida, cola_turnos = continuar_jugando(modo_juego, cola_turnos, tabla, PRIMERO, iniciar_partida)\n \n # Cuando se termina de jugar todas las partidas\n guardar_partidas(fecha, tiempo_fin, cola_turnos[PRIMERO], aciertos_totales1, intentos_totales1, metodo)\n if modo_juego == '2':\n guardar_partidas(fecha, tiempo_fin, cola_turnos[SEGUNDO], aciertos_totales2, intentos_totales2, metodo)\n","repo_name":"CamilaAgustinaRivero/fiuble","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21722880179","text":"#!/usr/local/bin/python\nimport random\n\ndef simulateOneStudentWeighted():\n ''' This will simulate a single student's academic career at Westlake under the current system.\n Assumptions: \n Each student takes 28 classes, so 56 semesters\n These students have 4 classes that are essentially free (sport, art, yearbook, etc.) where they are a guaranteed 100\n To contend for the valedictorianship, a student must take between 15 and 19 AP classes and between 4 and 7 PAP classes, which will be uniformly randomly modeled\n To contend for the valedictorianship, a student will have 96 <= grade <= 100 for an AP, a 97-100 for PAP and a 98-100 for regular\n This covers the 4 unweighted a year thing and typical progressions through other requirements\n This simulation weights AP classes by 1.2 and pre-AP classes by 1.1\n '''\n total = 0\n numAPs = 30 + random.randint(0, 8)\n numPAPs = numAPs + 8 + random.randint(0, 6)\n for semester in range(48):\n if semester < numAPs:\n grade = random.randint(96,100)\n grade *= 1.2\n elif semester < numPAPs:\n grade = random.randint(97, 100)\n grade *= 1.1\n else:\n grade = random.randint(98, 100)\n total += grade\n GPA = total / 56.0\n return GPA\n\ndef simulateOneStudentUnweighted():\n ''' This will simulate a single student's academic career at Westlake under the proposed unweighted system.\n Assumptions:\n Each student takes 28 classes, so 56 semesters\n These students have 4 classes that are free (see above.)\n A contending student might take less AP classes under this system, so 12-19 AP and 4-7 PAP.\n To contend for the valedictorianship, a student will have 96 <= grade <= 100 for an AP, a 97-100 for PAP and a 98-100 for regular.\n '''\n total = 0\n numAPs = 24 + random.randint(0, 14)\n numPAPs = numAPs + 8 + random.randint(0, 6)\n for semester in range(48):\n if semester < numAPs:\n grade = random.randint(96,100)\n elif semester < numPAPs:\n grade = random.randint(97, 100)\n else:\n grade = random.randint(98, 100)\n total += grade\n GPA = total / 56.0\n return GPA\n\n\n\n\n\n\n\ndef simYear(weighted):\n ''' This will simulate a single year under the current system or new one. Returns true if there's a tie, false if not.\n One important assumption in this step is that there are realistically only about 10 students/year who\n are going to be competing for the valedictorianship. 
In our other system, this will be 15 to account for the increased number of students that could contend'''\n valGPA = 0\n for student in range(10 if weighted else 10):\n grade = simulateOneStudentWeighted() if weighted else simulateOneStudentUnweighted()\n if grade == valGPA:\n return True\n elif grade > valGPA:\n valGPA = grade\n return False\n\ndef simTrialsUnweighted():\n '''this will print summary statistics of the number of ties in the given number of trials'''\n ties = 0.0\n numTrials = 100000\n for trial in range(numTrials):\n ties += 1 if simYear(False) else 0\n print(\"Unweighted:\\n{} trials.\\n{} ties\\n{} tie rate.\\n\".format(numTrials, ties, ties/numTrials))\n\n\ndef simTrialsWeighted():\n '''this will print summary statistics of the number of ties in the given number of trials'''\n ties = 0.0\n numTrials = 100000\n for trial in range(numTrials):\n ties += 1 if simYear(True) else 0\n print(\"Weighted:\\n{} trials.\\n{} ties\\n{} tie rate.\\n\".format(numTrials, ties, ties/numTrials))\n\ndef main():\n simTrialsWeighted()\n simTrialsUnweighted()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"virajmehta/Multipliers","sub_path":"gradetest.py","file_name":"gradetest.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"34383335679","text":"import multiprocessing\nimport sys\nimport os\nfrom rosetta_classic_abinitio import ClassicAbinitio\n\n\ndef wrap(p):\n pname, factor = p\n ClassicAbinitio(pname).run(factor)\n\n\ndef main():\n np = 4\n\n conf, reps = get_conf_and_reps()\n todo = open_todo_and_get_todolist(conf)\n\n todo *= reps\n\n p = multiprocessing.Pool(np)\n p.map(wrap, todo)\n\n\ndef get_conf_and_reps():\n reps = 1\n if len(sys.argv) > 1:\n conf = sys.argv[1]\n if len(sys.argv) > 2:\n reps = int(sys.argv[2])\n else:\n raise NotImplementedError('Running with not args is not supported')\n\n return conf, reps\n\n\ndef open_todo_and_get_todolist(conf):\n todo = []\n with open(conf) as f:\n for l in f.readlines():\n pname, factor = l.strip().split(' ')\n todo.append((pname, int(factor)))\n\n return todo\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"h3nnn4n/protein-prediction-framework","sub_path":"src/de/bot_classic_abinitio.py","file_name":"bot_classic_abinitio.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70312410607","text":"import asyncio\nimport warnings\nfrom typing import Optional\n\nfrom mitmproxy import controller, ctx, flow, log, master, options, platform\nfrom mitmproxy.flow import Error\nfrom mitmproxy.proxy import commands\nfrom mitmproxy.proxy import server\nfrom mitmproxy.utils import asyncio_utils, human\n\n\nclass AsyncReply(controller.Reply):\n \"\"\"\n controller.Reply.q.get() is blocking, which we definitely want to avoid in a coroutine.\n This stub adds a .done asyncio.Event() that can be used instead.\n \"\"\"\n\n def __init__(self, *args):\n self.done = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n super().__init__(*args)\n\n def commit(self):\n super().commit()\n try:\n self.loop.call_soon_threadsafe(lambda: self.done.set())\n except RuntimeError: # pragma: no cover\n pass # event loop may already be closed.\n\n def kill(self, force=False): # pragma: no cover\n warnings.warn(\"reply.kill() is deprecated, set the error attribute instead.\", DeprecationWarning, stacklevel=2)\n self.obj.error = flow.Error(Error.KILLED_MESSAGE)\n\n\nclass ProxyConnectionHandler(server.StreamConnectionHandler):\n master: master.Master\n\n def __init__(self, master, r, w, options):\n self.master = master\n super().__init__(r, w, options)\n self.log_prefix = f\"{human.format_address(self.client.peername)}: \"\n\n async def handle_hook(self, hook: commands.StartHook) -> None:\n with self.timeout_watchdog.disarm():\n # We currently only support single-argument hooks.\n data, = hook.args()\n data.reply = AsyncReply(data)\n await self.master.addons.handle_lifecycle(hook)\n await data.reply.done.wait()\n data.reply = None\n\n def log(self, message: str, level: str = \"info\") -> None:\n x = log.LogEntry(self.log_prefix + message, level)\n x.reply = controller.DummyReply() # type: ignore\n asyncio_utils.create_task(\n self.master.addons.handle_lifecycle(log.AddLogHook(x)),\n name=\"ProxyConnectionHandler.log\"\n )\n\n\nclass Proxyserver:\n \"\"\"\n This addon runs the actual proxy server.\n \"\"\"\n server: Optional[asyncio.AbstractServer]\n listen_port: int\n master: master.Master\n options: options.Options\n is_running: bool\n\n def __init__(self):\n self._lock = asyncio.Lock()\n self.server = None\n self.is_running = False\n\n def load(self, loader):\n loader.add_option(\n \"connection_strategy\", str, \"lazy\",\n \"Determine when server connections should be established.\",\n choices=(\"eager\", \"lazy\")\n )\n loader.add_option(\n \"proxy_debug\", bool, False,\n \"Enable debug logs in the proxy core.\",\n )\n\n def running(self):\n self.master = ctx.master\n self.options = ctx.options\n self.is_running = True\n self.configure([\"listen_port\"])\n\n def configure(self, updated):\n if not self.is_running:\n return\n if \"mode\" in updated and ctx.options.mode == \"transparent\": # pragma: no cover\n platform.init_transparent_mode()\n if any(x in updated for x in [\"server\", \"listen_host\", \"listen_port\"]):\n asyncio.create_task(self.refresh_server())\n\n async def refresh_server(self):\n async with self._lock:\n if self.server:\n await self.shutdown_server()\n self.server = None\n if ctx.options.server:\n if not ctx.master.addons.get(\"nextlayer\"):\n ctx.log.warn(\"Warning: Running proxyserver without nextlayer addon!\")\n self.server = await asyncio.start_server(\n self.handle_connection,\n self.options.listen_host,\n self.options.listen_port,\n )\n addrs = {f\"http://{human.format_address(s.getsockname())}\" for s in self.server.sockets}\n ctx.log.info(f\"Proxy 
server listening at {' and '.join(addrs)}\")\n\n async def shutdown_server(self):\n ctx.log.info(\"Stopping server...\")\n self.server.close()\n await self.server.wait_closed()\n self.server = None\n\n async def handle_connection(self, r, w):\n asyncio_utils.set_task_debug_info(\n asyncio.current_task(),\n name=f\"Proxyserver.handle_connection\",\n client=w.get_extra_info('peername'),\n )\n handler = ProxyConnectionHandler(\n self.master,\n r,\n w,\n self.options\n )\n await handler.handle_client()\n","repo_name":"The-Cracker-Technology/mitmproxy","sub_path":"mitmproxy/addons/proxyserver.py","file_name":"proxyserver.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"23107288770","text":"from DBconnection.dbconf import PostgresConnection\nimport pandas as pd\n\n\nclass Query1:\n def __init__(self):\n self.con = PostgresConnection().getConnection()\n print(\"Constructor called\")\n\n def execute1(self):\n con = PostgresConnection().getConnection()\n cur = con.cursor()\n query = \"select s.division, sum(t.total_price) \" \\\n \"from star_schema.fact_table t \" \\\n \"join star_schema.store_dim s on s.store_key=t.store_key \" \\\n \"group by cube(s.division)\" \\\n \"order by s.division\"\n cur.execute(query)\n result = cur.fetchall()\n pd_data = pd.DataFrame(list(result), columns=['division', 'sales'])\n pd_data['sales'] = pd_data['sales'].astype('float64')\n pd_data = pd_data.dropna()\n # print(pd_data)\n return pd_data.to_dict(orient='records')\n\n\nif __name__ == '__main__':\n query1 = Query1()\n data = query1.execute1()\n print(data)\n","repo_name":"esrdlab-vinternship/HDA-Buet-vtraining-23","sub_path":"api/QueryController/query1.py","file_name":"query1.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"23467312893","text":"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 22:49:22 2019\n\n@author: Jarvis\n\"\"\"\n\nimport folium as fm\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\ndef GetActivePol(number):\n \"\"\"\n \n \"\"\"\n \n \n #Get the newest open activations\n url='https://emergency.copernicus.eu/mapping/activations-rapid/feed'\n resp = requests.get(url)\n soup=BeautifulSoup(resp.content, features='xml')\n \n #search for activations\n items =soup.findAll('item')\n first = items[number].title.text\n #Select only string values with activation Code\n activation = first[1:8] # code\n Info=first[10:len(first)]\n url='https://emergency.copernicus.eu/mapping/list-of-components/{}/aemfeed'.format(activation)\n resp = requests.get(url)\n soup=BeautifulSoup(resp.content, features='xml')\n #Scrape Polygons\n polygons =soup.findAll('georss:polygon')\n #m=fm.Map([46.90814465,14.3134518],zoom_start=10,tiles='OpenStreetMap')\n poldata=[]\n for pol in polygons:\n newpoly=[]\n # print(str(pol)[16:len(pol)-18])\n polraw=str(pol)[16:len(pol)-18]\n polsplit=polraw.split(\" \")\n a=0\n #print(len(polsplit)/2)\n for i in range(0,int(len(polsplit)/2)):\n newpoly.append(polsplit[a:a+2])\n a=a+2\n poldata.append(newpoly)\n \n print(poldata)\n \n \n jsondata={\n \"code\":activation,\n \"info\":Info,\n \"poldata\":poldata\n }\n \n print(jsondata)\n # for ponum, coor in enumerate(poldata):\n # print(coor)\n # jsondata[\"Pol-\"+str(ponum+1)]=coor\n print(jsondata)\n with open(activation+\".txt\",\"w\") as outfile:\n json.dump(jsondata,outfile)\n \n","repo_name":"sirks/emergenyApp","sub_path":"python/ScrapePoly.py","file_name":"ScrapePoly.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"20139207239","text":"# Ryan Carroll\n# CIS_115_NLE01\n# stadiumSeating.py\n# Converts ticket types into correct dollar amount then adds the total\n\na_value = 20 #\nb_value = 15 # Sets the ticket prices as constants\nc_value = 10 #\n\ndef main():\n num1, num2, num3 = get_tickets_sold() # Calls get tickets sold and sets output to num 1,2,3\n total_sum = calc_total_sales(num1, num2, num3) # Calls calc_total_sales passing num 1,2,3 - Sets to total_sum variable\n output(total_sum) # calls total sum function\n\ndef get_tickets_sold(): # Asks the user for input then sets the input to variables num 1,2,3\n num1 = int(input(\"How many Class A tickets were sold? \"))\n num2 = int(input(\"How many Class B tickets were sold? \"))\n num3 = int(input(\"How many Class C tickets were sold? \"))\n return int(num1), int(num2), int(num3) # Returns the integer of each variable\n\ndef calc_total_sales(num1, num2, num3): # Does math and outputs total_sum\n total_a = num1 * a_value\n total_b = num2 * b_value\n total_c = num3 * c_value\n total_sum = total_a + total_b + total_c\n return total_sum\n\ndef output(total_sum): # Prints the total sum variable\n print(f\"${total_sum}\")\n\n\nmain() # Calls main function\n","repo_name":"StackSatoshis/CIS_115","sub_path":"stadiumSeating.py","file_name":"stadiumSeating.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"30575777687","text":"from importlib.metadata import requires\nfrom turtle import pos\nfrom urllib import request\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\n\nimport json\nclass Aluno:\n nome=\"\"\n idade=0\n matricula=0\n\nclass Professor:\n nome=\"\"\n idade=0\n matricula=0\n\nclass Turma:\n materia =\"\"\n alunos =[]\n professores =[]\n\nclass School:\n diretor = []\n turmas = []\n\nscholl = School()\n\nb = Professor()\nb.idade = 15\nb.matricula= 470820\nb.nome = \"eliabe\"\njsonStr = json.dumps(b.__dict__)\napp = Flask(__name__)\n@app.route('/professor', methods=['POST'])\ndef registerProfessor():\n a = json.loads(request.data)\n for i in range (0, len(scholl.turmas)):\n if( scholl.turmas[i].materia == a[\"materia\"]):\n al = Professor()\n al.nome = a[\"name\"]\n al.matricula = a[\"matricula\"] \n for j in range (0,len(scholl.turmas[i].professores)):\n if(scholl.turmas[i].professores[j].matricula == al.matricula ):\n return \"Professor Já Existente\"\n al.idade = a[\"idade\"]\n scholl.turmas[i].professores.append(al)\n return \"Sucess\"\n\n@app.route('/allmaterias', methods=['GET'])\ndef allmaterias():\n materias = []\n for i in scholl.turmas:\n materias.append(i.materia)\n resp = {\n \"materias\": materias\n }\n return json.loads(json.dumps(resp))\n\n@app.route('/alunos', methods=['POST'])\ndef registerAlunos():\n a = json.loads(request.data)\n for i in range (0, len(scholl.turmas)):\n if( scholl.turmas[i].materia == a[\"materia\"]):\n al = Aluno()\n al.nome = a[\"name\"]\n al.matricula = a[\"matricula\"] \n for j in range (0,len(scholl.turmas[i].alunos)):\n if(scholl.turmas[i].alunos[j].matricula == al.matricula ):\n return \"Aluno Já Existente\"\n al.idade = a[\"idade\"]\n scholl.turmas[i].alunos.append(al)\n return \"Sucess\"\n\n@app.route('/materia', methods=['POST'])\ndef registerMateria():\n a = json.loads(request.data)\n for i in range (0, len(scholl.turmas)):\n if( scholl.turmas[i].materia == a[\"materia\"]):\n return \"Materia Já Existente\"\n t = Turma()\n t.materia = a[\"materia\"]\n scholl.turmas.append(t)\n return \"Sucess\"\n\n@app.route('/alunos//', methods=['GET', 'POST'])\ndef getAluno(username, materia):\n if request.method == \"GET\" :\n for i in scholl.turmas:\n if( i.materia == materia):\n for j in i.alunos:\n if j.nome == username:\n resp = {\n \"matricula\":j.matricula,\n \"idade\":j.idade,\n \"nome\": j.nome\n }\n js = json.dumps(resp) \n return json.loads(js)\n return \"Aluno não existe\"\n return \"Materia Nao Existe\" \n \n@app.route('/m/', methods=['GET'])\ndef getMateria(materia):\n if request.method == \"GET\" :\n for i in scholl.turmas:\n if( i.materia == materia):\n alunos=[]\n for k in i.alunos:\n alunos.append(k.matricula)\n professor = []\n for k in i.professores:\n professor.append(k.matricula)\n resp= {\n \"materia\": i.materia,\n \"alunos\": alunos,\n \"professores\": professor\n }\n return json.loads(json.dumps(resp))\n\n@app.route('/r/', methods=['GET'])\ndef removeMateria(materia):\n if request.method == \"GET\" :\n for i in scholl.turmas:\n if( i.materia == materia):\n scholl.turmas.remove(i)\n return \"Sucesso\"\n return \"materia inexistente\"\n@app.route('/alunos//', methods=['GET', 'POST'])\ndef removeAluno(username, materia):\n if request.method == \"GET\" :\n for i in scholl.turmas:\n if( i.materia == materia):\n for j in i.alunos:\n if j.nome == username:\n i.alunos.remove(j)\n return \"Aluno Removido Com Sucesso\"\n return \"Aluno inexistente\" 
","repo_name":"eliabe71/Trabalho3_SD","sub_path":"Q1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31028451067","text":"# The example function below keeps track of the opponent's history and plays whatever the opponent played two plays ago. It is not a very good player so you will need to change the code to pass the challenge.\n\ndef player(prev_play, opponent_history=[], my_history=[], won_games=[0], lost_games=[0],\n play_order=[{\n \"RR\": 0,\n \"RP\": 0,\n \"RS\": 0,\n \"PR\": 0,\n \"PP\": 0,\n \"PS\": 0,\n \"SR\": 0,\n \"SP\": 0,\n \"SS\": 0,\n }]):\n # Reset params if new player\n if prev_play != \"\":\n opponent_history.append(prev_play)\n else:\n print(\"reset\")\n opponent_history.clear()\n my_history.clear()\n won_games.insert(0, 0)\n lost_games.insert(0, 0)\n\n responses = {'P': 'R', 'R': 'S', 'S': 'P'}\n\n amountOfGames = len(my_history)\n\n if amountOfGames > 0:\n # Count for games won\n if responses[my_history[-1]] == opponent_history[-1]:\n won_games.insert(0, won_games[0] + 1)\n # Count for games lost\n if my_history[-1] == responses[opponent_history[-1]]:\n lost_games.insert(0, lost_games[0] + 1)\n\n # Calculate winrate\n if won_games[0] > 0:\n winrate = (won_games[0] / (won_games[0] + lost_games[0])) * 100\n elif lost_games[0] > 0:\n winrate = 0\n else:\n winrate = 100\n\n if winrate == 100:\n # Counter Quincy start with him for 100 % win\n order = [\"P\", \"S\", \"S\", \"R\", \"P\"]\n count = (amountOfGames) % 5\n guess = order[count]\n elif amountOfGames <= 10 or winrate >= 87:\n # counter Kriss --> Start with him second for a bit less than 100% win\n ideal_response = {'P': 'R', 'R': 'S', 'S': 'P'}\n guess = ideal_response[my_history[-1]]\n elif amountOfGames <= 20 or winrate > 82:\n # counter Mrugesh\n last_ten = my_history[-10:]\n most_frequent = max(set(last_ten), key=last_ten.count)\n\n if most_frequent == '':\n most_frequent = \"R\"\n\n ideal_response = {'P': 'R', 'R': 'S', 'S': 'P'}\n guess = ideal_response[most_frequent]\n\n else:\n # Counter Abby strategy\n last_two = \"\".join(my_history[-2:])\n if len(last_two) == 2:\n play_order[0][last_two] += 1\n\n potential_plays = [\n my_history[-1] + \"R\",\n my_history[-1] + \"P\",\n my_history[-1] + \"S\",\n ]\n\n sub_order = {\n k: play_order[0][k]\n for k in potential_plays if k in play_order[0]\n }\n\n prediction = max(sub_order, key=sub_order.get)[-1:]\n\n ideal_response = {'P': 'R', 'R': 'S', 'S': 'P'}\n guess = ideal_response[prediction]\n else:\n guess = \"P\"\n\n my_history.append(guess)\n return guess\n","repo_name":"UmbraSpirits/fcc-rock-paper-scissors","sub_path":"RPS.py","file_name":"RPS.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"39934400758","text":"import os\nimport shutil\n\nfrom multiply_data_access import DataAccessComponent\nfrom vm_support.sym_linker import create_sym_links\nfrom vm_support.utils import set_permissions\n\n\ndef create_dir(dir):\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except Exception as e:\n print(e)\n print(dir)\n return\n\nfrom osgeo import osr\n\nwgs84_srs = osr.SpatialReference()\nwgs84_srs.ImportFromEPSG(4326)\n\ndef get_working_dir(dir_name: str) -> str:\n working_dir = f'/datastore/working_dirs/{dir_name}'\n working_dir = f'/home/jovyan/data/working_dirs/{dir_name}'\n if os.path.exists(working_dir):\n shutil.rmtree(working_dir)\n os.makedirs(working_dir)\n return working_dir\n\nname = '/tmp'\nworking_dir = get_working_dir(name)\n\nprint(working_dir)\n\n\ndef get_static_data(data_access_component: DataAccessComponent, roi: str, roi_grid: str, start_time: str,\n stop_time: str, emulation_directory: str, dem_directory: str):\n create_dir(emulation_directory)\n create_dir(dem_directory)\n\n print('Retrieving emulators ...')\n emu_urls = data_access_component.get_data_urls(roi, start_time, stop_time, 'ISO_MSI_A_EMU,ISO_MSI_B_EMU', roi_grid)\n set_permissions(emu_urls)\n create_sym_links(emu_urls, emulation_directory)\n\n print('Retrieving DEM ...')\n dem_urls = data_access_component.get_data_urls(roi, start_time, stop_time, 'Aster_DEM', roi_grid)\n set_permissions(dem_urls)\n create_sym_links(dem_urls, dem_directory)\n print('Done retrieving static data')\n\n# data_access_component = DataAccessComponent()\n\n# param_roi = 'POLYGON ((5.163574 52.382529, 5.163574 52.529813, 5.493164 52.529813, 5.493164 52.382529, 5.163574 52.382529))'\n# spatial_resolution = 20\n#\n# # define output grid\n# param_roi_grid = 'EPSG:4326'\n# param_destination_grid = 'EPSG:4326'\n#\n# param_start_time_as_string = '2008-04-16'\n# param_stop_time_as_string = '2008-04-20'\n# time_step = 5 # in days\n#\n# emulators_directory = '{}/emulators'.format(working_dir)\n# dem_directory = '{}/dem'.format(working_dir)\n#\n#\n# get_static_data(data_access_component=data_access_component, roi=param_roi,\n# start_time=param_start_time_as_string, stop_time=param_stop_time_as_string,\n# emulation_directory=emulators_directory, dem_directory=dem_directory, roi_grid=param_roi_grid)","repo_name":"adeelaashraf/NaaVRE","sub_path":"docker/MULTIPLY/test_multiply.py","file_name":"test_multiply.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"20006493489","text":"import os\n#import time\nfrom controls import getch, movement\nfrom map import create_board, print_board\nfrom introduction import print_logo, story, about, help_controls\n#from battle import fight\nfrom hall_of_fame import print_hall\nfrom character import Player\nfrom game_inventory import print_table\n#from bossfight import bossfight\nfrom winlosescreen import lose\n\ndef new_game():\n\tstory()\n\tx = None\n\twhile not x:\n\t\tx = getch()\n\tos.system('clear')\n\tplayer = Player(input('Name your hero:'))\n\tcurrent_map = create_board('map_1.txt')\n\tinv = {}\n\tfeedback = ''\n\twhile not(x == 'q'):\n\t\tif not player.alive:\n\t\t\tlose(player)\n\t\tx = None\n\t\tos.system('clear')\n\t\tprint_board(current_map)\n\t\tprint(feedback)\n\t\tx = getch()\n\t\tmovement_return = movement(player, inv, current_map, x)\n\t\tif x == \"i\":\n\t\t\tprint_table(inv)\n\t\t\tx = None\n\t\t\twhile not x:\n\t\t\t\tx = getch()\n\t\tif x == \"h\":\n\t\t\thelp_controls()\n\t\t\tx = None\n\t\t\twhile not x == 'c':\n\t\t\t\tx = getch()\n\t\tif x == \"m\":\n\t\t\tif 'First aid kit' in inv:\n\t\t\t\tplayer.use_item('First aid kit')\n\t\t\telse:\n\t\t\t\tprint('You do not own the first aid kit')\n\t\t\tx = None\n\t\t\twhile not x == 'c':\n\t\t\t\tx = getch()\n\t\tcurrent_map = movement_return[0]\n\t\tinv = movement_return[1]\n\t\tfeedback = movement_return[2]\n\n\ndef main():\n\tos.system('clear')\n\tprint_logo()\n\tx = None\n\tabout()\n\twhile not x == '1' and not x == '2':\n\t\tx = getch()\n\tif x == '1':\n\t\tnew_game()\n\telif x == '2':\n\t\tos.system('clear')\n\t\tprint_hall()\n\t\tquit()\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"mrezlik/rogal_the_game","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35794455704","text":"import operator\nimport readline\nimport colorama\nfrom colorama import Fore, Back, Style\nOPERATORS = {\n\t'+': operator.add,\n\t'-': operator.sub,\n\t'*': operator.mul,\n\t'/': operator.truediv,\n\t'^': operator.pow,\n\t'%': operator.mod,\n\t'~': operator.inv,\n\t'|': operator.abs,\n\t'n': operator.neg,\n\t'p': operator.pos,\n\t'<': operator.lt,\n\t'>': operator.gt,\n\t'=': operator.eq,\n}\ndef calculate(arg):\n\tstack = list()\n\tfor operand in arg.split():\n\t\ttry:\n\t\t\toperand = float(operand)\n\t\t\tstack.append(operand)\n\n\t\texcept:\n\t\t\targ2 = stack.pop()\n\t\t\targ1 = stack.pop()\n\t\t\tprint(Back.BLUE + Fore.YELLOW,arg2, arg1)\n\t\t\toperator_fn = OPERATORS[operand]\n\t\t\tprint(Back.YELLOW + Fore.BLUE + str(operator_fn))\n\t\t\tresult = operator_fn(arg1, arg2)\n\t\t\t\n\t\t\tstack.append(result)\n\treturn stack.pop()\ndef main():\n\tprint(Fore.RED + \"THIS\")\n\tprint(Fore.GREEN + \"IS\")\n\tprint(\"AN\")\n\tprint(Fore.BLUE + \"RPN\")\n\tprint(\"CALCULATOR\")\n\twhile True:\n\t\tresult = calculate(input('rpn calc> '))\n\t\tprint(Style.RESET_ALL)\n\t\tprint(\"Result:\", result)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"pbalex/c4cs-w17-rpn","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"37882678668","text":"class Solution:\n def floodFill(self, image: List[List[int]], sr: int, sc: int, color: int) -> List[List[int]]:\n \n initial_color = image[sr][sc]\n directions = [1, 0, -1, 0, 1] \n inbound = lambda row, col: 0 <= row < len(image) and 0 <= col < len(image[0])\n \n q = deque()\n if color != image[sr][sc]:\n q.append((sr, sc))\n \n while q:\n size = len(q)\n for idx in range(size):\n row, col = q.popleft()\n if not inbound(row, col) or image[row][col] != initial_color:\n continue\n \n image[row][col] = color\n for idx in range(len(directions)-1):\n q.append((row + directions[idx], col + directions[idx+1]))\n \n \n return image\n ","repo_name":"Aklile-Yilma/A2SV-Competitive-Programming","sub_path":"0733-flood-fill/0733-flood-fill.py","file_name":"0733-flood-fill.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"27784308201","text":"# coding=utf-8\n# /usr/local/bin/python3 /Users/renweiqiang/Desktop/毕业论文/Dissertation/code/crawler/fenci.py\nimport pymysql\nimport time\nimport jieba\nimport jieba.analyse\n\n\ntext = '苗族分布在我国西南数省区。按方言划分,大致可分为湘西方言区、黔东方言区、川滇黔方言区。黔东南清水江流域一带是全国苗族最大的聚居区,大致包括凯里、剑河、黄平、台江、雷山、丹寨、施秉、黄平、镇远、三穗,以及广西三江和湖南靖县等地。在此广大苗族聚居区普遍流传着一种以创世为主体内容的诗体神话,俗称“古歌”或“古歌古词”。 苗族古歌内容包罗万象,从宇宙的诞生、人类和物种的起源、开天辟地、初民时期的滔天洪水,到苗族的大迁徙、苗族的古代社会制度和日常生产生活等,无所不包,成为苗族古代神话的总汇。 苗族古歌古词神话大多在鼓社祭、婚丧活动、亲友聚会和节日等场合演唱,演唱者多为中老年人、巫师、歌手等。酒席是演唱古歌的重要场合。苗族的古歌古词神话是一个民族的心灵记忆,是苗族古代社会的百科全书和“经典”,具有史学、民族学、哲学、人类学等多方面价值。今天,这些古歌古词神话还在民间流传唱诵。 但由于受到现代文化和市场经济的冲击,苗族古歌已濒临失传。以台江为例,在全县13万苗族同胞中,能唱完整部古歌的已寥寥无几,目前只有二百余人能唱一些不完整的古歌,而且都是中老年人,传承古歌较多的老人年事已高。如不抓紧抢救保护,苗族古歌这一民族瑰宝将最终在世间消失。'\n\ndef stopwordslist(filepath):\n stopwords = [line.strip() for line in open(filepath).readlines()]\n return stopwords\ndef filter_seg_list(seg_list):\n stop_words = stopwordslist('/Users/renweiqiang/Desktop/毕业论文/学习总结/LDA_Python/文本预处理/stop_words.txt')\n filter_seg = []\n for word in seg_list:\n if word not in stop_words:\n filter_seg.append(word)\n filter_seg = [i for i in filter_seg if i != '']\n return filter_seg\ndef filter_number_and_single(word):\n if(word.isdigit()):\n return False\n length = len(word)\n if(length not in [0, 1]):\n return word\n return False\n\n# corpus = []\n# corpus.append(\" \".join(keywords))\n# #print( \" \".join(keywords))\n\n\n\ndef get_keywords(text):\n keywords = filter_seg_list(jieba.cut(text)) # 去除停用词\n keywords = [j for j in keywords if filter_number_and_single(j) != False]\n jieba_keywords_text = \" \".join(keywords)\n # topK = 20\n # withWeight = False\n # tags = jieba.analyse.extract_tags(jieba_keywords_text, topK=topK, withWeight=withWeight)\n #return ' '.join(keywords), \" \".join(tags)\n return ' '.join(keywords)\n# keywords = get_keywords(text)\n# print(keywords[0])\n# print(keywords[1])\n\ndb = pymysql.connect(\"localhost\",\"root\",\"\",\"feiyi\")\ncursor = db.cursor()\n# sql = \"select count(*) from `minglu`\"\n# cursor.execute(sql)\n# count = cursor.fetchone()[0]\n\n# max_id_sql = \"select max(id) from `minglu`\"\n# cursor.execute(sql)\n# max_id = cursor.fetchone()[0]\n# min_id = 8\n\n# while(max_id <= 3260):\n# content_sql = '''select `content` from `minglu` where `id` in ({0}, {1})'''.format(min_id, max_id)\n \n#sql = \"select `id`, `content` from `minglu` where `id` > 10\"\nmax_id = 3157\ncurrent_id = 8\nstep = 100\n\nwhile(current_id <= max_id):\n next_id = current_id + step\n\n sql = ''' select `id`, `content` from `minglu` where `id` >= {0} and `id` <= {1} '''.format(current_id, next_id)\n try:\n # 执行SQL语句\n cursor.execute(sql)\n # 获取所有记录列表\n results = cursor.fetchall()\n for row in results:\n id = row[0]\n content = row[1]\n if content.strip():\n keywords = get_keywords(content)\n update_sql = ''' update `minglu` set `full_keywords` = \"{0}\" where `id` = {1} '''.format(keywords, id)\n #print(update_sql)\n print('---' + str(id)) \n try:\n cursor.execute(update_sql)\n db.commit()\n except:\n db.rollback()\n \n except:\n print (\"Error: unable to fetch data\")\n\n current_id = current_id + step\n\n time.sleep(1)\n\n\n\n# 关闭数据库连接\ndb.close()\n\n\n\n","repo_name":"rwqzcq/Dissertation","sub_path":"code/crawler/fenci2.py","file_name":"fenci2.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2482006882","text":"# orderedDict is a dict subclass which remembers the order in which the entries were done\n\n# od = collections.OrderedDict()\n# od['a'] = 2\n# od['b'] = 1\n# od['c'] = 3\n# print(od)\n#\n# OrderedDict({[(a,2), (b,1), (c, 3)]})\n\nfrom collections import OrderedDict\n\n\nd = OrderedDict()\nd[1] = 'a'\nd[2] = 'k'\nd[3] = 'h'\nd[4] = 'i'\nd[5] = 'l'\nprint(d)\n","repo_name":"Akhileshbhagat1/All-prectice-of-python","sub_path":"specialisedCOLLECTIONdataTYPES/orderedDict.py","file_name":"orderedDict.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"72246415408","text":"import sys, random\nimport numpy as np\nfrom typing import Tuple, List\nfrom queue import Queue\nfrom Utilities import geoDistance\n\nclass DBScan:\n def __init__(self, Eps : float, MinPt : int):\n self.core = -1\n self.border = -2\n self.Eps = Eps\n self.MintPt = MinPt\n\n def get_neighbor(self, userPointList : list, sample_idx):\n neighbors = []\n curUser = userPointList[sample_idx]\n\n for usrIdx, usrVal in enumerate(userPointList): \n if (usrIdx != sample_idx): \n # Geodesic distance\n geodesic_distance = geoDistance(usrVal.location, curUser.location)\n if geodesic_distance < self.Eps:\n neighbors.append(usrIdx)\n \n return neighbors\n\n def fit(self, userPointList : list):\n # initialize all points as outliers\n self.point_label = [0] * len(userPointList)\n point_count = []\n\n # initilize list for core/border points\n core = []\n border = []\n\n # print(point_label)\n \n # Find the neighbours of each individual point\n for usrIdx, usrVal in enumerate(userPointList):\n point_count.append(self.get_neighbor(userPointList, usrIdx))\n\n # print(point_count)\n\n # Find all the core points, border points and outliers\n for usrIdx in range(len(point_count)):\n if (len(point_count[usrIdx]) >= self.MintPt):\n self.point_label[usrIdx] = self.core\n core.append(usrIdx)\n else:\n border.append(usrIdx)\n\n for i in border:\n for j in point_count[i]:\n if j in core:\n self.point_label[i] = self.border\n break\n \n # Assign points to a cluster\n self.cluster = 1\n\n # Here we use a queue to find all the neighbourhood points of a core point and find the indirectly reachable points\n # We are essentially performing Breadth First search of all points which are within Epsilon distance for each other\n for i in range(len(self.point_label)):\n q = Queue()\n if (self.point_label[i] == self.core):\n self.point_label[i] = self.cluster\n for x in point_count[i]:\n if(self.point_label[x] == self.core):\n q.put(x)\n self.point_label[x] = self.cluster\n elif(self.point_label[x] == self.border):\n self.point_label[x] = self.cluster\n while not q.empty():\n neighbors = point_count[q.get()]\n for y in neighbors:\n if (self.point_label[y] == self.core):\n self.point_label[y] = self.cluster\n q.put(y)\n if (self.point_label[y] == self.border):\n self.point_label[y] = self.cluster\n self.cluster += 1 # Move on to the next cluster\n \n # return self.point_label, self.cluster #label for each userIdx and nunber of cluster\n\n def getCluster(self) -> List:\n clusterArray = [[] for i in range(self.cluster)]\n outliers = []\n\n for userIdx, label in enumerate(self.point_label):\n if(label != 0):\n clusterArray[label].append(userIdx)\n # else:\n # own_cluster = [userIdx]\n # outliers.append(own_cluster)\n\n if (len(outliers) > 0):\n clusterArray = list(filter(None, clusterArray + outliers))\n return clusterArray","repo_name":"ACM-Research/vr-user-behavior-clustering","sub_path":"21F/scripts/Common/DBScan.py","file_name":"DBScan.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"6409245488","text":"from telegram import ReplyKeyboardMarkup, Update, KeyboardButton, ParseMode, ChatAction\n\n\n############################################\n# all keyboards\n############################################\n\n# main menu keyboard options\nfirst_menu_keyboard_buttons = [\n [KeyboardButton('📲 Скачать Трек/Альбом/Плейлист из Spotify')],\n [KeyboardButton('🔎 Найти и скачать Трек/Альбом/Плейлист из Интернета')]\n]\nfirst_menu_markup = ReplyKeyboardMarkup(first_menu_keyboard_buttons, one_time_keyboard=True)\n\n# search type options\nsearch_type_buttons = [\n [KeyboardButton('📀 Альбом')],\n [KeyboardButton('🎵 Трек')],\n [KeyboardButton('🎧 Плейлист')]\n]\nsearch_type_buttons_markup = ReplyKeyboardMarkup(search_type_buttons, one_time_keyboard=True)\n\n# Quality keyboard options\nquality_menu_keyboard_buttons = [\n [KeyboardButton('Лучшее'), KeyboardButton('Q320K'), KeyboardButton('Q256K')],\n [KeyboardButton('Q192K'), KeyboardButton('Q128K'), KeyboardButton('Q96K')],\n [KeyboardButton('Q32K'), KeyboardButton('Худшее')],\n [KeyboardButton('↩️ Назад')]\n]\n\nquality_menu_markup = ReplyKeyboardMarkup(quality_menu_keyboard_buttons, one_time_keyboard=True)\n\n# Music format keyboard options\nmusic_format_menu_keyboard_buttons = [\n [KeyboardButton('MP3'), KeyboardButton('FLAC')],\n [KeyboardButton('AAC'), KeyboardButton('M4A')],\n [KeyboardButton('OPUS'), KeyboardButton('VORBIS'), KeyboardButton('WAV')],\n [KeyboardButton('↩️ Назад')]\n]\n\nmusic_format_menu_markup = ReplyKeyboardMarkup(music_format_menu_keyboard_buttons, one_time_keyboard=True)\n\n# Begin uploading music\nfinal_downloading_menu_buttons = [\n [KeyboardButton('Начать загрузку')]\n]\n\nfinal_downloading_menu_markup = ReplyKeyboardMarkup(final_downloading_menu_buttons, one_time_keyboard=True)\n\n# Uploading type menu\nuploading_type_menu_buttons = [\n [KeyboardButton('🗂 Архив')],\n [KeyboardButton('🎵 Треки по отдельности')]\n]\n\nuploading_type_menu_markup = ReplyKeyboardMarkup(uploading_type_menu_buttons, one_time_keyboard=True)\n\n\n# Uploading and downloading finished\nfinal_menu_buttons = [\n [KeyboardButton('🏠На главную')]\n]\n\nfinal_menu_markup = ReplyKeyboardMarkup(final_menu_buttons, one_time_keyboard=True)\n","repo_name":"azelenkovsky/spotify-downloader-bot","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"38711354515","text":"#!/usr/bin/env python \nimport sys\nimport os\nimport re\nimport yaml\nfrom termcolor import cprint\nimport arrow\n\nfrom imapclient import IMAPClient\nimport ssl\nimport email\n\nfrom ipdb import set_trace\n\n\nsettings_file = os.environ.get(\"SETTINGS_FILE\", \"settings.yaml\")\nsettings = yaml.safe_load(open(settings_file))\n\n\ndef get_body_of_mail(message_data):\n email_message = email.message_from_bytes(message_data[b'RFC822'])\n subject = email_message.get('Subject')\n date = email_message.get('Date')\n if subject != settings['mail']['subject']:\n cprint(f\"DEBUG: Skipping mail '{subject}' from {date}\", 'magenta')\n return\n body = \"\"\n print(f\"DEBUG: Processing mail '{subject}' from {date}\")\n if email_message.is_multipart():\n for part in email_message.get_payload():\n body += part.get_payload()\n else:\n body = email_message.get_payload()\n return body\n\n\ndef get_mails_from_ead():\n ssl_context = ssl.create_default_context(cafile=\"/etc/ssl/certs/ca-certificates.crt\")\n server = IMAPClient(settings['mail']['server'], ssl_context=ssl_context)\n server.login(settings['mail']['user'], settings['mail']['pass'])\n server.select_folder('INBOX')\n # just check the last 5 mails (independent of (un)seen)\n messages = server.search()[-5:]\n for __, message_data in server.fetch(messages, 'RFC822').items():\n body = get_body_of_mail(message_data)\n if body:\n yield(body)\n server.logout()\n\n\ndef get_abholtermin(email_body):\n abholtermin = re.search(settings['mail']['regex_abholung'], email_body)\n if not abholtermin:\n cprint(\"ERROR: Problem with the regex\", \"red\")\n sys.exit(1)\n __, date, description = abholtermin.group(1).strip().split(' ', 2)\n return date, description\n\n\ndef check_notification(date):\n # check if tomorrow is Abholung\n date_abholung = arrow.get(date, \"DD.MM.YYYY\")\n tomorrow = arrow.now().shift(days=+1)\n if date_abholung.day == tomorrow.day and date_abholung.month == tomorrow.month \\\n and date_abholung.year == tomorrow.year:\n cprint(f\"DEBUG: Bingo! Will notify others about the news! ({date_abholung.format('DD.MM.YYYY')} vs {tomorrow.format('DD.MM.YYYY')})\", 'green')\n return True\n else:\n print(f\"DEBUG: Zonk! Will not notify. ({date_abholung.format('DD.MM.YYYY')} vs {tomorrow.format('DD.MM.YYYY')})\")\n return False\n #return True\n\n\ndef read_mails_and_notify(irc_bot):\n # irc_bot is a irc.client.Reactor object\n cprint(f\"DEBUG: Let's check our mails at {arrow.now().format()}\", 'yellow')\n for mail in get_mails_from_ead():\n date, description = get_abholtermin(mail)\n if check_notification(date):\n msg = settings['msg'].format(description)\n print(f\"INFO: Sending irc message {msg}\")\n irc_bot.privmsg(settings['irc']['channel'], msg)\n cprint(f\"DEBUG: Done. Checked our mails at {arrow.now().format()}\", 'yellow')\n\n\nif __name__ == '__main__':\n read_mails_and_notify()\n","repo_name":"kmille/cda-garbage","sub_path":"imap_ead.py","file_name":"imap_ead.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"37432087347","text":"import PyPDF2\nfrom PyPDF2 import utils\n\n\n# OUTPUT_DIR = \"pdf-paranoia-encrypted\"\n# filename = \"meetingminutes.pdf\"\n\n# pdf = open(filename, \"rb\")\n# pdf_reader = PyPDF2.PdfFileReader(pdf)\n# pdf_writer = PyPDF2.PdfFileWriter()\n\n# for page_num in range(pdf_reader.numPages):\n# page = pdf_reader.getPage(page_num)\n# pdf_writer.addPage(page)\n\n# pdf_writer.encrypt(\"swordfish1\")\n# output_pdf = open(f\"{OUTPUT_DIR}/{filename[:-4]}_swordfish1.pdf\", \"wb\")\n# pdf_writer.write(output_pdf)\n# output_pdf.close()\n# pdf.close()\n\n\npdf = open(\"meetingminutes_swordfish1.pdf\", \"rb\")\npdf_reader = PyPDF2.PdfFileReader(pdf)\n\npdf_writer = PyPDF2.PdfFileWriter()\n\ntry:\n check = pdf_reader.decrypt(\"swordfish\")\n if check == 0:\n print(\"file not decrypted\")\nexcept Exception as e:\n print(e)\n\n\nfor page_num in range(pdf_reader.numPages):\n page = pdf_reader.getPage(page_num)\n\noutput_pdf = open(\"meetingminutes_decrypted.pdf\", \"wb\")\npdf_writer.write(\"output.pdf\")\noutput_pdf.close()\npdf.close()\n\n\nx = 5\n\nif x > 10:\n print(x)\nif x > 12:\n print(x + 1)\nelse:\n print(\"x is not a numbner\")\n","repo_name":"Sylruilshu/automate-the-boring-stuff","sub_path":"chapter-15/practice-projects/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"33528574551","text":"\nimport spacy\nimport pickle\nimport os\nimport pandas as pd\n\n#from TiposGenerales import *\n\nclass Clasificar:\n\n def __init__(self):\n\n #CARGAR DICCIONARIO CATEGORÍAS\n with open('/media/nofi-ai/NOFI_2022/DESARROLLO/ESA/DATA/categorias.pkl', 'rb') as f:\n self.categorias = pickle.load(f)\n # print(self.categorias)\n\n def UBICAR_EN_AUDIOTECA (self, nombreArchivo):\n\n #ANALIZAR TIPO (CLASIFICAR POR NOMBRE) PONER ETIQUETA SEGÚN ONTOLOGÍA AUDIOSET (GOOGLE)\n pass\n\n def CLASIFICAR_POR_NOMBRE(self, nombreArchivo='S-S AULLIDO LEJANO NOCHE.wav'):\n\n #ANALIZAR NOMBRE DE ARCHIVO PARA ESTIMAR A QUÉ CLASE PERTENECE:\n\n #RECIBE : NOMBRE\n #ENTREGA:\n\n\n\n #ABREVIATURAS Y EQUIVALENCIAS DE SENTIDO\n\n # TODO: EXTRAER TAG (TIPO)\n # TIPO: S-S, AMB, DIR, MUSICA, FX. ESTIMAR POR TAG (EJ: SI EL ARCHIVO\n # SE LLAMA \"S-S ...\") Y SINO, ESTIMAR POR FORMATO DE NOMBRE (EJ: MVI_7048\n # ES UN ARCHIVO DE CÁMARA).\n\n tag, nombre, extension = self.dividirTagNombreExt(nombreArchivo)\n\n nlp = spacy.load('es_core_news_lg')\n nombre = nlp(nombre)\n for token in nombre:\n print(token.text, token.pos_, token.dep_, token.rank, token.head.text)\n\n #extraer la palabra mas importante\n palabra_importante = nombre[0]\n\n import json\n\n with open('/media/nofi-ai/NOFI_2022/DESARROLLO/ESA/AUDIOTECA/DATA/ontology_es.json', 'r') as f:\n elementos = json.load(f)\n candidatos = {}\n for elemento in elementos:\n token = nlp(elemento['name_es'].lower())\n if token.similarity(palabra_importante) > 0.75:\n candidatos[token] = token.similarity(palabra_importante)\n # print(\"el tipo de sonido es: \", elemento['name_es'], \" por un porcentaje de \", token.similarity(palabra_importante)) # Imprime el nombre del elemento más similar\n max_value = max(candidatos.items(), key=lambda x: x[1])\n tipo_de_sonido = str(max_value[0])\n print(tipo_de_sonido)\n\n #EXTRAER RAIZ Y RESTO\n\n\n #TODO: HAY QUE ARMAR UN DICT CON ESTA ESTRUCTURA {RAIZ: palabra, RESTO: [RESTO1, ...]}.\n # EN EL FUTURO PODEMOS USAR MAS DATOS DE LAS PALABRAS\n\n\n\n # RESTO: QUITAR STOP WORDS. ENCONTRAR NÚCLEO (EJ: PASOS CEMENTO, \"PASOS\" ES EL\n # NÚCLEO)\n\n # INFERIR UBICACIÓN CORRECTA POR UBICACIÓN USUAL DE NÚCLEO. 
DEBE HABER UNA LISTA\n # DE PALABRAS (NÚCLEOS) COMUNES PARA CADA SECCIÓN DE LA AUDIOTECA.\n\n # BUSCAR EXCEPCIONES (EJ: SI \"PASOS\" VA SEGUIDO DE UNA CONSTRUCCIÓN ESPECÍFICA,\n # COMO \"PASOS CABALLO\") ESTO DEBE ENTENDERSE COMO PASOS DE CABALLO Y POR LO\n # TANTO, IR A ANIMALES.\n\n #UBICAR SONIDO EN CARPETA CORRECTA\n\n return tag, nombre,extension\n\n def CLASIFICAR_POR_AUDIO(self, nombreArchivo, instancia):\n\n #CHEQUEAR A QUE CLASE PERTENECE ANALIZANDO AUDIO TOMANDO\n #COMO PUNTO DE PARTIDA EL RESULTADO DE LA CLASIFICACIÓN POR NOMBRE\n\n #LA IDEA ES IMPLEMENTAR ESTO A PARTIR DE MODELO PREEXISTENTE DE TENSOR FLOW\n\n tipo = 'S-S'\n subtipo = 'SONO'\n return tipo, subtipo\n\n#UTILIDADES\n def dividirTagNombreExt(self, nombreArchivo):\n\n \"\"\"RECIBE NOMBRE DE ARCHIVO Y DEVUELVE TAG + NOMBRE \"\"\"\n # NORMALIZAR (pasamos a minúscula, borramos puntos y _)\n nombreArchivo, extension = os.path.splitext(nombreArchivo)\n nombreArchivo = nombreArchivo.lower()\n nombreArchivo = nombreArchivo.replace('.', ' ')\n nombreArchivo = nombreArchivo.replace('_', ' ')\n\n tag, *resto = nombreArchivo.split()\n nombre = \" \".join(resto)\n for categoria, etiquetas in self.categorias.items():\n # print(categoria, etiquetas)\n # print(\"tag \", tag)\n # Si la etiqueta está dentro de la lista de etiquetas de la categoría\n if tag in etiquetas:\n # Asigno a la variable 'tag' la palabra clave (categoría) actual\n tag_ok = categoria\n break\n try:\n tag = tag_ok\n except tag_ok == None:\n tag = '_'\n\n\n return tag, nombre, extension\n\n\n def ubicarRaiz(self, palabra, resto, path2Audioteca = '/home/nofi/AUDIOTECA/'):\n\n # RECIBE: UNA FRASE SEPARADA EN RAIZ Y RESTO + EL PATH DE LA AUDIOTECA\n # DEVUELVE: EL PATH FINAL DE UBICACIÓN DEL ARCHIVO\n\n\n \"\"\"El objetivo de esta función es buscar palabras clave en las distintas\n carpetas de la audioteca. Cada carpeta tiene que tener una lista de palabras}\n clave. Si encuentra la palabra en una de esas listas, devuelve el path donde\n se podrá ubicar el archivo\"\"\"\n\n raiz = path2Audioteca\n\n hits = []\n hitpath = []\n\n with open(raiz + 'CLAVES.pck', 'rb') as claves:\n CLAVES = pickle.load(claves)\n\n #ITERAR POR LISTA DE PALABRAS CLAVE, BUSCANDO SI ESTÁ PRESENTE NUESTRA\n # palabra (DADA A LA FUNCIÓN COMO ARGUMENTO).\n for carpeta in CLAVES.keys():\n for subcarpeta in CLAVES[carpeta].keys():\n\n #DE ENCONTRAR LA PALABRA CLAVE, SUMAR (UBICACIÓN Y PATH) A LISTA DE hits\n #print(CLAVES[carpeta][subcarpeta])\n if palabra in CLAVES[carpeta][subcarpeta].keys():\n path = path2Audioteca + carpeta + '/' + subcarpeta + '/'\n hitpath.append(path)\n hits.append([carpeta, subcarpeta, palabra])\n print ('HIT: ', CLAVES[carpeta][subcarpeta])\n print(hits)\n print(palabra)\n\n if len(hits) > 1:\n print('AMBIGÜEDAD')\n #SI len(hits) >= 1, tenemos un hit, FIJARSE PUNTAJE DE LA PALABRA CLAVE\n\n if len(hits) == 1:\n\n # SI ES < 3 CREAR POPUP PARA OBTENER PERMISO DEL USUARIO. 
SI ESTÁ OK,\n # DAR UN PUNTO A PALABRA CLAVE PARA ESA UBICACIÓN, COPIAR resto A LISTA EN\n # ESA UBICACIÓN Y return path\n print (hits)\n if CLAVES[hits[0][0]][hits[0][1]][hits[0][2]] < 3:\n #TODO: consultar resultado para evitar falso positivo\n CLAVES[hits[0][0]][hits[0][1]][hits[0][2]] += 1\n\n #if consultarResultado[0](raiz, hitpath):\n # CLAVES[hits[0]][hits[1]][hits[2]] += 1\n # return path\n #else:\n # return consultarResultado[1]\n\n else:\n\n CLAVES[hits[0][0]][hits[0][1]][hits[0][2]] += 1\n\n else:\n\n path = ubicarManualmente(raiz)\n\n direccion = separar(hitpath[-1])\n\n # CLAVES[direccion[-2]][direccion[-1]][palabra] = [1, []]\n print(path)\n\n\n return path\n\n #SI len(hits) > 1 tenemos una ambiguedad.\n\n #RESOLVER AMBIGUEDAD: COMPARAR RESTO PARA VER SI COINCIDE CON EL RESTO\n #DE CUALQUIER OTRA LISTA. SI COINCIDE EN UNA PALABRA PERO NO EN OTRA, DAR\n #PUNTAJE A ESA PALABRA.\n\n\n return path\n\n def consultarResultado(self, raiz = '/', path = None):\n\n if path == '/' or no:\n\n return False, ubicarManualmente(raiz)\n\n elif si:\n return True, None\n\n else:\n print ('error')\n\n def ubicarManualmente(self, raiz = '/', path = None):\n\n return path\n\n def separar(self, path):\n allparts = []\n while 1:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n allparts.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts\n\n\n\nif __name__ == '__main__':\n clasificar = Clasificar()\n print(clasificar.CLASIFICAR_POR_NOMBRE())\n\n\n\n #EJEMPLO DE USO\n\n # import TiposGenerales\n\n # arch = 'S-S PASOS CEMENTO'\n\n # tipo = Tipo (CLASIFICAR_POR_AUDIO(arch, CLASIFICAR_POR_NOMBRE(arch)))\n\n\n","repo_name":"nofi-sys/SOUND_GEEZER","sub_path":"ANALISIS/CLASIFICAR.py","file_name":"CLASIFICAR.py","file_ext":"py","file_size_in_byte":8085,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"27331325735","text":"\"\"\"The 'ZnFlow' package.\"\"\"\nimport contextlib\nimport importlib.metadata\nimport logging\nimport sys\n\nfrom znflow import exceptions\nfrom znflow.base import (\n CombinedConnections,\n Connection,\n FunctionFuture,\n Property,\n disable_graph,\n empty_graph,\n get_attribute,\n get_graph,\n)\nfrom znflow.combine import combine\nfrom znflow.graph import DiGraph\nfrom znflow.node import Node, nodify\nfrom znflow.visualize import draw\n\n__version__ = importlib.metadata.version(__name__)\n\n__all__ = [\n \"DiGraph\",\n \"Node\",\n \"draw\",\n \"nodify\",\n \"FunctionFuture\",\n \"Connection\",\n \"get_attribute\",\n \"disable_graph\",\n \"Property\",\n \"CombinedConnections\",\n \"combine\",\n \"exceptions\",\n \"get_graph\",\n \"empty_graph\",\n]\n\nwith contextlib.suppress(ImportError):\n from znflow import deployment\n\n __all__ += [\"deployment\"]\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\n\n# Formatter for advanced logging\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s : %(message)s')\nformatter = logging.Formatter(\"%(asctime)s (%(levelname)s): %(message)s\")\n\nchannel = logging.StreamHandler(sys.stdout)\nchannel.setLevel(logging.DEBUG)\nchannel.setFormatter(formatter)\n\nlogger.addHandler(channel)\n","repo_name":"zincware/ZnFlow","sub_path":"znflow/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"6913036670","text":"\"\"\"\nLeetCode\n1626. Best Team With No Conflicts\nJanuary 2023 Challenge\njramaswami\n\"\"\"\n\n\nfrom typing import *\nimport functools\nimport collections\n\n\nPerson = collections.namedtuple('Person', ['age', 'score'])\n\n\nclass Solution:\n def bestTeamScore(self, scores: List[int], ages: List[int]) -> int:\n\n people = [Person(a, s) for s, a in zip(scores, ages)]\n people.sort()\n\n @functools.cache\n def rec(i,mx):\n if i >= len(people):\n return 0\n\n # Do not pick this person.\n result = rec(i+1, mx)\n if mx <= people[i].score:\n # If you can, pick this person.\n result = max(\n result,\n people[i].score + rec(i+1, max(people[i].score, mx))\n )\n return result\n\n return rec(0, 0)\n\n\ndef test_1():\n scores = [1,3,5,10,15]\n ages = [1,2,3,4,5]\n expected = 34\n assert Solution().bestTeamScore(scores, ages) == expected\n\n\ndef test_2():\n scores = [4,5,6,5]\n ages = [2,1,2,1]\n expected = 16\n assert Solution().bestTeamScore(scores, ages) == expected\n\n\ndef test_3():\n scores = [1,2,3,5]\n ages = [8,9,10,1]\n expected = 6\n assert Solution().bestTeamScore(scores, ages) == expected\n","repo_name":"jramaswami/LeetCode_Python","sub_path":"best_team_with_no_conflicts.py","file_name":"best_team_with_no_conflicts.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10846306249","text":"def palindrome(num_list):\n for number in num_list:\n num_string = str(number)\n size = len(num_string)\n reversed_num = num_string[size::-1]\n if reversed_num == number:\n print(\"True\")\n else:\n print(\"False\")\n\n\nnumbers_list = list(input().split(\", \"))\npalindrome(numbers_list)\n\n# ------------------------------------- Problem to resolve ------------------------------\n#\n# A palindrome is a number that reads the same backward as forward, such as 323 or 1001.\n# Write a function that receives a list of positive integers, separated by comma and space \", \".\n# The function should check if each integer is a palindrome - True or False. Print the result.\n# Input\t Output\n# 123, 323, 421, 121\t False\n# True\n# False\n# True\n# 32, 2, 232, 1010\t False\n# True\n# True\n# False\n\n","repo_name":"ivoivanov0830006/1.3.Python_FUNDAMENTALS","sub_path":"4.Functions/*08.Palindrome_integers.py","file_name":"*08.Palindrome_integers.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"30830601391","text":"import requests\nfrom json import dumps\nfrom pathlib import Path\nfrom astropy.io.fits import PrimaryHDU, getdata, getheader\nfrom tenacity import retry, retry_if_exception_type, retry_if_result, \\\n stop_after_attempt, wait_exponential\n\n\ndef is_false(value):\n return value is False\n\n\ndef result_if_max_retry_count(retry_state):\n pass\n\n\nclass PlateSolution:\n\n def __init__(self, file=None, directory=None, api_key=None,\n api_url='http://nova.astrometry.net/api/'):\n if api_key is None:\n api_key = {'apikey': 'vfsyxlmdxfryhprq'}\n self.api_url = api_url\n self.api_key = api_key\n self.file = file\n self.directory = directory\n\n def plate_solution(self):\n session = self._login()\n if not session:\n return PlateSolution.fail('Login')\n\n sub_id = self._upload(session)\n if not sub_id:\n return PlateSolution.fail('Upload')\n\n sub_url = self._get_url(f\"submissions/{sub_id}\")\n job_id = self._sub_status(sub_url)\n if not job_id:\n return PlateSolution.fail('Submission ID')\n\n job_url = self._get_url(f\"jobs/{job_id}\")\n download_url = self.api_url.replace(\"/api/\", f\"/wcs_file/{job_id}/\")\n wcs_file = Path(self.directory) / \"temp\" / \"wcs.fits\"\n wcs_file = self._job_status(job_url, wcs_file, download_url)\n if not wcs_file:\n return PlateSolution.fail('Job Status')\n else:\n print(\"WCS file creation successful.\")\n return wcs_file\n\n def _get_url(self, service):\n return self.api_url + service\n\n @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10),\n retry=(retry_if_result(is_false) | retry_if_exception_type(requests.exceptions.RequestException)),\n retry_error_callback=result_if_max_retry_count)\n def _login(self):\n r = requests.post(self._get_url('login'), data={'request-json': dumps(self.api_key)})\n if r.status_code >= 400:\n return False\n elif r.json()['status'] == 'success':\n return r.json()['session']\n return False\n\n @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10),\n retry=(retry_if_result(is_false) | retry_if_exception_type(requests.exceptions.RequestException)),\n retry_error_callback=result_if_max_retry_count)\n def _upload(self, session):\n files = {'file': open(self.file, 'rb')}\n headers = {'request-json': dumps({\"session\": session}), 'allow_commercial_use': 'n',\n 'allow_modifications': 'n', 'publicly_visible': 'n'}\n\n r = requests.post(self.api_url + 'upload', files=files, data=headers)\n\n if r.json()['status'] == 'success':\n return r.json()['subid']\n return False\n\n @retry(stop=stop_after_attempt(20), wait=wait_exponential(multiplier=1, min=4, max=10),\n retry=(retry_if_result(is_false) | retry_if_exception_type(requests.exceptions.RequestException)),\n retry_error_callback=result_if_max_retry_count)\n def _sub_status(self, sub_url):\n r = requests.get(sub_url)\n if r.json()['job_calibrations']:\n return r.json()['jobs'][0]\n return False\n\n @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10),\n retry=(retry_if_result(is_false) | retry_if_exception_type(requests.exceptions.RequestException)),\n retry_error_callback=result_if_max_retry_count)\n def _job_status(self, job_url, wcs_file, download_url):\n r = requests.get(job_url)\n if r.json()['status'] == 'success':\n r = requests.get(download_url)\n with wcs_file.open('wb') as f:\n f.write(r.content)\n hdu = PrimaryHDU(data=getdata(filename=self.file), header=getheader(filename=wcs_file))\n hdu.writeto(wcs_file, overwrite=True)\n return wcs_file\n 
return False\n\n @staticmethod\n def fail(error_type):\n print(\"WARNING: After multiple attempts, EXOTIC could not retrieve a plate solution from nova.astrometry.net\"\n f\" due to {error_type}. EXOTIC will continue reducing data without a plate solution.\")\n return False\n","repo_name":"rzellem/EXOTIC","sub_path":"exotic/api/plate_solution.py","file_name":"plate_solution.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"2"}
+{"seq_id":"35584549799","text":"from modul_mnk import *\r\nimport matplotlib.pyplot as plt\r\nimport pylab\r\nimport numpy as np\r\n\r\nyy = []\r\nxx = []\r\n\r\nwith open (\"windmil.txt\",\"r\") as file:\r\n dane = file.readlines()\r\n for line in dane:\r\n yy.append(float(line.split()[1]))\r\n xx.append(float(line.split()[0]))\r\n dane.append(yy)\r\n dane.append(xx)\r\n\r\nprint(xx)\r\nprint(yy)\r\n\r\nres = []\r\nbsr = []\r\n\r\nfor n in range(2,10):\r\n aa, bb = gen_ur_mnk(xx, yy, n)\r\n\r\n print(aa)\r\n print(bb)\r\n\r\n\r\n wsp = numpy.linalg.solve(aa, bb) # a0,a1,a2\r\n print('wsp:', wsp)\r\n\r\n def prawdziwe():\r\n li = []\r\n for i in xx:\r\n wielomian4(i,wsp)\r\n li.append(wielomian4(i,wsp))\r\n return li\r\n\r\n prawdziwe()\r\n r_kwadrat(yy,prawdziwe())\r\n blad(prawdziwe(),prawdziwe())\r\n\r\n res.append(r_kwadrat(yy,prawdziwe()))\r\n bsr.append(blad(yy,prawdziwe()))\r\n\r\n print(res)\r\n print(bsr)\r\n\r\n p = range(2, 17)\r\n a = min(p)\r\n b = max(p)\r\n dx = (b - a) / (len(p)+10)\r\n\r\n zz = [a]\r\n ww = [wielomian4(a, wsp)]\r\n\r\n for i in p:\r\n zz.append(zz[-1] + dx)\r\n ww.append(wielomian4((zz[-1]), wsp))\r\n\r\n print(zz)\r\n print(ww)\r\n n = str(n)\r\n x = zz\r\n y = ww\r\n pylab.plot(x, y)\r\n pylab.title('Model regresji' + \" dla \" + n + \" stopnia wielomianu\")\r\n plt.xlabel(\"prędkość wiatru[mph]\")\r\n plt.ylabel(\"moc wiatraka\")\r\n pylab.grid(True)\r\n # pylab.show()\r\n\r\n plt.plot(xx, yy, 'ro')\r\n plt.axis([0, 11, 0, 3])\r\n plt.show()\r\n\r\n\r\nplt.show()\r\n\r\n\r\ndef wykres1():\r\n x = [2,3,4,5,6,7,8,9]\r\n y = res\r\n pylab.plot(x,y)\r\n pylab.title('Wartość współczynnika determinacji w zależności od stopnia wielomianu')\r\n plt.xlabel(\"stopień wielomianu\")\r\n plt.ylabel(\"wartość współczynnika determinacji\")\r\n pylab.grid(True)\r\n pylab.show()\r\n\r\ndef wykres2():\r\n i = [2,3,4,5,6,7,8,9]\r\n j = bsr\r\n pylab.plot(i,j)\r\n pylab.title('Wartość błędu średniego w zależności od stopnia wielomianu')\r\n plt.xlabel(\"stopień wielomianu\")\r\n plt.ylabel(\"wartość błędu średniokwadratowego\")\r\n pylab.grid(True)\r\n pylab.show()\r\n\r\nwykres1()\r\nwykres2()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MarcelinaSS/The-method-of-least-squares","sub_path":"test_mnk_nasz.py","file_name":"test_mnk_nasz.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40748634167","text":"from docx import Document # pip install python-docx \r\nfrom docx.shared import Inches,Pt,RGBColor\r\nfrom PIL import Image\r\nimport json\r\nimport os\r\nimport math\r\nimport argparse\r\n\r\nclass project_creator:\r\n max_img_width = 700\r\n def __init__(self,info_file,new_page_new_question = False):\r\n # file name saved \r\n self.info_file = info_file\r\n self.new_page_new_question = new_page_new_question \r\n\r\n def __enter__(self):\r\n self.extract_info()\r\n # start a new word file \r\n self.document = Document()\r\n # use sections to edit page layout and margin\r\n sections = self.document.sections\r\n for section in sections:\r\n section.top_margin = Inches(0.5)\r\n section.bottom_margin = Inches(0.5)\r\n section.left_margin = Inches(0.5)\r\n section.right_margin = Inches(0.5)\r\n\r\n return self\r\n \r\n def __exit__(self ,type, value, traceback):\r\n # save the file at last\r\n self.document.save(self.file_name +'.docx')\r\n print(self.file_name,\" Saved 😁😀\")\r\n return False\r\n\r\n def extract_info(self):\r\n # data file and read data and update class\r\n with open(self.info_file,'r',encoding=\"utf8\") as info_file:\r\n try:\r\n info = json.load(info_file)\r\n self.file_name = info[\"file_name\"]\r\n self.userinfo = info[\"userinfo\"]\r\n self.questions = info[\"data\"][\"questions\"]\r\n self.files = info[\"data\"][\"files\"]\r\n self.isHeading = info[\"format\"][\"heading\"]\r\n self.block = info[\"format\"][\"data\"]\r\n self.isEnd_name = info[\"format\"][\"end_name\"] \r\n except :\r\n raise Exception(f' Unable to read data from json File -> {self.info_file} -try fixing some format.')\r\n else:\r\n # for the directory for code should be present\r\n directory = os.path.normpath(info[\"directory\"])\r\n if os.path.isdir(directory):\r\n self.directory = directory \r\n else :\r\n raise Exception('Directory not found') \r\n \r\n def file_reader(self,file_name):\r\n print(f'reading file => {file_name} 🕵️')\r\n # try except block for error \r\n try:\r\n with open(file_name ,mode = 'r',encoding=\"utf8\") as code:\r\n return code.read()\r\n except :\r\n raise Exception(\"file reading error -check file name and location (we are using encoding='utf8' for decoding)\")\r\n\r\n def question(self,data):\r\n # add heading for questions\r\n que = self.document.add_heading(data, level=1)\r\n font = que.style.font\r\n font.size = Pt(20)\r\n font.color.rgb = RGBColor(0,0,0)\r\n \r\n def code(self,data):\r\n # sol\r\n ans = self.document.add_paragraph(\"\")\r\n ans.add_run('Sol.').bold = True\r\n\r\n # add code to the doc\r\n code = self.document.add_paragraph(data)\r\n # indent for code\r\n paragraph_format = code.paragraph_format\r\n paragraph_format.left_indent = Inches(0.5)\r\n font = code.style.font\r\n font.size = Pt(14)\r\n font.color.rgb = RGBColor(0,0,50)\r\n \r\n def image(self,image):\r\n # add iamge to the doc\r\n # output\r\n output = self.document.add_paragraph(\"\")\r\n output.add_run('output.').bold = True\r\n \r\n # image width and height of image\r\n w,h = Image.open(image).size\r\n\r\n # big - 1920 1080 # idle - 700 _\r\n if w < self.max_img_width:\r\n self.document.add_picture(image)\r\n else:\r\n self.document.add_picture(image,width=Inches(7.5))\r\n \r\n def data_block(self,question,code_data,ss):\r\n # write questions as heading in bold\r\n if self.block[\"question\"] :\r\n self.question(question) # print(question)\r\n\r\n if self.block[\"solution\"] :\r\n self.code(code_data) # print(code_data)\r\n \r\n if self.block[\"picture\"] : 
\r\n self.image(ss) # print(ss)\r\n \r\n def create_file(self):\r\n # write file name at top in center\r\n if self.isHeading:\r\n heading = self.document.add_heading(self.file_name, 0) # print(self.file_name)\r\n heading.alignment = 1\r\n\r\n if len(self.questions) != len(self.files):\r\n raise Exception(\"question should have files to use and files should have questions check the info file for corrections \")\r\n\r\n for question,files in zip(self.questions,self.files):\r\n # question # code = self.file_reader(files[0]) # ss = files[1]\r\n self.data_block(question,self.file_reader(files[0]),files[1])\r\n\r\n if self.new_page_new_question and question != self.questions[-1]:\r\n self.document.add_page_break()\r\n\r\n if self.isEnd_name:\r\n # add name of the student in the end the file \r\n hr = self.document.add_paragraph(\"\")\r\n hr.add_run(\"_____________________________________________________________________\").bold = True\r\n hr.alignment = 1\r\n para = ''\r\n for key,value in zip(self.userinfo.keys(),self.userinfo.values()):\r\n para += f'{key} : {value}\\n' \r\n else : para = para[:-1]\r\n\r\n end_userinfo = self.document.add_paragraph(para)\r\n font = end_userinfo.style.font\r\n font.color.rgb = RGBColor(0,0,0)\r\n\r\n def walk_in_dir(self):\r\n print(self.directory)\r\n os.chdir(self.directory)\r\n print(f\"Dir changed to {self.directory}\")\r\n\r\n @staticmethod\r\n def open_file(file_name):\r\n # to open the file for results \r\n print(\"opening file for checking ...\")\r\n os.startfile(file_name+'.docx')\r\n\r\n @classmethod\r\n def runner(cls,info_file,new_page_new_question = False):\r\n with cls(info_file,new_page_new_question) as clas:\r\n # change dir to the directory for reading and saving the data\r\n clas.walk_in_dir()\r\n \r\n # this will create file and save all the changes\r\n clas.create_file()\r\n file_name = clas.file_name\r\n \r\n # to open the after it saved\r\n cls.open_file(file_name)\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-n','--newPage',action=\"store_true\",help = \"for get every new question on new page.\")\r\n parser.add_argument('-i','--infofile', default = 'info.json', help = \"json data file info-file default (info.json).\")\r\n \r\n args = parser.parse_args()\r\n \r\n project_creator.runner(args.infofile,new_page_new_question = args.newPage)\r\n","repo_name":"rishi23root/class_automations","sub_path":"project_automation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"41121599975","text":"import c4d\nimport collections\nimport time\nimport traceback\n\n\nclass Rect(object):\n ''' Represents an axis-aligned rectangle in two dimensional space\n represented as four integer numbers. For floating-point precision\n pixel-arithmetic, compute the components manually. '''\n\n def __init__(self, x1, y1, x2=None, y2=None, w=None, h=None):\n super(Rect, self).__init__()\n self.x1 = int(x1)\n self.y1 = int(y1)\n\n if x2 is None:\n if w is None:\n raise ValueError('neither x2 nor w specified')\n x2 = self.x1 + w\n if y2 is None:\n if h is None:\n raise ValueError('neither y2 nor h specified')\n y2 = self.y1 + h\n\n self.x2 = int(x2)\n self.y2 = int(y2)\n\n def __iter__(self):\n yield self.x1\n yield self.y1\n yield self.x2\n yield self.y2\n\n def __contains__(self, other):\n if isinstance(other, (list, tuple)):\n if len(other) != 2:\n message, 'list/tuple for __contains__ must have 2 elements'\n raise ValueError(message, len(other))\n x, y = other\n if x < self.x1 or x > self.x2:\n return False\n if y < self.y1 or y > self.y2:\n return False\n return True\n elif type(other) is Rect:\n if other.x2 < self.x1 or other.y2 < self.y1:\n return False\n if other.x1 > self.x2 or other.y1 > self.y2:\n return False\n return True\n else:\n raise TypeError('unsupported type for __contains__', type(other))\n\n def __repr__(self):\n return 'Rect{0}'.format(self.tup())\n\n def __copy__(self, **overrides):\n copy = Rect(self.x1, self.y1, self.x2, self.y2)\n for k, v in overrides.iteritems():\n setattr(copy, k, v)\n return copy\n\n def tup(self, grow=0, relative=False):\n if relative:\n return self.x1, self.y1, self.w + grow, self.h + grow\n else:\n return self.x1, self.y1, self.x2 + grow, self.y2 + grow\n\n def get_normalized(self):\n x1, y1, x2, y2 = self.tup()\n if x2 < x1: x1, x2 = x2, x1\n if y2 < y1: y1, y2 = y2, y1\n return Rect(x1, y1, x2, y2)\n\n def get_width(self):\n return self.x2 - self.x1\n\n def get_height(self):\n return self.y2 - self.y1\n\n def set_width(self, w):\n self.x2 = self.x1 + int(w)\n\n def set_height(self, h):\n self.y2 = self.y1 + int(h)\n\n def center(self):\n return (self.x1 + self.w / 2), (self.y1 + self.h / 2)\n\n copy = __copy__\n width = w = property(get_width, set_width)\n height = h = property(get_height, set_height)\n\n\nclass ExtendedUserArea(c4d.gui.GeUserArea):\n ''' The *ExtendedUserArea* provides a bunch of helper functions and\n pre-implemented overrides of the *GeUserArea* class to make it easier\n to implement custom user interfaces. 
'''\n\n def SendAction(self, bc=None):\n if bc is None:\n bc = c4d.BaseContainer()\n bc.SetId(c4d.BFM_ACTION)\n bc.SetInt32(c4d.BFM_ACTION_ID, self.GetId())\n return self.SendParentMessage(bc)\n\n def GetMouse(self, msg, drag=False):\n if drag:\n return (msg.GetInt32(c4d.BFM_DRAG_SCREENX), msg.GetInt32(c4d.BFM_DRAG_SCREENY))\n else:\n return (msg.GetInt32(c4d.BFM_INPUT_X), msg.GetInt32(c4d.BFM_INPUT_Y))\n\n def GetMouseLocal(self, msg, drag=False):\n x, y = self.GetMouse(msg, drag)\n if drag:\n conv = self.Screen2Local()\n else:\n conv = self.Global2Local()\n\n return (x + conv['x'], y + conv['y'])\n\n def GenerateMouseDrag(self, msg, state=None, only_mouse_change=False):\n device = msg.GetInt32(c4d.BFM_INPUT_DEVICE)\n channel = msg.GetInt32(c4d.BFM_INPUT_CHANNEL)\n if state is None:\n state = c4d.BaseContainer()\n prev_mouse = None\n while self.GetInputState(device, channel, state):\n if not state.GetInt32(c4d.BFM_INPUT_VALUE):\n break\n\n mouse = self.GetMouseLocal(state)\n if not only_mouse_change or prev_mouse != mouse:\n yield mouse\n prev_mouse = mouse\n else:\n yield None\n\n def MouseEvent(self, msg, channel):\n return False\n\n def KeyboardEvent(self, msg, channel):\n return False\n\n def DragEvent(self, msg, dragtype, data, dropped):\n return False\n\n def InputEvent(self, msg):\n device = msg.GetInt32(c4d.BFM_INPUT_DEVICE)\n channel = msg.GetInt32(c4d.BFM_INPUT_CHANNEL)\n if device == c4d.BFM_INPUT_MOUSE:\n return self.MouseEvent(msg, channel)\n elif device == c4d.BFM_INPUT_KEYBOARD:\n return self.KeyboardEvent(msg, channel)\n return False\n\n def Message(self, msg, result):\n if msg.GetId() == c4d.BFM_DRAGRECEIVE:\n quit = msg.GetInt32(c4d.BFM_DRAG_LOST)\n if not quit: quit = msg.GetInt32(c4d.BFM_DRAG_ESC)\n if not quit: quit = not self.CheckDropArea(msg, True, True)\n if quit:\n return self.SetDragDestination(c4d.MOUSE_FORBIDDEN)\n\n data = self.GetDragObject(msg)\n dropped = bool(msg.GetInt32(c4d.BFM_DRAG_FINISHED))\n result = self.DragEvent(msg, data['type'], data['object'], dropped)\n if dropped:\n result = True\n return result\n return super(ExtendedUserArea, self).Message(msg, result)\n\n\nclass IconView(ExtendedUserArea):\n ''' Simple User Area to present an icon. If the icon is clicked, it\n will send a `c4d.gui.GeDialog.Command()` event with the ID of the\n user area. 
'''\n\n AlignLeft = 'left'\n AlignRight = 'right'\n AlignTop = 'top'\n AlignBottom = 'bottom'\n AlignCenter = 'center'\n\n def __init__(self, icon, alignh=AlignCenter, alignv=AlignCenter,\n width=None, height=None, bgcol=None, cursor=None):\n super(IconView, self).__init__()\n if isinstance(icon, int):\n icon = c4d.gui.GetIcon(icon)\n elif isinstance(icon, c4d.bitmaps.BaseBitmap):\n w, h = icon.GetSize()\n icon = {'bmp': icon, 'x': 0, 'y': 0, 'w': w, 'h': h}\n elif not (icon is None or isinstance(icon, dict)):\n raise TypeError('expected int, BaseBitmap or dict', type(icon))\n\n self.icon = icon\n self.alignh = alignh\n self.alignv = alignv\n self.width = width\n self.height = height\n self.cursor = cursor\n self.bgcol = bgcol\n self.pressed = False\n self.double_buffered = True\n self.on_click = None\n\n def get_icon(self):\n return self.icon\n\n def _get_icon_dict(self):\n bmp = self.get_icon()\n if isinstance(bmp, c4d.bitmaps.BaseBitmap):\n bmp = {'bmp': bmp, 'x': 0, 'y': 0, 'w': bmp.GetBw(), 'h': bmp.GetBh()}\n elif not isinstance(bmp, dict) and not bmp is None:\n message = '{0}.get_banner_image() must return BaseBitmap or dict'\n warnings.warn(message.format(type(self).__name__), RuntimeWarning)\n return bmp\n\n # ExtendedUserArea\n\n def MouseEvent(self, msg, channel):\n if channel == c4d.BFM_INPUT_MOUSELEFT:\n rect = Rect(0, 0, self.GetWidth(), self.GetHeight())\n mouse = self.GetMouseLocal(msg)\n framerate = 1.0 / 15\n for mouse in self.GenerateMouseDrag(msg):\n self.pressed = mouse in rect\n self.Redraw()\n time.sleep(framerate)\n if self.pressed:\n if self.on_click:\n try:\n self.on_click()\n except Exception as exc:\n traceback.print_exc()\n self.SendAction()\n self.pressed = False\n self.Redraw()\n return True\n return False\n\n # c4d.gui.GeUserArea\n\n def DrawMsg(self, x1, y1, x2, y2, msg):\n if self.double_buffered:\n self.OffScreenOn()\n\n if self.bgcol:\n bgcol = self.bgcol\n elif self.pressed:\n bgcol = c4d.COLOR_BGFOCUS\n else:\n bgcol = c4d.COLOR_BG\n\n self.DrawSetPen(bgcol)\n self.DrawRectangle(x1, y1, x2 - 1, y2 - 1)\n icon = self._get_icon_dict()\n\n if icon:\n bmp, bx, by, bw, bh = (icon[n] for n in 'bmp x y w h'.split())\n dw, dh = self.GetMinSize()\n width, height = self.GetWidth(), self.GetHeight()\n\n if self.alignh == self.AlignRight:\n xpos = width - dw\n elif self.alignh == self.AlignCenter:\n xpos = (width - dw) / 2\n elif self.alignh == self.AlignLeft or True:\n xpos = 0\n\n if self.alignv == self.AlignBottom:\n ypos = height - dh\n elif self.alignv == self.AlignCenter:\n ypos = (height - dh) / 2\n elif selg.alignv == self.AlignTop or True:\n ypos = 0\n\n flags = c4d.BMP_ALLOWALPHA\n self.DrawBitmap(bmp, xpos, ypos, dw, dh, bx, by, bw, bh, flags)\n\n if self.pressed:\n self.DrawBorder(\n c4d.BORDER_THIN_IN, 0, 0,\n self.GetWidth() - 1, self.GetHeight() - 1)\n\n def GetMinSize(self):\n icon = self._get_icon_dict()\n if not icon:\n return (0, 0)\n else:\n bw, bh = icon['w'], icon['h']\n width, height = bw, bh\n if self.width is None:\n if self.height is not None:\n width = (self.height / float(bh)) * bw\n else:\n width = self.width\n if self.height is None:\n if self.width is not None:\n height = (self.width / float(bw)) * bh\n else:\n height = self.height\n return (int(width), int(height))\n\n\n def Message(self, msg, result):\n if self.cursor is not None and msg.GetId() == c4d.BFM_GETCURSORINFO:\n result.SetId(c4d.BFM_GETCURSORINFO)\n result.SetLong(c4d.RESULT_CURSOR, self.cursor)\n return True\n return super(IconView, self).Message(msg, result)\n\n\n\ndef 
handle_file_select(dialog, param, type=c4d.FILESELECTTYPE_ANYTHING,\n title='', flags=c4d.FILESELECT_LOAD, force_suffix=''):\n ''' Opens a file selection dialog for which the result will be filled\n into the parameter in the *dialog* identified with *paramid*. The\n Cinema 4D filename widget is a little buggy, and using this function\n is convenient if your dialog uses a string and button widget instead.\n\n :param dialog: :class:`c4d.gui.GeDialog`\n :param param: :class:`int` -- The id of the string widget.\n :param type: :class:`int` -- Passed to `c4d.storage.LoadDialog`\n :param title: :class:`str` -- See :func:`c4d.storage.LoadDialog`\n :param flags: :class:`int` -- See :func:`c4d.storage.LoadDialog`\n :param force_suffix: :class:`str` -- See :func:`c4d.storage.LoadDialog`\n :return: True if the file selection was handled and the parameter\n set, False if not.\n '''\n\n def_path = dialog.GetString(param)\n filename = c4d.storage.LoadDialog(type, title, flags, force_suffix, def_path)\n if filename:\n dialog.SetString(param, filename)\n return True\n return False\n\n","repo_name":"0anion0/nr.c4d","sub_path":"nr/c4d/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
+{"seq_id":"38223504356","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n\n @ Author : Max_Pengjb\n @ date : 2018/9/23 22:37\n @ IDE : PyCharm\n @ GitHub : https://github.com/JackyPJB\n @ Contact : pengjianbiao@hotmail.com\n-------------------------------------------------\n Description : \n-------------------------------------------------\n\"\"\"\n\n__author__ = 'Max_Pengjb'\n\noperate = ['+', '-', '*', '/']\n\n\ndef eval_rpn(express_list):\n stack = []\n top = 0\n for operation in express_list:\n if operation in operate:\n if top < 2:\n return False\n m = stack.pop()\n n = stack.pop()\n if operation == '+':\n k = m + n\n elif operation == '-':\n k = m - n\n elif operation == '*':\n k = m * n\n elif operation == '/':\n k = n / m\n else:\n pass\n stack.append(k)\n top -= 1\n else:\n stack.append(int(operation))\n top += 1\n if top > 1:\n return False\n else:\n return stack[0]\n\n\ninpp = [\"4\", \"2\", \"/\"]\nprint(eval_rpn(inpp))\n","repo_name":"Max-PJB/python-learning","sub_path":"stack_queue/Reverse_Polish_notation_RPN.py","file_name":"Reverse_Polish_notation_RPN.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"8169137726","text":"# SALUDO\n\ndef holaMundo(nombre):\n return f\"Hola. Bienvenido {nombre}\"\n\n# CALCULADORA\n\ndef calculadora(numero1,numero2,basicas = False):\n suma = numero1 + numero2\n resta = numero1 - numero2\n division = numero1 / numero2\n multiplicacion = numero1 * numero2\n\n cadena =\"\" \n\n if basicas != False:\n cadena += \"Suma: \" + str(suma)\n cadena += \"\\n\"\n cadena += \"Resta: \" + str(resta)\n cadena += \"\\n\"\n else:\n cadena += \"Suma: \" + str(suma)\n cadena += \"\\n\"\n cadena += \"Resta: \" + str(resta)\n cadena += \"\\n\"\n cadena += \"Multiplicación: \" + str(multiplicacion)\n cadena += \"\\n\"\n cadena += \"División: \" + str(division)\n cadena += \"\\n\"\n\n return cadena\n \n\n \n \n\n","repo_name":"PilarNew/pybasic","sub_path":"12-modulos/mimodulo.py","file_name":"mimodulo.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"69979162608","text":"import RPi.GPIO as GPIO\r\nimport time\r\n\r\n# callable class\r\nclass BtnEventEx:\r\n def __init__(self):\r\n self.button_pin = 16\r\n self.led_pin = 18\r\n\r\n GPIO.setup(self.button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\r\n GPIO.setup(self.led_pin, GPIO.OUT)\r\n\r\n self.light_on = False\r\n\r\n # button_callback 함수를 정의합니다.\r\n def button_callback(self, channel):\r\n if self.light_on == False: # LED 불이 꺼져있을때\r\n GPIO.output(self.led_pin,1) # LED ON\r\n print(\"LED ON!\")\r\n else: # LED 불이 져있을때\r\n GPIO.output(self.led_pin,0) # LED OFF\r\n print(\"LED OFF!\")\r\n self.light_on = not self.light_on # False <=> True\r\n \r\n\r\n def __call__(self):\r\n self.light_on = False\r\n GPIO.add_event_detect(self.button_pin,GPIO.RISING, callback=self.button_callback, bouncetime=300)\r\n\r\n try:\r\n while 1: #무한반복\r\n time.sleep(0.1) # 0.1초 딜레이\r\n except KeyboardInterrupt:\r\n GPIO.remove_event_detect(self.button_pin)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ex = BtnEventEx()\r\n ex()","repo_name":"cooluks2/iot","sub_path":"02.device/RaspberryPi/01_GPIO-TEST/btneventex.py","file_name":"btneventex.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"12446670725","text":"\"\"\"Determines and saves results.\"\"\"\n\n# libraries:\nimport bs4\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom prettytable import PrettyTable\n\n# other files:\nimport sentiment_analyzer\nimport csv_handler\nimport search_scraper\n\n# import web_interface\n\n\nclass color:\n \"\"\"Defines different colors and text formatting settings to be used for CML output printing.\"\"\"\n\n PURPLE = \"\\033[95m\"\n CYAN = \"\\033[96m\"\n DARKCYAN = \"\\033[36m\"\n BLUE = \"\\033[94m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n\ndef run_results_generator(scored_articles, stocks_list, abbrv_list, write_file):\n # find duplicate links and remove them\n links = []\n for article in scored_articles:\n check_list = isinstance(article, list)\n if article[\"link\"] in links:\n print(\"*!!* Duplicate article - removing...\", article[\"link\"])\n scored_articles.remove(article)\n else:\n links.append(article[\"link\"])\n\n fin_scored_stocks = generate_results(\n stocks_list, abbrv_list, scored_articles, write_file\n )\n\n return fin_scored_stocks\n\n\ndef remove_dup_articles(scored_articles):\n result = []\n seen = set()\n result = []\n for dic in scored_articles:\n key = (dic[\"link\"], dic[\"title\"])\n if key in seen:\n continue\n result.append(dic)\n seen.add(key)\n scored_articles = result\n return scored_articles\n\n\ndef generate_results(stocks_list, abbrv_list, scored_articles, write_file):\n \"\"\"Driver function to generate results with.\"\"\"\n [i for n, i in enumerate(scored_articles) if i not in scored_articles[n + 1 :]]\n\n no_dup = []\n seen = set()\n result = []\n for dic in scored_articles:\n key = (dic[\"link\"], dic[\"title\"])\n if key in seen:\n continue\n no_dup.append(dic)\n seen.add(key)\n\n scored_stocks = calc_stock_sentiment(no_dup, stocks_list)\n # save run date to overall dict for csv purposes\n scored_stocks = calc_recent_stock_sentiment(no_dup, scored_stocks)\n\n scored_stocks = calc_stock_trifold_rating(no_dup, scored_stocks)\n\n scored_stocks = calc_ovr_stock_article_feelings(no_dup, scored_stocks)\n\n # write data\n if write_file == \"\":\n pass\n elif \".csv\" in write_file:\n print(\"VALID CSV\")\n csv_handler.write_data(scored_articles, write_file)\n else:\n write_file = \"my_results.csv\"\n csv_handler.write_data(scored_articles, write_file)\n\n scored_stocks = calc_ovr_media_rating(scored_articles, scored_stocks)\n\n # get stock price/attribute information\n i = 0\n while i < len(scored_stocks):\n (\n price,\n previous_close,\n open_price,\n avg_volume,\n volume,\n yr_target,\n ) = search_scraper.get_stock_attributes(abbrv_list[i])\n\n # ^^^ I should print this out in a results_table\n\n scored_stocks[i][\"current_price\"] = price\n scored_stocks[i][\"volume\"] = volume\n scored_stocks[i][\"avg_volume\"] = avg_volume\n scored_stocks[i][\"yr_target\"] = yr_target\n\n i += 1\n\n fin_scored_stocks = predict_stock_well_being(scored_stocks)\n print(\"FINALIZED SCORED STOCKS\", fin_scored_stocks)\n return fin_scored_stocks\n\n # will need to ask user if they want to get media results for the stocks inside the CML, not a UI issue\n\n\ndef calc_article_sent_scores(articles):\n \"\"\"Averages all sentence scores together, if multiple, and produces one averaged score for a body of text.\"\"\"\n\n print(\"Calcuting text score....\")\n for article in articles:\n ovr_text_sent_score = 0\n sent_count = 0\n for txsent in 
article[\"text_sent\"]:\n sent_count += 1\n ovr_text_sent_score += txsent[\"compound\"]\n ovr_text_sent_score = ovr_text_sent_score / sent_count\n\n # ovr_title_sent_score\n ovr_title_sent_score = 0\n sent_count = 0\n for tisent in article[\"title_sent\"]:\n sent_count += 1\n ovr_title_sent_score += tisent[\"compound\"]\n ovr_title_sent_score = ovr_title_sent_score / sent_count\n\n # ovr_desc_sent_score\n ovr_desc_sent_score = 0\n sent_count = 0\n for dsent in article[\"desc_sent\"]:\n sent_count += 1\n ovr_desc_sent_score += dsent[\"compound\"]\n ovr_desc_sent_score = ovr_desc_sent_score / sent_count\n\n article[\"ovr_text_sent_score\"] = float(ovr_text_sent_score)\n article[\"ovr_title_sent_score\"] = float(ovr_title_sent_score)\n article[\"ovr_desc_sent_score\"] = float(ovr_desc_sent_score)\n\n trifold_score, trifold_rating = calc_article_trifold_rating(\n ovr_text_sent_score, ovr_title_sent_score, ovr_desc_sent_score\n )\n article[\"trifold_score\"] = trifold_score\n article[\"trifold_rating\"] = trifold_rating\n\n # call calc_sent_rating():\n # text_sent_rating\n text_sent_rating = calc_sent_rating(ovr_text_sent_score)\n article[\"text_sent_rating\"] = text_sent_rating\n # title_sent_rating\n title_sent_rating = calc_sent_rating(ovr_title_sent_score)\n article[\"title_sent_rating\"] = title_sent_rating\n # desc_sent_rating\n desc_sent_rating = calc_sent_rating(ovr_desc_sent_score)\n article[\"desc_sent_rating\"] = desc_sent_rating\n\n # add all these scores back to articles dictionary and return it so others can ues the scores\n\n return articles\n\n\ndef calc_sent_rating(sent_score):\n \"\"\"Calculates the sentiment rating for a given title, description, or text sentiment rating for an article.\"\"\"\n\n rating = \"Unknown\"\n if float(sent_score) >= -0.05554 and float(sent_score) <= 0.05554:\n rating = \"Neutral\"\n elif float(sent_score) <= -0.05555 and float(sent_score) >= -0.30554:\n rating = \"Somewhat Negative\"\n elif float(sent_score) <= -0.30555 and float(sent_score) >= -0.70554:\n rating = \"Negative\"\n elif float(sent_score) <= -0.70555 and float(sent_score) >= 1.0:\n rating = \"Very Negative\"\n elif float(sent_score) >= 0.05555 and float(sent_score) <= 0.30554:\n rating = \"Somewhat Positive\"\n elif float(sent_score) >= 0.30555 and float(sent_score) <= 0.70554:\n rating = \"Positive\"\n elif float(sent_score) >= 0.70555 and float(sent_score) <= 1.0:\n rating = \"Very Positive\"\n\n return rating\n\n\ndef calc_article_trifold_rating(\n ovr_text_sent_score, ovr_title_sent_score, ovr_desc_sent_score\n):\n \"\"\"Calculates a overall 'trifold' score for an article based on the title, description, and text sentiment scores.\"\"\"\n\n trifold_score = (\n ovr_text_sent_score + ovr_title_sent_score + ovr_desc_sent_score\n ) / 3\n\n trifold_rating = calc_sent_rating(trifold_score)\n\n return trifold_score, trifold_rating\n\n\ndef calc_stock_sentiment(scored_articles, stocks_list):\n \"\"\"Calculates average sentiment score for a stock based on all articles (text) for given stock.\"\"\"\n scored_stocks = []\n for stock in stocks_list:\n article_count = 0\n stock_sent_score = 0\n for article in scored_articles:\n if article[\"stock\"] == stock:\n article_count += 1\n stock_sent_score += float(article[\"ovr_text_sent_score\"])\n avg_stock_sent_score = stock_sent_score / article_count\n avg_stock_sent_feelings = calc_sent_rating(avg_stock_sent_score)\n\n stock_sent_dict = {\n \"stock\": stock,\n \"avg_stock_sent_score\": avg_stock_sent_score,\n \"avg_stock_sent_feelings\": 
avg_stock_sent_feelings,\n \"article_count\": article_count,\n }\n scored_stocks.append(stock_sent_dict)\n\n return scored_stocks\n\n\ndef calc_recent_stock_sentiment(scored_articles, scored_stocks):\n \"\"\"Calculates average sentiment score for a stock based the most recent articles (within last 7 days).\"\"\"\n\n for stock in scored_stocks:\n recent_article_count = 0\n day_article_count = 0\n day_stock_sent_score = 0\n stock_sent_score = 0\n for article in scored_articles:\n if article[\"stock\"] == stock[\"stock\"]:\n if (\n \"day\" in article[\"date\"]\n ): # see if day is in it because then we know it is less than a week old/recent\n recent_article_count += 1\n stock_sent_score += float(article[\"ovr_text_sent_score\"])\n elif \"hour\" in article[\"date\"]:\n # day\n day_article_count += 1\n day_stock_sent_score += float(article[\"ovr_text_sent_score\"])\n # recent\n recent_article_count += 1\n stock_sent_score += float(article[\"ovr_text_sent_score\"])\n\n try:\n rcnt_text_sent_score = stock_sent_score / recent_article_count\n except:\n rcnt_text_sent_score = 0\n stock[\"recent_article_count\"] = recent_article_count\n stock[\"rcnt_text_sent_score\"] = rcnt_text_sent_score\n stock[\"rcnt_text_sent_rating\"] = calc_sent_rating(rcnt_text_sent_score)\n\n try:\n day_text_sent_score = day_stock_sent_score / day_article_count\n except:\n day_text_sent_score = 0\n stock[\"day_article_count\"] = day_article_count\n stock[\"day_stock_sent_score\"] = day_stock_sent_score\n stock[\"day_stock_sent_rating\"] = calc_sent_rating(day_stock_sent_score)\n\n return scored_stocks\n\n\ndef calc_ovr_stock_article_feelings(scored_articles, scored_stocks):\n \"\"\"Sees if the articles for a stock are generally positive, neutral, or negative.\"\"\"\n # parses all of the article['text_sent_rating']\n\n for stock in scored_stocks:\n positive_article_count = 0\n neutral_article_count = 0\n negative_article_count = 0\n\n for article in scored_articles:\n sent_score = article[\"ovr_text_sent_score\"]\n if article[\"stock\"] == stock[\"stock\"]:\n if float(sent_score) > 0.05:\n positive_article_count += 1\n elif float(sent_score) >= -0.05 and float(sent_score) <= 0.05:\n neutral_article_count += 1\n elif float(sent_score) < -0.05:\n negative_article_count += 1\n else:\n pass\n\n count_list = []\n count_list.append(positive_article_count)\n count_list.append(neutral_article_count)\n count_list.append(negative_article_count)\n largest = max(count_list)\n if largest == positive_article_count:\n stock[\"overall_stock_articles_feelings\"] = \"Positive\"\n elif largest == neutral_article_count:\n stock[\"overall_stock_articles_feelings\"] = \"Neutral\"\n elif largest == negative_article_count:\n stock[\"overall_stock_articles_feelings\"] = \"Negative\"\n else:\n stock[\"overall_stock_articles_feelings\"] = \"Undetermined\"\n\n stock[\"positive_article_count\"] = positive_article_count\n stock[\"neutral_article_count\"] = neutral_article_count\n stock[\"negative_article_count\"] = negative_article_count\n\n return scored_stocks\n\n\ndef calc_stock_trifold_rating(scored_articles, scored_stocks):\n \"\"\"Takes the trifold ratings for each article for a given stock and gets the average trifold rating.\"\"\"\n for stock in scored_stocks:\n stock_trifold_rating = 0\n stock_article_count = 0\n for article in scored_articles:\n if article[\"stock\"] in stock[\"stock\"]:\n stock_article_count += 1\n stock_trifold_rating += float(article[\"trifold_score\"])\n\n try:\n ovr_stock_trifold_rating = stock_trifold_rating / 
stock_article_count\n except:\n ovr_stock_trifold_rating = 0\n stock[\"ovr_stock_trifold_rating\"] = ovr_stock_trifold_rating\n stock[\"ovr_stock_trifold_feelings\"] = calc_sent_rating(ovr_stock_trifold_rating)\n\n return scored_stocks\n\n\ndef calc_ovr_media_rating(scored_articles, scored_stocks):\n \"\"\"Calculates a given websites rating for a given stock based on it's overall articles.\"\"\"\n\n media_list = []\n for article in scored_articles:\n media = article[\"media\"]\n if media in media_list:\n pass\n else:\n media_list.append(media)\n\n for stock in scored_stocks:\n stock_media_list = []\n for media in media_list:\n article_count = 0\n media_sent_score = 0\n for article in scored_articles:\n if article[\"media\"] == media and article[\"stock\"] == stock[\"stock\"]:\n article_count += 1\n media_sent_score += float(article[\"ovr_text_sent_score\"])\n\n try:\n stock_media_avg_sent_score = media_sent_score / article_count\n except:\n stock_media_avg_sent_score = 0\n media_sent_rating = calc_sent_rating(stock_media_avg_sent_score)\n\n if not media:\n media = \"Unknown Source\"\n\n media_dict = {\n \"media\": media,\n \"media_avg_sent_score\": stock_media_avg_sent_score,\n \"article_count\": article_count,\n \"media_sent_rating\": media_sent_rating,\n }\n stock_media_list.append(media_dict)\n stock[\"media_results\"] = stock_media_list\n\n return scored_stocks\n\n\ndef predict_stock_well_being(scored_stocks):\n \"\"\"Predicts the overall view of a stock and whether it will continue to rise or fall.\"\"\"\n\n # CURRENTLY BASIC FUNCTION/CALCULATION - more updates to come in future PRs\n # takes stock_trifold_rating, ovr_stock_text_sent, calc_recent_stock_sentiment, ovr_stock_feelings as inputs\n\n for stock in scored_stocks:\n wght_rcnt_text = 0.25 * (float(stock[\"rcnt_text_sent_score\"]) * 100) # .25\n\n if wght_rcnt_text == 0:\n wght_avg_text = 0.40 * (\n float(stock[\"avg_stock_sent_score\"]) * 100\n ) # .40 if nc\n wght_trifold = 0.25 * (\n float(stock[\"ovr_stock_trifold_rating\"]) * 100\n ) # .25 if nc\n else:\n wght_avg_text = 0.25 * (float(stock[\"avg_stock_sent_score\"]) * 100) # .25\n wght_trifold = 0.15 * (\n float(stock[\"ovr_stock_trifold_rating\"]) * 100\n ) # .15\n\n print(\"YR\" + (stock[\"yr_target\"]))\n print(\"CR\" + (stock[\"current_price\"]))\n\n if stock[\"yr_target\"] != 'N/A':\n if float(stock[\"yr_target\"]) == float(stock[\"current_price\"]):\n per = 100.0\n elif float(stock[\"yr_target\"]) > float(stock[\"current_price\"]):\n try:\n per = (\n abs(float(stock[\"yr_target\"]) - float(stock[\"current_price\"]))\n / float(stock[\"current_price\"])\n ) * 100.0\n except ZeroDivisionError:\n per = 0\n\n print(\"per\", per)\n if per >= 15:\n weight_yr_per = 20\n elif per >= 10:\n weight_yr_per = 15\n elif per >= 5:\n weight_yr_per = 10\n else:\n weight_yr_per = 7.5\n elif float(stock[\"yr_target\"]) < float(stock[\"current_price\"]):\n try:\n per = (\n abs(float(stock[\"yr_target\"]) - float(stock[\"current_price\"]))\n / float(stock[\"current_price\"])\n ) * 100.0\n except ZeroDivisionError:\n per = 0\n weight_yr_per = 5\n else:\n weight_yr_per = 10\n\n\n print(\"wght\", weight_yr_per)\n\n if stock[\"overall_stock_articles_feelings\"] == \"Positive\": # .10\n weight_feelings = 10\n elif stock[\"overall_stock_articles_feelings\"] == \"Neutral\":\n weight_feelings = 5\n elif stock[\"overall_stock_articles_feelings\"] == \"Negative\":\n weight_feelings = 0\n elif stock_sentiments == \"Undertermined\":\n weight_feelings = 0\n print(\"*!!* Stock Sentiment is 
Undetermined.\")\n else:\n pass\n\n volume = int(stock[\"volume\"].replace(\",\", \"\"))\n avg_volume = int(stock[\"avg_volume\"].replace(\",\", \"\"))\n\n if volume > avg_volume: # .05\n volume_wght = 5\n elif avg_volume > volume:\n volume_wght = 0\n elif volume == avg_volume:\n volume_wght = 3\n else:\n volume_wght = 2.5\n\n stock_well_being_prediction = (\n wght_rcnt_text\n + wght_avg_text\n + weight_feelings\n + wght_trifold\n + volume_wght\n + weight_yr_per\n )\n\n stock[\"stock_well_being_prediction\"] = stock_well_being_prediction\n # will need to finetune these calculations\n if stock_well_being_prediction < 15.5555 or stock_well_being_prediction < 0:\n stock[\"stock_well_being_prediction_feelings\"] = \"Poor Wellbeing\"\n if (\n stock_well_being_prediction > 15.5555\n and stock_well_being_prediction < 40.55555\n ):\n stock[\"stock_well_being_prediction_feelings\"] = \"Moderate Wellbeing\"\n elif (\n stock_well_being_prediction > 40.55555\n and stock_well_being_prediction < 65.555\n ):\n stock[\"stock_well_being_prediction_feelings\"] = \"Good Wellbeing\"\n elif (\n stock_well_being_prediction > 65.555\n and stock_well_being_prediction < 100.555\n ):\n stock[\"stock_well_being_prediction_feelings\"] = \"Extremely Good Wellbeing\"\n\n return scored_stocks\n\n\ndef predict_historical_stock_well_being():\n \"\"\"Given an input file of scored stocks over time, generate/predict the overall stock well being rating more accurately given more data.\"\"\"\n # to be implemented later\n","repo_name":"lussierc/StockStoryScraper","sub_path":"src/results_generator.py","file_name":"results_generator.py","file_ext":"py","file_size_in_byte":18080,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"32459600831","text":"__author__ = 'Jos\\'user'\nimport twitter\nimport io\nimport json\nimport datetime\n\nfrom datetime import timedelta\nfrom flask import Flask, request, render_template\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps import Map\n\n\n#Funcion para la conexion.\ndef oauth_login():\n CONSUMER_KEY = 'kvAbp1mrWFTvUtdwMZm2SbnGE'\n CONSUMER_SECRET = 'WqGXQIpOVKbjwP8FRWF4u7Xy3kc5kMkujuvEDT9fqZfBiykCLI'\n OAUTH_TOKEN = '7730092-BvcE6lKJs8455JE8hyEhYHKXHX5g9X05izuuU47qIX'\n OAUTH_TOKEN_SECRET = 'xGzotzBjBImJNDLAogP60jb3GVlRnp3M9jtp3QSFgJDAI'\n\n auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)\n\n twitter_api = twitter.Twitter(auth=auth)\n return twitter_api\n\ndef geo(tw,ht):\n query = tw.search.tweets(q=('#'+ht),count=100)\n \n listado=[]\n \n for resultado in query[\"statuses\"]:\n # only process a result if it has a geolocation\n if resultado[\"place\"]:\n #(resultado[\"place\"][\"bounding_box\"][\"coordinates\"][0])\n momento = datetime.datetime.strptime(resultado[\"created_at\"], '%a %b %d %H:%M:%S +0000 %Y') + timedelta(hours=1)\n latitud = 0\n longitud = 0\n for e in resultado[\"place\"][\"bounding_box\"][\"coordinates\"][0]:\n latitud += e[0]\n longitud += e[1]\n latitud = latitud/len(resultado[\"place\"][\"bounding_box\"][\"coordinates\"][0])\n longitud = longitud/len(resultado[\"place\"][\"bounding_box\"][\"coordinates\"][0])\n \n momento = momento + datetime.timedelta(hours=1)\n listado.append({\"id\":resultado[\"id\"], \"lugar\" : resultado[\"place\"][\"full_name\"], \"momento\" : momento, \"latitud\" : latitud, \"longitud\" : longitud, \"usuario\":resultado[\"user\"]})\n \n return listado\n\ndef tagMethod(tag):\n\tlistado = geo(oauth_login(),tag)\n\tl={}\n\n\tfor e in listado:\n\t\tl.update({e['usuario']['profile_image_url']:[(e['longitud'],e['latitud'])]})\n\n\tmapa = Map(\n\t\tidentifier=\"view-side\",\n\t\tlat=40.3450396,\n\t\tlng=-3.6517684,\n\t\tzoom=6,\n\t\tmarkers=l,\n\t\tstyle=\"height:600px;width:800px;margin:0;\"\n\t)\n\n\treturn render_template('tag.html', mapa=mapa, tag=tag, listado=listado)\n\n\n\n\napp = Flask(__name__)\nGoogleMaps(app)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/tag/')\ndef tag1(tag):\n\treturn tagMethod(tag)\n\t\n@app.route('/tag/', methods=['POST'])\ndef tag2():\n\treturn tagMethod(request.form['tag'])\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n\n","repo_name":"mortalswat/GeoWeb","sub_path":"geoweb.py","file_name":"geoweb.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24721054644","text":"from binascii import hexlify\nimport getpass\nimport os\nimport socket\nimport warnings\nfrom errno import ECONNREFUSED, EHOSTUNREACH\n\nfrom paramiko.agent import Agent\nfrom paramiko.common import DEBUG\nfrom paramiko.config import SSH_PORT\nfrom paramiko.dsskey import DSSKey\nfrom paramiko.ecdsakey import ECDSAKey\nfrom paramiko.hostkeys import HostKeys\nfrom paramiko.py3compat import string_types\nfrom paramiko.resource import ResourceManager\nfrom paramiko.rsakey import RSAKey\nfrom paramiko.ssh_exception import (\n SSHException, BadHostKeyException, NoValidConnectionsError\n)\nfrom paramiko.transport import Transport\nfrom paramiko.util import retry_on_signal, ClosingContextManager\n\n\nclass SSHClient (ClosingContextManager):\n \"\"\"\n A high-level representation of a session with an SSH server. This class\n wraps `.Transport`, `.Channel`, and `.SFTPClient` to take care of most\n aspects of authenticating and opening channels. A typical use case is::\n\n client = SSHClient()\n client.load_system_host_keys()\n client.connect('ssh.example.com')\n stdin, stdout, stderr = client.exec_command('ls -l')\n\n You may pass in explicit overrides for authentication and server host key\n checking. The default mechanism is to try to use local key files or an\n SSH agent (if one is running).\n\n Instances of this class may be used as context managers.\n\n .. versionadded:: 1.6\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create a new SSHClient.\n \"\"\"\n self._system_host_keys = HostKeys()\n self._host_keys = HostKeys()\n self._host_keys_filename = None\n self._log_channel = None\n self._policy = RejectPolicy()\n self._transport = None\n self._agent = None\n\n def load_system_host_keys(self, filename=None):\n \"\"\"\n Load host keys from a system (read-only) file. Host keys read with\n this method will not be saved back by `save_host_keys`.\n\n This method can be called multiple times. Each new set of host keys\n will be merged with the existing set (new replacing old if there are\n conflicts).\n\n If ``filename`` is left as ``None``, an attempt will be made to read\n keys from the user's local \"known hosts\" file, as used by OpenSSH,\n and no exception will be raised if the file can't be read. This is\n probably only useful on posix.\n\n :param str filename: the filename to read, or ``None``\n\n :raises IOError:\n if a filename was provided and the file could not be read\n \"\"\"\n if filename is None:\n # try the user's .ssh key file, and mask exceptions\n filename = os.path.expanduser('~/.ssh/known_hosts')\n try:\n self._system_host_keys.load(filename)\n except IOError:\n pass\n return\n self._system_host_keys.load(filename)\n\n def load_host_keys(self, filename):\n \"\"\"\n Load host keys from a local host-key file. Host keys read with this\n method will be checked after keys loaded via `load_system_host_keys`,\n but will be saved back by `save_host_keys` (so they can be modified).\n The missing host key policy `.AutoAddPolicy` adds keys to this set and\n saves them, when connecting to a previously-unknown server.\n\n This method can be called multiple times. Each new set of host keys\n will be merged with the existing set (new replacing old if there are\n conflicts). 
When automatically saving, the last hostname is used.\n\n :param str filename: the filename to read\n\n :raises IOError: if the filename could not be read\n \"\"\"\n self._host_keys_filename = filename\n self._host_keys.load(filename)\n\n def save_host_keys(self, filename):\n \"\"\"\n Save the host keys back to a file. Only the host keys loaded with\n `load_host_keys` (plus any added directly) will be saved -- not any\n host keys loaded with `load_system_host_keys`.\n\n :param str filename: the filename to save to\n\n :raises IOError: if the file could not be written\n \"\"\"\n\n # update local host keys from file (in case other SSH clients\n # have written to the known_hosts file meanwhile.\n if self._host_keys_filename is not None:\n self.load_host_keys(self._host_keys_filename)\n\n with open(filename, 'w') as f:\n for hostname, keys in self._host_keys.items():\n for keytype, key in keys.items():\n f.write('%s %s %s\\n' % (hostname, keytype, key.get_base64()))\n\n def get_host_keys(self):\n \"\"\"\n Get the local `.HostKeys` object. This can be used to examine the\n local host keys or change them.\n\n :return: the local host keys as a `.HostKeys` object.\n \"\"\"\n return self._host_keys\n\n def set_log_channel(self, name):\n \"\"\"\n Set the channel for logging. The default is ``\"paramiko.transport\"``\n but it can be set to anything you want.\n\n :param str name: new channel name for logging\n \"\"\"\n self._log_channel = name\n\n def set_missing_host_key_policy(self, policy):\n \"\"\"\n Set policy to use when connecting to servers without a known host key.\n\n Specifically:\n\n * A **policy** is an instance of a \"policy class\", namely some subclass\n of `.MissingHostKeyPolicy` such as `.RejectPolicy` (the default),\n `.AutoAddPolicy`, `.WarningPolicy`, or a user-created subclass.\n\n .. note::\n This method takes class **instances**, not **classes** themselves.\n Thus it must be called as e.g.\n ``.set_missing_host_key_policy(WarningPolicy())`` and *not*\n ``.set_missing_host_key_policy(WarningPolicy)``.\n\n * A host key is **known** when it appears in the client object's cached\n host keys structures (those manipulated by `load_system_host_keys`\n and/or `load_host_keys`).\n\n :param .MissingHostKeyPolicy policy:\n the policy to use when receiving a host key from a\n previously-unknown server\n \"\"\"\n self._policy = policy\n\n def _families_and_addresses(self, hostname, port):\n \"\"\"\n Yield pairs of address families and addresses to try for connecting.\n\n :param str hostname: the server to connect to\n :param int port: the server port to connect to\n :returns: Yields an iterable of ``(family, address)`` tuples\n \"\"\"\n guess = True\n addrinfos = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)\n for (family, socktype, proto, canonname, sockaddr) in addrinfos:\n if socktype == socket.SOCK_STREAM:\n yield family, sockaddr\n guess = False\n\n # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(\n # We only do this if we did not get a single result marked as socktype == SOCK_STREAM.\n if guess:\n for family, _, _, _, sockaddr in addrinfos:\n yield family, sockaddr\n\n def connect(\n self,\n hostname,\n port=SSH_PORT,\n username=None,\n password=None,\n pkey=None,\n key_filename=None,\n timeout=None,\n allow_agent=True,\n look_for_keys=True,\n compress=False,\n sock=None,\n gss_auth=False,\n gss_kex=False,\n gss_deleg_creds=True,\n gss_host=None,\n banner_timeout=None\n ):\n \"\"\"\n Connect to an SSH server and authenticate to it. 
The server's host key\n is checked against the system host keys (see `load_system_host_keys`)\n and any local host keys (`load_host_keys`). If the server's hostname\n is not found in either set of host keys, the missing host key policy\n is used (see `set_missing_host_key_policy`). The default policy is\n to reject the key and raise an `.SSHException`.\n\n Authentication is attempted in the following order of priority:\n\n - The ``pkey`` or ``key_filename`` passed in (if any)\n - Any key we can find through an SSH agent\n - Any \"id_rsa\", \"id_dsa\" or \"id_ecdsa\" key discoverable in\n ``~/.ssh/``\n - Plain username/password auth, if a password was given\n\n If a private key requires a password to unlock it, and a password is\n passed in, that password will be used to attempt to unlock the key.\n\n :param str hostname: the server to connect to\n :param int port: the server port to connect to\n :param str username:\n the username to authenticate as (defaults to the current local\n username)\n :param str password:\n a password to use for authentication or for unlocking a private key\n :param .PKey pkey: an optional private key to use for authentication\n :param str key_filename:\n the filename, or list of filenames, of optional private key(s) to\n try for authentication\n :param float timeout:\n an optional timeout (in seconds) for the TCP connect\n :param bool allow_agent:\n set to False to disable connecting to the SSH agent\n :param bool look_for_keys:\n set to False to disable searching for discoverable private key\n files in ``~/.ssh/``\n :param bool compress: set to True to turn on compression\n :param socket sock:\n an open socket or socket-like object (such as a `.Channel`) to use\n for communication to the target host\n :param bool gss_auth:\n ``True`` if you want to use GSS-API authentication\n :param bool gss_kex:\n Perform GSS-API Key Exchange and user authentication\n :param bool gss_deleg_creds: Delegate GSS-API client credentials or not\n :param str gss_host:\n The targets name in the kerberos database. default: hostname\n :param float banner_timeout: an optional timeout (in seconds) to wait\n for the SSH banner to be presented.\n\n :raises BadHostKeyException: if the server's host key could not be\n verified\n :raises AuthenticationException: if authentication failed\n :raises SSHException: if there was any other error connecting or\n establishing an SSH session\n :raises socket.error: if a socket error occurred while connecting\n\n .. versionchanged:: 1.15\n Added the ``banner_timeout``, ``gss_auth``, ``gss_kex``,\n ``gss_deleg_creds`` and ``gss_host`` arguments.\n \"\"\"\n if not sock:\n errors = {}\n # Try multiple possible address families (e.g. IPv4 vs IPv6)\n to_try = list(self._families_and_addresses(hostname, port))\n for af, addr in to_try:\n try:\n sock = socket.socket(af, socket.SOCK_STREAM)\n if timeout is not None:\n try:\n sock.settimeout(timeout)\n except:\n pass\n retry_on_signal(lambda: sock.connect(addr))\n # Break out of the loop on success\n break\n except socket.error as e:\n # Raise anything that isn't a straight up connection error\n # (such as a resolution error)\n if e.errno not in (ECONNREFUSED, EHOSTUNREACH):\n raise\n # Capture anything else so we know how the run looks once\n # iteration is complete. Retain info about which attempt\n # this was.\n errors[addr] = e\n\n # Make sure we explode usefully if no address family attempts\n # succeeded. 
We've no way of knowing which error is the \"right\"\n # one, so we construct a hybrid exception containing all the real\n # ones, of a subclass that client code should still be watching for\n # (socket.error)\n if len(errors) == len(to_try):\n raise NoValidConnectionsError(errors)\n\n t = self._transport = Transport(sock, gss_kex=gss_kex, gss_deleg_creds=gss_deleg_creds)\n t.use_compression(compress=compress)\n if gss_kex and gss_host is None:\n t.set_gss_host(hostname)\n elif gss_kex and gss_host is not None:\n t.set_gss_host(gss_host)\n else:\n pass\n if self._log_channel is not None:\n t.set_log_channel(self._log_channel)\n if banner_timeout is not None:\n t.banner_timeout = banner_timeout\n t.start_client()\n ResourceManager.register(self, t)\n\n server_key = t.get_remote_server_key()\n keytype = server_key.get_name()\n\n if port == SSH_PORT:\n server_hostkey_name = hostname\n else:\n server_hostkey_name = \"[%s]:%d\" % (hostname, port)\n\n # If GSS-API Key Exchange is performed we are not required to check the\n # host key, because the host is authenticated via GSS-API / SSPI as\n # well as our client.\n if not self._transport.use_gss_kex:\n our_server_key = self._system_host_keys.get(server_hostkey_name,\n {}).get(keytype, None)\n if our_server_key is None:\n our_server_key = self._host_keys.get(server_hostkey_name,\n {}).get(keytype, None)\n if our_server_key is None:\n # will raise exception if the key is rejected; let that fall out\n self._policy.missing_host_key(self, server_hostkey_name,\n server_key)\n # if the callback returns, assume the key is ok\n our_server_key = server_key\n\n if server_key != our_server_key:\n raise BadHostKeyException(hostname, server_key, our_server_key)\n\n if username is None:\n username = getpass.getuser()\n\n if key_filename is None:\n key_filenames = []\n elif isinstance(key_filename, string_types):\n key_filenames = [key_filename]\n else:\n key_filenames = key_filename\n if gss_host is None:\n gss_host = hostname\n self._auth(username, password, pkey, key_filenames, allow_agent,\n look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)\n\n def close(self):\n \"\"\"\n Close this SSHClient and its underlying `.Transport`.\n\n .. warning::\n Failure to do this may, in some situations, cause your Python\n interpreter to hang at shutdown (often due to race conditions).\n It's good practice to `close` your client objects anytime you're\n done using them, instead of relying on garbage collection.\n \"\"\"\n if self._transport is None:\n return\n self._transport.close()\n self._transport = None\n\n if self._agent is not None:\n self._agent.close()\n self._agent = None\n\n def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False):\n \"\"\"\n Execute a command on the SSH server. A new `.Channel` is opened and\n the requested command is executed. The command's input and output\n streams are returned as Python ``file``-like objects representing\n stdin, stdout, and stderr.\n\n :param str command: the command to execute\n :param int bufsize:\n interpreted the same way as by the built-in ``file()`` function in\n Python\n :param int timeout:\n set command's channel timeout. 
See `Channel.settimeout`.settimeout\n :return:\n the stdin, stdout, and stderr of the executing command, as a\n 3-tuple\n\n :raises SSHException: if the server fails to execute the command\n \"\"\"\n chan = self._transport.open_session(timeout=timeout)\n if get_pty:\n chan.get_pty()\n chan.settimeout(timeout)\n chan.exec_command(command)\n stdin = chan.makefile('wb', bufsize)\n stdout = chan.makefile('r', bufsize)\n stderr = chan.makefile_stderr('r', bufsize)\n return stdin, stdout, stderr\n\n def invoke_shell(self, term='vt100', width=80, height=24, width_pixels=0,\n height_pixels=0):\n \"\"\"\n Start an interactive shell session on the SSH server. A new `.Channel`\n is opened and connected to a pseudo-terminal using the requested\n terminal type and size.\n\n :param str term:\n the terminal type to emulate (for example, ``\"vt100\"``)\n :param int width: the width (in characters) of the terminal window\n :param int height: the height (in characters) of the terminal window\n :param int width_pixels: the width (in pixels) of the terminal window\n :param int height_pixels: the height (in pixels) of the terminal window\n :return: a new `.Channel` connected to the remote shell\n\n :raises SSHException: if the server fails to invoke a shell\n \"\"\"\n chan = self._transport.open_session()\n chan.get_pty(term, width, height, width_pixels, height_pixels)\n chan.invoke_shell()\n return chan\n\n def open_sftp(self):\n \"\"\"\n Open an SFTP session on the SSH server.\n\n :return: a new `.SFTPClient` session object\n \"\"\"\n return self._transport.open_sftp_client()\n\n def get_transport(self):\n \"\"\"\n Return the underlying `.Transport` object for this SSH connection.\n This can be used to perform lower-level tasks, like opening specific\n kinds of channels.\n\n :return: the `.Transport` for this connection\n \"\"\"\n return self._transport\n\n def _auth(self, username, password, pkey, key_filenames, allow_agent,\n look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host):\n \"\"\"\n Try, in order:\n\n - The key passed in, if one was passed in.\n - Any key we can find through an SSH agent (if allowed).\n - Any \"id_rsa\", \"id_dsa\" or \"id_ecdsa\" key discoverable in ~/.ssh/\n (if allowed).\n - Plain username/password auth, if a password was given.\n\n (The password might be needed to unlock a private key, or for\n two-factor authentication [for which it is required].)\n \"\"\"\n saved_exception = None\n two_factor = False\n allowed_types = set()\n two_factor_types = set(['keyboard-interactive','password'])\n\n # If GSS-API support and GSS-PI Key Exchange was performed, we attempt\n # authentication with gssapi-keyex.\n if gss_kex and self._transport.gss_kex_used:\n try:\n self._transport.auth_gssapi_keyex(username)\n return\n except Exception as e:\n saved_exception = e\n\n # Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key\n # Exchange is not performed, because if we use GSS-API for the key\n # exchange, there is already a fully established GSS-API context, so\n # why should we do that again?\n if gss_auth:\n try:\n self._transport.auth_gssapi_with_mic(username, gss_host,\n gss_deleg_creds)\n return\n except Exception as e:\n saved_exception = e\n\n if pkey is not None:\n try:\n self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))\n allowed_types = set(self._transport.auth_publickey(username, pkey))\n two_factor = (allowed_types & two_factor_types)\n if not two_factor:\n return\n except SSHException as e:\n saved_exception = e\n\n if not two_factor:\n for 
key_filename in key_filenames:\n for pkey_class in (RSAKey, DSSKey, ECDSAKey):\n try:\n key = pkey_class.from_private_key_file(key_filename, password)\n self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))\n allowed_types = set(self._transport.auth_publickey(username, key))\n two_factor = (allowed_types & two_factor_types)\n if not two_factor:\n return\n break\n except SSHException as e:\n saved_exception = e\n\n if not two_factor and allow_agent:\n if self._agent is None:\n self._agent = Agent()\n\n for key in self._agent.get_keys():\n try:\n self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))\n # for 2-factor auth a successfully auth'd key password will return an allowed 2fac auth method\n allowed_types = set(self._transport.auth_publickey(username, key))\n two_factor = (allowed_types & two_factor_types)\n if not two_factor:\n return\n break\n except SSHException as e:\n saved_exception = e\n\n if not two_factor:\n keyfiles = []\n rsa_key = os.path.expanduser('~/.ssh/id_rsa')\n dsa_key = os.path.expanduser('~/.ssh/id_dsa')\n ecdsa_key = os.path.expanduser('~/.ssh/id_ecdsa')\n if os.path.isfile(rsa_key):\n keyfiles.append((RSAKey, rsa_key))\n if os.path.isfile(dsa_key):\n keyfiles.append((DSSKey, dsa_key))\n if os.path.isfile(ecdsa_key):\n keyfiles.append((ECDSAKey, ecdsa_key))\n # look in ~/ssh/ for windows users:\n rsa_key = os.path.expanduser('~/ssh/id_rsa')\n dsa_key = os.path.expanduser('~/ssh/id_dsa')\n ecdsa_key = os.path.expanduser('~/ssh/id_ecdsa')\n if os.path.isfile(rsa_key):\n keyfiles.append((RSAKey, rsa_key))\n if os.path.isfile(dsa_key):\n keyfiles.append((DSSKey, dsa_key))\n if os.path.isfile(ecdsa_key):\n keyfiles.append((ECDSAKey, ecdsa_key))\n\n if not look_for_keys:\n keyfiles = []\n\n for pkey_class, filename in keyfiles:\n try:\n key = pkey_class.from_private_key_file(filename, password)\n self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))\n # for 2-factor auth a successfully auth'd key will result in ['password']\n allowed_types = set(self._transport.auth_publickey(username, key))\n two_factor = (allowed_types & two_factor_types)\n if not two_factor:\n return\n break\n except (SSHException, IOError) as e:\n saved_exception = e\n\n if password is not None:\n try:\n self._transport.auth_password(username, password)\n return\n except SSHException as e:\n saved_exception = e\n elif two_factor:\n try:\n self._transport.auth_interactive_dumb(username)\n return\n except SSHException as e:\n saved_exception = e\n\n # if we got an auth-failed exception earlier, re-raise it\n if saved_exception is not None:\n raise saved_exception\n raise SSHException('No authentication methods available')\n\n def _log(self, level, msg):\n self._transport._log(level, msg)\n\n\nclass MissingHostKeyPolicy (object):\n \"\"\"\n Interface for defining the policy that `.SSHClient` should use when the\n SSH server's hostname is not in either the system host keys or the\n application's keys. Pre-made classes implement policies for automatically\n adding the key to the application's `.HostKeys` object (`.AutoAddPolicy`),\n and for automatically rejecting the key (`.RejectPolicy`).\n\n This function may be used to ask the user to verify the key, for example.\n \"\"\"\n\n def missing_host_key(self, client, hostname, key):\n \"\"\"\n Called when an `.SSHClient` receives a server key for a server that\n isn't in either the system or local `.HostKeys` object. To accept\n the key, simply return. 
To reject, raised an exception (which will\n be passed to the calling application).\n \"\"\"\n pass\n\n\nclass AutoAddPolicy (MissingHostKeyPolicy):\n \"\"\"\n Policy for automatically adding the hostname and new host key to the\n local `.HostKeys` object, and saving it. This is used by `.SSHClient`.\n \"\"\"\n\n def missing_host_key(self, client, hostname, key):\n client._host_keys.add(hostname, key.get_name(), key)\n if client._host_keys_filename is not None:\n client.save_host_keys(client._host_keys_filename)\n client._log(DEBUG, 'Adding %s host key for %s: %s' %\n (key.get_name(), hostname, hexlify(key.get_fingerprint())))\n\n\nclass RejectPolicy (MissingHostKeyPolicy):\n \"\"\"\n Policy for automatically rejecting the unknown hostname & key. This is\n used by `.SSHClient`.\n \"\"\"\n\n def missing_host_key(self, client, hostname, key):\n client._log(DEBUG, 'Rejecting %s host key for %s: %s' %\n (key.get_name(), hostname, hexlify(key.get_fingerprint())))\n raise SSHException('Server %r not found in known_hosts' % hostname)\n\n\nclass WarningPolicy (MissingHostKeyPolicy):\n \"\"\"\n Policy for logging a Python-style warning for an unknown host key, but\n accepting it. This is used by `.SSHClient`.\n \"\"\"\n def missing_host_key(self, client, hostname, key):\n warnings.warn('Unknown %s host key for %s: %s' %\n (key.get_name(), hostname, hexlify(key.get_fingerprint())))\n","repo_name":"Komodo/KomodoEdit","sub_path":"contrib/paramiko/paramiko/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":26334,"program_lang":"python","lang":"en","doc_type":"code","stars":2110,"dataset":"github-code","pt":"2"}
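A short usage sketch for the client class above, close to what its own docstring describes; the host, username and password are placeholders, and note that set_missing_host_key_policy takes a policy instance, not the policy class itself:

import paramiko

with paramiko.SSHClient() as client:          # SSHClient is a ClosingContextManager
    client.load_system_host_keys()             # read ~/.ssh/known_hosts if present
    client.set_missing_host_key_policy(paramiko.WarningPolicy())  # an instance, not the class
    client.connect("ssh.example.com", username="alice", password="secret")  # placeholder credentials
    stdin, stdout, stderr = client.exec_command("uname -a")
    print(stdout.read().decode())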
+{"seq_id":"40225039211","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\nfrom os.path import exists\n\ndef get_player_images():\n url = \"https://www.nba.com/players\"\n\n r = requests.get(url)\n data = r.text\n players_string = data[data.index('\"players\":[')+10:data.index(',\"region\":\"united-states\"}')]\n players_list = json.loads(players_string)\n\n missing_players = []\n failed_players = []\n for player in players_list:\n player_slug = player['PLAYER_SLUG']\n print(player_slug)\n player_file_path = f\"./player_images/{player_slug}.png\"\n\n if not exists(player_file_path):\n player_url = f\"https://www.nba.com/player/{player['PERSON_ID']}/{player_slug}\"\n\n r = requests.get(player_url)\n data = r.text\n soup = BeautifulSoup(data,'html.parser')\n headshot_img = soup.find('img', {\"alt\": f\"{player['PLAYER_FIRST_NAME']} {player['PLAYER_LAST_NAME']} Headshot\"})\n if headshot_img:\n headshot_img_src = headshot_img.get(\"src\")\n else:\n missing_players.append(player_slug)\n\n img_response = requests.get(headshot_img_src)\n if \"AccessDenied\" not in str(img_response.content):\n with open(f\"./images/players/{player_slug}.png\", \"wb\") as f:\n f.write(img_response.content)\n else:\n failed_players.append(player_slug)\n\n print(\"Finished!\")\n\n if len(missing_players) > 0:\n print(\"\\nMissing Headshots:\")\n for p in missing_players:\n print(p)\n\n if len(failed_players) > 0:\n print(\"\\nFailed Headshots:\")\n for p in failed_players:\n print(p)\n","repo_name":"scottgoodell/nba-shot-plots","sub_path":"scripts/scrape_headshots.py","file_name":"scrape_headshots.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31171706923","text":"import os\nimport cv2\nimport numpy as np\nimport math\nimport datetime\n\nnow = datetime.datetime.now()\nDate = now.strftime(\"%d_%m_%Y\")\n\ndef get_image_path(image_name: str):\n now = datetime.datetime.now()\n Date = now.strftime(\"%d_%m_%Y\")\n\n image_path = f'Скриншоты/{Date}' + '/' + image_name\n\n return image_path\n\n\ndef get_image_colors(image_name: str):\n image_path = get_image_path(image_name)\n image = cv2.imread(image_path)\n\n image_colors = []\n\n for i in (1, 2, 3, 4):\n gbr_colors = image[image.shape[0] // i - 1, image.shape[1] // i - 1]\n image_colors.append(gbr_colors[::-1])\n\n return image_colors\n\n\ndef calculate_color_differences_percent(first_color: list, second_color: list):\n maximum_difference = math.sqrt(3 * 256 ** 2)\n\n color_difference = math.sqrt((first_color[0] - second_color[0]) ** 2 + (first_color[1] - second_color[1]) ** 2 + (\n first_color[2] - second_color[2]) ** 2)\n\n color_difference_percent = color_difference / maximum_difference * 100\n\n return color_difference_percent\n\n\ndef check_colors(image_colors: list, zone_colors: list):\n count = 0\n for i in image_colors:\n for j in zone_colors:\n if calculate_color_differences_percent(i, j) < 6.00:\n count += 1\n\n if count >= 2:\n return True\n else:\n return False\n\n\ndef get_zone_name(image_colors: list):\n orange_zone_colors = [[250, 223, 186], [204, 200, 182], [223, 213, 149]]\n red_zone_colors = [[196, 177, 190], [242, 200, 194], [215, 190, 156]]\n\n if check_colors(image_colors, orange_zone_colors):\n return 'В зоне ДНР/ЛНР'\n elif check_colors(image_colors, red_zone_colors):\n return 'В зоне ДНР/ЛНР'\n else:\n return 'Не в зоне ДНР/ЛНР'\n\n\n# result_column = {}\n# image_names = os.listdir(f'Скриншоты/{Date}')\n#\n# for image_name in image_names:\n# image_colors = get_image_colors(image_name)\n# zone_name = get_zone_name(image_colors)\n# result_column[image_name.lower()[:-4]] = zone_name\n#\n# print(result_column)\n\n\ndef get_territory_status(key_words_find: list, result_column: dict):\n territory_status = []\n for i in key_words_find:\n if i == 0:\n territory_status.append(0)\n else:\n row_list = i.split(', ')\n res_list = []\n for j in row_list:\n try:\n result_column[j]\n except KeyError:\n pass\n else:\n res_list.append(result_column[j])\n\n territory_status.append(res_list)\n\n return territory_status\n\n","repo_name":"tishenko1234/Check_territory","sub_path":"Zones.py","file_name":"Zones.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26851271030","text":"import re\nfrom knothash import hash\n\n\ndef fill(grid, x, y, region):\n grid[x][y] = region\n if x > 0 and grid[x - 1][y] == \"1\":\n fill(grid, x - 1, y, region)\n if x < len(grid) - 1 and grid[x + 1][y] == \"1\":\n fill(grid, x + 1, y, region)\n if y > 0 and grid[x][y - 1] == \"1\":\n fill(grid, x, y - 1, region)\n if y < len(grid) - 1 and grid[x][y + 1] == \"1\":\n fill(grid, x, y + 1, region)\n\n\ndef count_regions(grid):\n region = 2\n for i, row in enumerate(grid):\n for j, c in enumerate(row):\n if c == \"1\":\n fill(grid, i, j, str(region))\n region += 1\n return region - 2\n\n\ndef go(inp):\n rows = []\n for i in range(128):\n rows.append(hash(f\"{inp}-{i}\"))\n grid = []\n n = 0\n for row in rows:\n binary = f\"{int(row,16):0128b}\"\n grid.append(list(binary))\n n += len(re.findall(\"1\", binary))\n print(n)\n\n print(count_regions(grid))\n\n\nif __name__ == \"__main__\":\n inp = \"flqrgnkx\"\n inp = \"uugsqrei\"\n go(inp)\n","repo_name":"llimllib/personal_code","sub_path":"misc/advent/2017/14/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"2"}
+{"seq_id":"9849042554","text":"import copy\nimport datetime as DT\nimport ipaddress\nfrom typing import Dict, List, Optional, Set, Tuple, Union\n\nfrom fixpoint_coding_test.Server import Server, csv_to_params\n\n\nclass Network:\n \"\"\"\n 同一ネットワークサブネット内のサーバーをまとめて管理する\n \"\"\"\n\n subnet_ipaddress: ipaddress.IPv4Network\n servers: List[Server]\n\n def __init__(self, subnet_ipaddress: ipaddress.IPv4Network) -> None:\n self.subnet_ipaddress: ipaddress.IPv4Network = subnet_ipaddress\n self.servers: List[Server] = []\n\n def add_server(self, server: Server) -> None:\n \"\"\"\n ネットワークにサーバーを追加する\n\n Parameters\n ----------\n server : Server\n ネットワークに登録する`Server`インスタンス\n\n Raises\n ------\n ValueError\n 追加するサーバーがネットワークに所属していない\n \"\"\"\n\n if not (self.is_inside_network_ip(server=server)):\n raise ValueError(\"This server is not this network's subset.\")\n self.servers.append(server)\n\n def is_inside_network_ip(self, server: Server) -> bool:\n \"\"\"\n ネットワークにサーバーが所属可能か検査する\n\n Parameters\n ----------\n server : Server\n 確認を行う`Server`インスタンス\n\n Returns\n -------\n bool\n サブネットの範囲内であれば`True`\n \"\"\"\n\n return server.ip_address in self.subnet_ipaddress\n\n def get_network_downtime(self, continuous: int = 3) -> List[Tuple[DT.datetime, Optional[DT.datetime]]]:\n \"\"\"\n ネットワークがダウンしている期間の開始日時と終了日時を取得する。\n ネットワークがダウンしている期間は、全てのサーバーがダウンしている最短期間となる。\n\n Parameters\n ----------\n continuous : int, default = 3\n サーバーがダウンしていることを判定するために、何度連続で応答が無いかを決定する閾値。\n\n Returns\n -------\n List[Tuple[DT.datetime, Optional[DT.datetime]]]\n ネットワークがダウンしている開始日時と終了日時のペア。ログの末尾までダウンしている場合、終了日時は`None`となる。\n\n Raises\n ------\n ValueError\n `continuous` が 0以下に指定された\n\n\n Example1\n --------\n 出力は、実際には`datetime`オブジェクト(もしくは`None`)である点に注意。\\n\n server A -> 2020-10-13 10:00:00 ~ 2020-10-13 10:45:00\\n\n server B -> 2020-10-13 10:15:00 ~ 2020-10-13 11:00:00\\n\n Result: [\"2020-10-13 10:15:00\", 2020-10-13 10:45:00]\n\n Example2\n --------\n 出力は、実際には`datetime`オブジェクト(もしくは`None`)である点に注意。\\n\n server A -> 2020-10-13 10:00:00 ~ None\\n\n server B -> 2020-10-13 10:15:00 ~ None\\n\n Result: [\"2020-10-13 10:15:00\", None]\n \"\"\"\n\n if not (continuous > 0):\n raise ValueError(f\"continuous must over 0 (now {continuous})\")\n\n # ネットワーク内の全てのサーバーのダウンタイムを取得する\n downtimes: Dict[ipaddress.IPv4Interface, List[Tuple[DT.datetime, Optional[DT.datetime]]]] = {\n server.ip_address: server.get_downtimes(continuous=continuous) for server in self.servers\n }\n\n results_set: Set[Tuple[DT.datetime, Optional[DT.datetime]]] = set()\n\n for ip_address, _tmp_downtimes in downtimes.items():\n for outer_start, outer_end in _tmp_downtimes:\n _inside_results = False\n for inner_start, inner_end in results_set:\n # 既存のリザルトにデータが存在している場合、スキップ\n if is_overlap_time(outer_start, outer_end, inner_start, inner_end):\n _inside_results = True\n break\n if _inside_results:\n continue\n\n for server in self.servers:\n if server.ip_address is ip_address:\n continue # ��身に対してはスキップする\n\n for inner_start, inner_end in downtimes[server.ip_address]:\n if is_overlap_time(outer_start, outer_end, inner_start, inner_end):\n # 開始時刻の更新: 遅い方に変更する\n outer_start = max(outer_start, inner_start)\n\n # 終了時刻の更新\n if inner_end is None:\n # inner_endがNoneな場合 -> outer_endを残す = Do nothing.\n pass\n elif outer_end is None:\n # outer_endがNoneな場合 -> inner_endを残す\n outer_end = inner_end\n else:\n # どちらでもないときは終了時刻を早い方に変更\n outer_end = min(outer_end, inner_end)\n\n # 有効なペアが発見できているため、同一サーバ内の追加確認をスキップ\n break # breakするとfor文のelse処理に到達せずにおわる\n else:\n # 全てのパターンで条件に当てはまらなかったため、外側のペアを進める\n break\n 
else:\n # パターンが見つかったため、resultsに登録\n results_set.add((outer_start, outer_end))\n\n return sorted(list(results_set))\n\n\ndef is_overlap_time(\n start1: DT.datetime, end1: Optional[DT.datetime], start2: DT.datetime, end2: Optional[DT.datetime]\n) -> bool:\n \"\"\"\n 2つの時刻の範囲が重複しているか判定する。\n \"\"\"\n flag_s1_e2 = end2 is None or start1 <= end2\n flag_e1_s2 = end1 is None or end1 >= start2\n\n return flag_s1_e2 and flag_e1_s2\n\n\ndef load_data(file_path: str, networks: List[Network] = []) -> List[Network]:\n \"\"\"\n ログデータからネットワーク切り分けの行われたサーバーデータを生成する\n\n Parameters\n ----------\n file_path : str\n ログデータのファイルパス\n networks : List[Network], optional\n 既存のデータがある場合のみ指定。\n 追記形式でデータを読み込む\n\n Returns\n -------\n List[Network]\n IPアドレスで切り分けられたネットワークリスト\n \"\"\"\n\n _networks = copy.copy(networks)\n with open(file_path) as f:\n _ = f.readline() # ファイルの先頭は説明文なので読み飛ばす\n for line in f.readlines():\n line_strip = line.strip()\n if len(line_strip) == 0: # 入力が空の場合は処理をスキップ\n continue\n datetime, ip_address, response_msec = csv_to_params(line_strip)\n _tmp_ip = ipaddress.IPv4Interface(ip_address)\n for network in _networks:\n if network.subnet_ipaddress == _tmp_ip.network:\n # 既存ネットワーク上にデータを記録する\n for server in network.servers:\n if server.ip_address == _tmp_ip:\n # 既存のサーバに記録する\n server.append_ping_results(datetime_str=datetime, response_msec=response_msec)\n break\n else:\n # 新規サーバーに記録する\n server = Server(ip_address=ip_address)\n server.append_ping_results(datetime_str=datetime, response_msec=response_msec)\n network.add_server(server=server)\n break\n else:\n # 新規ネットワーク & 新規サーバに記録する\n server = Server(ip_address=ip_address)\n server.append_ping_results(datetime_str=datetime, response_msec=response_msec)\n\n network = Network(_tmp_ip.network)\n network.add_server(server=server)\n _networks.append(network)\n\n return _networks\n\n\ndef print_networks_error(\n networks: List[Network],\n continuous: int = 3,\n with_server_timeout: bool = True,\n with_server_overload: bool = True,\n time_threshold: int = 100,\n) -> None:\n \"\"\"ネットワーク内のエラー情報を含めたサーバーエラー情報を出力する\n\n Parameters\n ----------\n networks : List[Network]\n 表示するネットワークリスト\n continuous : int, default = 3\n ダウン/過負荷状態と判定するために、何応答分まとめて処理を行うかの指定。\n with_server_timeout : bool, default = True\n サーバータイムアウト情報を同時に出力するかどうかを指定する\n with_server_overload : bool, default = True\n サーバー過負荷情報を同時に出力するかどうかを指定する\n time_threshold : int, default = 100\n 過負荷状態と判定するための応答時間閾値\n\n Raises\n ------\n ValueError\n 入力値の入力範囲外の値が入力された\n \"\"\"\n\n if not (continuous > 0):\n raise ValueError(f\"continuous must over 0 (now {continuous})\")\n if not (time_threshold > 0):\n raise ValueError(f\"time_threshold must over 0 (now {time_threshold})\")\n\n def _add_label(\n time_pair: Tuple[DT.datetime, Optional[DT.datetime]],\n address: Union[ipaddress.IPv4Interface, ipaddress.IPv4Network],\n label: str,\n ) -> Tuple[DT.datetime, Optional[DT.datetime], Union[ipaddress.IPv4Interface, ipaddress.IPv4Network], str]:\n return (time_pair[0], time_pair[1], address, label)\n\n SWITCH_DOWN_LABEL = \"switch down\"\n DOWNTIME_LABEL = \"server down\"\n OVERLOAD_LABEL = \"server overload\"\n\n for network in networks:\n network_downtime_list_pre = network.get_network_downtime(continuous=continuous)\n network_downtime_list = [\n _add_label(data, network.subnet_ipaddress, SWITCH_DOWN_LABEL) for data in network_downtime_list_pre\n ]\n\n downtime_list = []\n overload_list = []\n for server in network.servers:\n if with_server_timeout:\n downtime_list_pre = server.get_downtimes(continuous=continuous)\n 
downtime_list.extend(\n [_add_label(data, server.ip_address, DOWNTIME_LABEL) for data in downtime_list_pre]\n )\n if with_server_overload:\n overload_list_pre = server.get_overload_times(continuous=continuous, time_threshold=time_threshold)\n overload_list.extend(\n [_add_label(data, server.ip_address, OVERLOAD_LABEL) for data in overload_list_pre]\n )\n\n errors_list = sorted(network_downtime_list + downtime_list + overload_list, key=lambda x: x[0:2])\n if len(errors_list) != 0:\n print(f\"{network.subnet_ipaddress}\", \"has error\" if len(network_downtime_list) != 0 else \"summary\")\n for start, end, address, label in errors_list:\n print(f\" {address} {label} {start} ~ {end if end is not None else ''}\")\n else:\n print(f\"{network.subnet_ipaddress} has no error\")\n","repo_name":"Hansyo/fixpoint-coding-test","sub_path":"fixpoint_coding_test/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":11690,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
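The network downtime merge above repeatedly intersects per-server downtime windows in which a missing end time means "still down at the end of the log". A stripped-down sketch of that single intersection step, reproducing Example1 from the get_network_downtime docstring:

import datetime as DT

def intersect_downtime(start1, end1, start2, end2):
    """Intersect two downtime windows; an end of None means the outage never recovered."""
    start = max(start1, start2)
    if end1 is None:
        end = end2
    elif end2 is None:
        end = end1
    else:
        end = min(end1, end2)
    if end is not None and start > end:
        return None                      # the windows do not overlap
    return start, end


a = (DT.datetime(2020, 10, 13, 10, 0), DT.datetime(2020, 10, 13, 10, 45))
b = (DT.datetime(2020, 10, 13, 10, 15), DT.datetime(2020, 10, 13, 11, 0))
print(intersect_downtime(*a, *b))        # (2020-10-13 10:15, 2020-10-13 10:45)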
+{"seq_id":"3451458591","text":"import csv\nimport random\nfrom pathlib import Path\n\nfrom data.model import Record\nfrom data.dataset import Dataset\nimport configparser\n\nfrom model.model import NeuralNetwork\nimport torch\nfrom torch.utils.data import DataLoader\n\nparent = Path(__file__).parent\n\nconfig = configparser.ConfigParser()\nconfig.read(str(parent) + '/hyperparameters.ino')\nconfig = config[\"DEFAULT\"]\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('Using {} device'.format(device))\n\n\ndef binary_acc(y_pred, y_test):\n y_pred_tag = torch.round(torch.sigmoid(y_pred))\n\n correct_results_sum = (y_pred_tag == y_test).sum().float()\n acc = correct_results_sum / y_test.shape[0]\n acc = torch.round(acc * 100)\n\n return acc\n\n\n# https://www.kaggle.com/fedesoriano/heart-failure-prediction\nif __name__ == '__main__':\n dataset = []\n for key in config:\n print(key)\n t = config['TEST_PERCENTAGE_SPLIT']\n\n with open(str(parent) + '/data/heart.csv', 'r', newline='') as csvfile:\n linereader = csv.reader(csvfile, delimiter=' ')\n for row in linereader:\n row = row[0].split(',')\n dataset.append(row)\n processed = []\n for rec in dataset[1:]:\n r = Record()\n r.process_record(rec)\n processed.append(r)\n feature_vector = []\n targets = []\n for rec in processed:\n feature_vector.append(rec.get_feature_vector())\n targets.append(rec.get_target())\n\n random.shuffle(processed)\n test_end = round(len(processed) / (1 / float(config['TEST_PERCENTAGE_SPLIT'])))\n test_x = feature_vector[:test_end]\n test_y = targets[:test_end]\n train_x = feature_vector[test_end:]\n train_y = targets[test_end:]\n print(\"Data Split --- Test: {} \\t Train: {}\".format(len(test_x), len(train_x)))\n\n train_set = Dataset(train_x, train_y)\n test_set = Dataset(test_x, test_y)\n\n net = NeuralNetwork(len(test_x[0]), 1)\n train_loader = DataLoader(train_set, batch_size=int(config['BATCH_SIZE']))\n test_loader = DataLoader(test_set, batch_size=len(test_set))\n # train_features, train_labels = iter(train_loader)\n opt = torch.optim.Adam(net.parameters(), lr=float(config['LEARNING_RATE']))\n loss_fn = torch.nn.BCEWithLogitsLoss()\n accuracy = []\n net.train()\n for epoch in range(int(config['MAX_EPOCHS'])):\n train_acc = []\n test_acc = []\n for batch, (local_batch, local_labels) in enumerate(train_loader):\n # Transfer to GPU\n local_batch, local_labels = local_batch.to(device), local_labels.to(device)\n\n preds = net(local_batch)\n local_labels = local_labels.unsqueeze(1)\n loss = loss_fn(preds, local_labels.float())\n\n t_acc = binary_acc(preds, local_labels.float())\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n train_acc.append(t_acc)\n print(\"Epoch: {}, Batch: {}, Train accuracy: {}%\".format(epoch, batch, train_acc[-1]))\n print(\"Epoch: {}, Batch: {}, Train loss: {}\".format(epoch, batch, loss.item()))\n\n with torch.no_grad():\n test_batch = []\n test_labels = []\n for b, l in test_loader:\n test_batch = b\n test_labels = l\n test_batch, test_labels = test_batch.to(device), test_labels.to(device)\n test_labels = test_labels.unsqueeze(1)\n preds = net(test_batch)\n test_acc.append(binary_acc(preds, test_labels))\n print(\"Epoch: {}, Test accuracy: {}%\".format(epoch, test_acc[-1]))\n test_loss = loss_fn(preds, test_labels.float())\n print(\"Epoch: {}, Test loss: {}\".format(epoch, test_loss.item()))\n print(\"Epoch {}\".format(epoch, batch))\n\n 
print(\"EXIT\")\n","repo_name":"DiarmuidKelly/predictors","sub_path":"heart_disease/heart_disease.py","file_name":"heart_disease.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"33399807386","text":"\"\"\"Classes and functions for handling a PDB component library.\"\"\"\n\nimport os, os.path\n\nfrom pCore import AttributableObject , \\\n logFile , \\\n LogFileActive , \\\n YAMLMappingFile_FromObject , \\\n YAMLMappingFile_ToObject , \\\n YAMLPickleFileExtension\nfrom pMolecule import BondType\nfrom .PDBComponent import PDBComponent , \\\n PDBComponentAtom , \\\n PDBComponentBond , \\\n PDBComponentLink , \\\n PDBComponentVariant\nfrom .PDBComponentCIFFileReader import PDBComponentCIFFileReader\n\n#===================================================================================================================================\n# . Parameters.\n#===================================================================================================================================\n# . Amino acids - note UNK is treated as an amino acid in the PDB standard!\n_AminoAcids = [ \"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLN\", \"GLU\", \"GLY\", \"HIS\", \"ILE\", \"LEU\", \"LYS\", \"MET\", \"PHE\", \"PRO\", \"SER\", \"THR\", \"TRP\", \"TYR\", \"UNK\", \"VAL\" ]\n\n# . Default variants for the amino acids.\n# . All amino acids appear to be fully protonated in the cif file. This means ARG and LYS do not need defaults.\n_DefaultVariants = { \"ASP\" : \"Deprotonated\", \"GLU\" : \"Deprotonated\", \"HIS\" : \"Delta Protonated\" }\n\n# . Reduced set of components to save in the default distribution - amino acids, water and some counterions.\n_ReducedComponentSet = set ( _AminoAcids + [ \"HOH\", \"CL\", \"K\", \"NA\" ] )\n\n# . Paths.\n_ComponentPath = \"components\"\n_DefaultCIFFileName = \"components.cif\"\n_LibraryPath = \"pdbComponents\"\n_LinkPath = \"links\"\n_VariantPath = \"variants\"\n\n#===================================================================================================================================\n# . Class.\n#===================================================================================================================================\nclass PDBComponentLibrary ( AttributableObject ):\n \"\"\"Library of PDB components, links and variants.\"\"\"\n\n _attributable = dict ( AttributableObject._attributable )\n _attributable.update ( { \"componentPaths\" : None ,\n \"components\" : None ,\n \"isSetup\" : False ,\n \"libraryPaths\" : None ,\n \"linkPaths\" : None ,\n \"links\" : None ,\n \"paths\" : None ,\n \"readOnly\" : True ,\n \"variantPaths\" : None ,\n \"variants\" : None } )\n\n def _CheckOptions ( self ):\n \"\"\"Check options.\"\"\"\n super ( PDBComponentLibrary, self )._CheckOptions ( )\n self.DefineLibraryPaths ( self.paths )\n self.SetupPaths ( makePaths = not self.readOnly )\n self.ResetCache ( )\n\n def AddItems ( self, items, libraryPath = None ):\n \"\"\"Add items to the library.\"\"\"\n # . Find library path.\n if libraryPath is None:\n libraryPath = self.libraryPaths[0]\n # . Loop over items.\n for item in items:\n # . Identify item type and path.\n if isinstance ( item, PDBComponent ): path = os.path.join ( libraryPath, _ComponentPath )\n elif isinstance ( item, PDBComponentLink ): path = os.path.join ( libraryPath, _LinkPath )\n elif isinstance ( item, PDBComponentVariant ): path = os.path.join ( libraryPath, _VariantPath )\n else: raise TypeError ( \"Invalid PDB component, link or variant.\" )\n # . 
Save the item.\n YAMLMappingFile_FromObject ( os.path.join ( path, item.key + YAMLPickleFileExtension ), \"!\" + item.__class__.__name__, item )\n\n def DefineLibraryPaths ( self, paths ):\n \"\"\"Define the library paths.\"\"\"\n if paths is None: self.libraryPaths = [ self.MakeStandardLibraryPath ( ) ]\n else: self.libraryPaths = paths\n\n def GetComponent ( self, componentLabel, missingItems = None ):\n \"\"\"Get a component.\"\"\"\n key = PDBComponent.MakeKey ( componentLabel )\n if key not in self.components:\n for componentPath in self.componentPaths:\n fileName = os.path.join ( componentPath, key + YAMLPickleFileExtension )\n if os.path.exists ( fileName ):\n self.components[key] = YAMLMappingFile_ToObject ( fileName, PDBComponent )\n break\n component = self.components.get ( key, None )\n if ( component is None ) and ( missingItems is not None ): missingItems.add ( ( \"Component\", key ) )\n return component\n\n def GetLink ( self, linkLabel, leftComponentLabel, rightComponentLabel, missingItems = None ):\n \"\"\"Get a link.\"\"\"\n keys = PDBComponentLink.MakeKeys ( linkLabel, leftComponentLabel, rightComponentLabel )\n key = keys[0]\n if key not in self.links:\n found = False\n for tag in keys:\n if found: break\n for linkPath in self.linkPaths:\n fileName = os.path.join ( linkPath, tag + YAMLPickleFileExtension )\n if os.path.exists ( fileName ):\n self.links[key] = YAMLMappingFile_ToObject ( fileName, PDBComponentLink )\n found = True\n break\n link = self.links.get ( key, None )\n if ( link is None ) and ( missingItems is not None ): missingItems.add ( ( \"Link\", key ) )\n return link\n\n def GetVariant ( self, variantLabel, componentLabel, missingItems = None ):\n \"\"\"Get a variant.\"\"\"\n keys = PDBComponentVariant.MakeKeys ( variantLabel, componentLabel )\n key = keys[0]\n if key not in self.variants:\n found = False\n for tag in keys:\n if found: break\n for variantPath in self.variantPaths:\n fileName = os.path.join ( variantPath, tag + YAMLPickleFileExtension )\n if os.path.exists ( fileName ):\n self.variants[key] = YAMLMappingFile_ToObject ( fileName, PDBComponentVariant )\n found = True\n break\n variant = self.variants.get ( key, None )\n if ( variant is None ) and ( missingItems is not None ): missingItems.add ( ( \"Variant\", key ) )\n return variant\n\n def MakeStandardLibraryPath ( self ):\n \"\"\"Make the standard library path.\"\"\"\n return os.path.join ( os.getenv ( \"PDYNAMO3_PARAMETERS\" ), _LibraryPath )\n\n def ResetCache ( self ):\n \"\"\"Clear stored components, links and variants.\"\"\"\n self.components = {}\n self.links = {}\n self.variants = {}\n\n def SetupPaths ( self, makePaths = False ):\n \"\"\"Set up all paths.\"\"\"\n # . Define paths.\n paths = list ( self.libraryPaths )\n for ( attribute, tailPath ) in ( ( \"componentPaths\", _ComponentPath ) ,\n ( \"linkPaths\" , _LinkPath ) ,\n ( \"variantPaths\" , _VariantPath ) ):\n localPaths = []\n for rootPath in self.libraryPaths:\n path = os.path.join ( rootPath, tailPath )\n localPaths.append ( path )\n paths.extend ( localPaths )\n setattr ( self, attribute, localPaths )\n # . Make paths.\n if makePaths:\n for path in paths:\n if not os.path.exists ( path ): os.mkdir ( path )\n\n#===================================================================================================================================\n# . 
Functions for making the default library.\n#===================================================================================================================================\ndef _MakeDefaultLinks ( ):\n \"\"\"Make links for the default distribution.\"\"\"\n # . Initialization.\n links = []\n # . Disulfide bridge.\n variant = PDBComponentVariant.WithOptions ( atomsToDelete = [ \"HG\" ], componentLabel = \"CYS\" )\n links.append ( PDBComponentLink.WithOptions ( label = \"Disulfide Bridge\", leftAtomLabel = \"SG\", leftVariant = variant, rightAtomLabel = \"SG\", rightVariant = variant, bondType = BondType.Single ) )\n # . Peptide bonds.\n # . The left variant is always the same.\n leftVariant = PDBComponentVariant.WithOptions ( atomsToDelete = [ \"HXT\", \"OXT\" ] )\n # . General case, PRO and UNK.\n rightVariant = PDBComponentVariant.WithOptions ( atomsToDelete = [ \"H2\" ] )\n links.append ( PDBComponentLink.WithOptions ( label = \"Peptide\", leftAtomLabel = \"C\", leftVariant = leftVariant, rightAtomLabel = \"N\", rightVariant = rightVariant, bondType = BondType.Single ) )\n rightVariant = PDBComponentVariant.WithOptions ( atomsToDelete = [ \"H\" ], componentLabel = \"PRO\" )\n links.append ( PDBComponentLink.WithOptions ( label = \"Peptide\", leftAtomLabel = \"C\", leftVariant = leftVariant, rightAtomLabel = \"N\", rightVariant = rightVariant, bondType = BondType.Single ) )\n rightVariant = PDBComponentVariant.WithOptions ( atomsToDelete = [ \"H2\" ], componentLabel = \"UNK\" )\n links.append ( PDBComponentLink.WithOptions ( label = \"Peptide\", leftAtomLabel = \"C\", leftVariant = leftVariant, rightAtomLabel = \"N\", rightVariant = rightVariant, bondType = BondType.Single ) )\n # . Finish up.\n return links\n\ndef _MakeDefaultVariants ( ):\n \"\"\"Make variants for the default distribution.\"\"\"\n # . Initialization.\n variants = []\n # . CTerminal.\n variants.append ( PDBComponentVariant.WithOptions ( label = \"C Terminal\", atomsToDelete = [ \"HXT\" ], formalCharges = { \"OXT\" : -1 } ) )\n # . NTerminal - general case and UNK.\n atomsToAdd = [ PDBComponentAtom.WithOptions ( atomicNumber = 1, label = \"H1\", pdbAlign = 0, toFollow = \"N\" ),\n PDBComponentAtom.WithOptions ( atomicNumber = 1, label = \"H2\", pdbAlign = 0, toFollow = \"H1\" ),\n PDBComponentAtom.WithOptions ( atomicNumber = 1, label = \"H3\", pdbAlign = 0, toFollow = \"H2\" ) ]\n bondsToAdd = [ PDBComponentBond.WithOptions ( atomLabel1 = \"H1\", atomLabel2 = \"N\", bondType = BondType.Single ),\n PDBComponentBond.WithOptions ( atomLabel1 = \"H2\", atomLabel2 = \"N\", bondType = BondType.Single ),\n PDBComponentBond.WithOptions ( atomLabel1 = \"H3\", atomLabel2 = \"N\", bondType = BondType.Single ) ]\n variants.append ( PDBComponentVariant.WithOptions ( label = \"N Terminal\", atomsToAdd = atomsToAdd, atomsToDelete = [ \"H\", \"H2\" ], formalCharges = { \"N\" : +1 }, bondsToAdd = bondsToAdd ) )\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"UNK\", label = \"N Terminal\", atomsToAdd = atomsToAdd, atomsToDelete = [ \"H\", \"H2\" ], formalCharges = { \"N\" : +1 }, bondsToAdd = bondsToAdd ) )\n # . 
NTerminal - PRO.\n atomsToAdd = [ PDBComponentAtom.WithOptions ( atomicNumber = 1, label = \"H2\", pdbAlign = 0, toFollow = \"N\" ),\n PDBComponentAtom.WithOptions ( atomicNumber = 1, label = \"H3\", pdbAlign = 0, toFollow = \"H2\" ) ]\n bondsToAdd = [ PDBComponentBond.WithOptions ( atomLabel1 = \"H2\", atomLabel2 = \"N\", bondType = BondType.Single ),\n PDBComponentBond.WithOptions ( atomLabel1 = \"H3\", atomLabel2 = \"N\", bondType = BondType.Single ) ]\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"PRO\", label = \"N Terminal\", atomsToAdd = atomsToAdd, atomsToDelete = [ \"H\" ], formalCharges = { \"N\" : +1 }, bondsToAdd = bondsToAdd ) )\n # . Deprotonated ASP and GLU.\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"ASP\", label = \"Deprotonated\", atomsToDelete = [ \"HD2\" ], formalCharges = { \"OD2\" : -1 } ) )\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"GLU\", label = \"Deprotonated\", atomsToDelete = [ \"HE2\" ], formalCharges = { \"OE2\" : -1 } ) )\n # . Histidine variants.\n # . Doubly protonated - leave as is but shift charge to Nepsilon.\n bondTypes = [ PDBComponentBond.WithOptions ( atomLabel1 = \"CE1\", atomLabel2 = \"ND1\", bondType = BondType.Single ),\n PDBComponentBond.WithOptions ( atomLabel1 = \"CE1\", atomLabel2 = \"NE2\", bondType = BondType.Double ) ]\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"HIS\", label = \"Doubly Protonated\", formalCharges = { \"ND1\" : 0, \"NE2\" : +1 }, bondTypes = bondTypes ) )\n # . Delta protonated.\n bondTypes = [ PDBComponentBond.WithOptions ( atomLabel1 = \"CE1\", atomLabel2 = \"ND1\", bondType = BondType.Single ),\n PDBComponentBond.WithOptions ( atomLabel1 = \"CE1\", atomLabel2 = \"NE2\", bondType = BondType.Double ) ]\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"HIS\", label = \"Delta Protonated\", atomsToDelete = [ \"HE2\" ], formalCharges = { \"ND1\" : 0, \"NE2\" : 0 }, bondTypes = bondTypes ) )\n # . Epsilon protonated.\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"HIS\", label = \"Epsilon Protonated\", atomsToDelete = [ \"HD1\" ], formalCharges = { \"ND1\" : 0, \"NE2\" : 0 } ) )\n # . Fully deprotonated.\n variants.append ( PDBComponentVariant.WithOptions ( componentLabel = \"HIS\", label = \"Fully Deprotonated\", atomsToDelete = [ \"HD1\", \"HE2\" ], formalCharges = { \"ND1\" : 0, \"NE2\" : -1 } ) )\n # . Finish up.\n return variants\n\ndef _ModifyAminoAcidComponents ( components ):\n \"\"\"Modify the amino acid components for the default distribution.\"\"\"\n for label in _AminoAcids:\n component = components[label]\n component.leftAtom = \"N\"\n component.leftLink = \"Peptide\"\n component.leftTermination = \"N Terminal\"\n component.isInChain = True\n component.isHeteroatom = False\n component.rightAtom = \"C\"\n component.rightLink = \"Peptide\"\n component.rightTermination = \"C Terminal\"\n defaultvariant = _DefaultVariants.get ( component.label, None )\n if defaultvariant is not None: component.variants = [ defaultvariant ]\n\ndef MakeDefaultPDBComponentLibrary ( cifPath = None, fullLibrary = False, log = logFile, libraryPaths = None, outPath = None ):\n \"\"\"Make the default library that comes with the pDynamo distribution.\"\"\"\n # . Get the cifPath.\n if cifPath is None:\n cifPath = os.path.join ( os.getenv ( \"PDYNAMO3_PARAMETERS\" ), _LibraryPath, _DefaultCIFFileName )\n # . 
Get the components.\n components = PDBComponentCIFFileReader.PathToComponents ( cifPath, asDictionary = True, log = log )\n # . Process the amino acid components.\n _ModifyAminoAcidComponents ( components )\n # . Create the default links and variants.\n links = _MakeDefaultLinks ( )\n variants = _MakeDefaultVariants ( )\n # . Get the items to put in the library.\n # . All items.\n if fullLibrary:\n items = list ( components.values ( ) )\n items.extend ( links )\n items.extend ( variants )\n # . Items in the reduced set only.\n else:\n items = []\n # . Components.\n for label in _ReducedComponentSet: items.append ( components[label] )\n # . Links.\n for item in links:\n if ( ( item.leftVariant.componentLabel is None ) or ( item.leftVariant.componentLabel in _ReducedComponentSet ) ) and \\\n ( ( item.rightVariant.componentLabel is None ) or ( item.rightVariant.componentLabel in _ReducedComponentSet ) ): items.append ( item )\n # . Variants.\n for item in variants:\n if ( item.componentLabel is None ) or ( item.componentLabel in _ReducedComponentSet ): items.append ( item )\n # . Create the library.\n library = PDBComponentLibrary.WithOptions ( paths = libraryPaths, readOnly = False )\n library.AddItems ( items, libraryPath = outPath )\n return library\n\n#===================================================================================================================================\n# . Test.\n#===================================================================================================================================\nif __name__ == \"__main__\":\n\n # . Make the default library.\n MakeDefaultPDBComponentLibrary ( fullLibrary = False )\n","repo_name":"pdynamo/pDynamo3","sub_path":"pBabel/PDBComponentLibrary.py","file_name":"PDBComponentLibrary.py","file_ext":"py","file_size_in_byte":16826,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"}
+{"seq_id":"31327084206","text":"\"\"\"\nAuthor: Cameron Buerke, cbuerke@purdue.edu\nAssignment: 04.5 - Prime List\nDate: 09/19/2022\n\nDescription:\n Prints all of the primes from 0 to an inputted number \n\nContributors:\n Name, login@purdue.edu [repeat for each]\n\nMy contributor(s) helped me:\n [ ] understand the assignment expectations without\n telling me how they will approach it.\n [ ] understand different ways to think about a solution\n without helping me plan my solution.\n [ ] think through the meaning of a specific error or\n bug present in my code without looking at my code.\n Note that if you helped somebody else with their code, you\n have to list that person as a contributor.\n\nAcademic Integrity Statement:\n I have not used source code obtained from any unauthorized\n source, either modified or unmodified; nor have I provided\n another student access to my code. The project I am\n submitting is my own original work.\n\"\"\"\n\n\"\"\"Import additional modules below this line (starting with unit 6).\"\"\"\n\n\n\"\"\"Write new functions below this line (starting with unit 4).\"\"\"\n\ndef is_prime(num):\n #initialize i as 2\n i = 2\n #initialize check as 0\n check = 0\n while ((i <= (num ** 0.5)) and (check == 0)):\n if (((num % i) == 0) and (i > 1)):\n check += 1\n \n i += 1\n # check for 1 or 0\n if num == 1 or num == 0:\n check = 1\n\n return not bool(check)\n \n\ndef main():\n #take input from the user \n integer = int(input(\"Enter a positive integer: \"))\n #only exit if the input is = -1 \n\n t_or_f = [0] * (integer + 1)\n check = 0\n primes = f\"\"\n\n for i in range(integer + 1):\n t_or_f[i] = is_prime(i)\n\n if t_or_f[i]:\n if (check != 0):\n primes += f\", {i}\"\n else:\n primes += f\"{i}\"\n check += 1\n\n # Print out the correct output\n print(f\"The primes up to {integer} are: \" + primes)\n \n\n\"\"\"Do not change anything below this line.\"\"\"\nif __name__ == \"__main__\":\n main()","repo_name":"cbuerke/purdue-python-coding-course","sub_path":"Python Code/Exercise 04/prime_list_cbuerke.py","file_name":"prime_list_cbuerke.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13099741126","text":"import re\n\n\nNUMERICAL_VALUE = r'\\d+(?:\\.\\d+)?'\nPARENTHETICAL_EXPRESSION = r'\\(.+\\)'\nOPERATIONS = ('**', '*', '/', '+', '-')\n\n\ndef sub_math_expr(string: str) -> str:\n string = normalize_whitespace(string)\n match = match_entire_string(string)\n return _sub_math_expr(match)\n\n\ndef _sub_math_expr(match: re.match) -> str:\n string = re.sub(\n PARENTHETICAL_EXPRESSION,\n _sub_math_expr,\n match.group(0)\n )\n for operation in OPERATIONS:\n string = re.sub(\n binary_operation_regex(operation),\n maybe_compute_binary_operation,\n string\n )\n return string\n\n\ndef maybe_compute_binary_operation(match: re.match) -> str:\n if match:\n a, operation, b = match.groups()\n a, b = float(a), float(b)\n if operation == '**': result = a ** b\n elif operation == '*': result = a * b\n elif operation == '/': result = a / b\n elif operation == '+': result = a + b\n elif operation == '-': result = a - b\n else: raise ValueError(f'Unrecognized operation: {operation}')\n else:\n result = match.group(0)\n return maybe_simplify(result)\n\n\ndef maybe_simplify(maybe_number: str) -> str:\n try:\n maybe_number = float(maybe_number)\n maybe_number = int(maybe_number)\n except ValueError:\n pass\n finally:\n return str(maybe_number)\n\n\ndef binary_operation_regex(operation: str) -> str:\n return fr'({NUMERICAL_VALUE}) ({re.escape(operation)}) ({NUMERICAL_VALUE})'\n\n\ndef normalize_whitespace(string: str) -> str:\n return re.sub('[\\s\\n]+', ' ', string)\n\n\ndef match_entire_string(string: str) -> re.match:\n return re.match('.*', string)\n\n","repo_name":"mattpetersen/monty","sub_path":"sub_math_expr.py","file_name":"sub_math_expr.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71011962642","text":"from collections.abc import Iterable\nfrom collections import OrderedDict\nfrom typing import Dict, Sequence, List, overload, Union\n\nimport ugraph as ug\nfrom ugraph import Tensor, Graph\n\n\nclass Executor(ug._ugraph_py_api.Executor):\n def __init__(self, graph: Graph, is_training: bool) -> None:\n return super(Executor, self).__init__(graph, is_training)\n\n @overload\n def forward(\n self, inputs: Sequence[Union[Tensor, None]], expect_outputs: Sequence[int] = []\n ) -> List[Tensor]:\n ...\n\n @overload\n def forward(\n self, inputs: Dict[str, Tensor], expect_outputs: Sequence[str] = []\n ) -> List[Tensor]:\n ...\n\n def inputs(self) -> List[Tensor]:\n return [ug.Tensor(tensor) for tensor in self._get_inputs()]\n\n def outputs(self) -> List[Tensor]:\n return [ug.Tensor(tensor) for tensor in self._get_outputs()]\n\n def forward(self, inputs, expect_outputs=[]) -> List[Tensor]:\n if not isinstance(inputs, Iterable):\n raise TypeError(\"executor inputs must be sequence of tensor\")\n\n if not isinstance(expect_outputs, Iterable):\n raise TypeError(\"executor expect_outputs must be sequence of str or int\")\n\n self._forward(inputs, expect_outputs)\n output_tensor = [ug.Tensor(tensor) for tensor in self._get_outputs()]\n\n if not expect_outputs:\n result = output_tensor\n else:\n if all([isinstance(output, int) for output in expect_outputs]):\n result = [output_tensor[idx] for idx in expect_outputs]\n elif all([isinstance(output, str) for output in expect_outputs]):\n output_names = self.get_output_names()\n output_name_map = dict(zip(output_names, output_tensor))\n result = [\n output_name_map[output_name] for output_name in expect_outputs\n ]\n else:\n raise KeyError(\"expect_outputs must be list of int or string\")\n\n if len(result) == 1:\n return result[0]\n else:\n return result\n\n\nclass StaticRunner:\n def __init__(self, module: \"ug.nn.Module\") -> None:\n self._is_init = False\n self._module = module\n\n def _build(self, *args, **kwargs):\n self._graph, is_training = ug.Graph.from_module(self._module, *args, **kwargs)\n self._executor = ug.Executor(self._graph, is_training)\n\n pos_arg_len = len([arg for arg in args if isinstance(arg, Tensor)])\n self._kwargs_index_map = OrderedDict()\n for key, value in kwargs.items():\n if isinstance(value, Tensor):\n self._kwargs_index_map[key] = pos_arg_len\n pos_arg_len += 1\n self._total_arg_len = pos_arg_len\n\n self._is_init = True\n\n def _to_executor_input(self, *args, **kwargs):\n inputs = [arg for arg in args if isinstance(arg, Tensor)]\n\n inputs.extend([None for _ in range(self._total_arg_len - len(inputs))])\n for key, value in kwargs.items():\n if isinstance(value, Tensor):\n inputs[self._kwargs_index_map[key]] = value\n\n return inputs\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n if not self._is_init:\n self._build(*args, **kwargs)\n\n return self._executor.forward(self._to_executor_input(*args, **kwargs))\n","repo_name":"tingkuanpei/ugraph","sub_path":"python/ugraph/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"7814762607","text":"#!/usr/bin/env python3\n\n\"\"\"Create a list of seqtab files, optionally filtering by project\n\n\"\"\"\n\nfrom os import path, walk\nimport argparse\nimport logging\nimport csv\nimport sys\nimport re\nfrom operator import itemgetter\nfrom itertools import groupby\nfrom glob import glob\n\nlog = logging.getLogger(__name__)\n\ndef get_args(arguments):\n parser = argparse.ArgumentParser()\n parser.add_argument('infiles', nargs='+')\n parser.add_argument('--projects', nargs='*')\n\n parser.add_argument('-s', '--seqtabs', type=argparse.FileType('w'),\n help='list of seqtab files, one per line')\n parser.add_argument('-i', '--sample-info', type=argparse.FileType('w'),\n help='concatenated seq info files')\n\n return parser.parse_args(arguments)\n\n\ndef main(arguments):\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s %(levelname)s: %(message)s\")\n\n args = get_args(arguments)\n projects = set(args.projects) if args.projects else set()\n\n # input identifies specimens with 'sampleid' but downstream programs expect 'specimen'\n if args.sample_info:\n writer = csv.DictWriter(\n args.sample_info,\n fieldnames=['specimen', 'sample_name', 'project', 'batch', 'controls'],\n extrasaction='ignore')\n writer.writeheader()\n\n for fname in args.infiles:\n outdir = path.dirname(path.abspath(fname))\n with open(fname) as f:\n reader = csv.DictReader(f)\n for row in reader:\n # input identifies specimens with 'sampleid' but 'specimen' is expected downstream\n row['specimen'] = row['sampleid']\n if projects and row['project'] not in projects:\n continue\n seqtab = path.join(outdir, 'dada', row['specimen'], 'seqtab.csv')\n if not path.exists(seqtab):\n print(f'missing file for {row[\"specimen\"]}')\n continue\n\n if args.sample_info:\n writer.writerow(row)\n if args.seqtabs:\n args.seqtabs.write(seqtab + '\\n')\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"fhcrc/yapp","sub_path":"bin/gather_seqtabs.py","file_name":"gather_seqtabs.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18214412972","text":"import pytest\n\nimport numpy as np\n\nfrom dask.distributed import Client\n\nfrom cuml.test.utils import unit_param, quality_param, stress_param\n\n\n@pytest.mark.parametrize('nrows', [unit_param(1e3), quality_param(1e5),\n stress_param(1e6)])\n@pytest.mark.parametrize('ncols', [unit_param(10), quality_param(100),\n stress_param(1000)])\n@pytest.mark.parametrize('centers', [10])\n@pytest.mark.parametrize(\"cluster_std\", [0.1])\n@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])\n@pytest.mark.parametrize(\"nparts\", [unit_param(1), unit_param(7),\n quality_param(100),\n stress_param(1000)])\n@pytest.mark.parametrize(\"output\", ['array', 'dataframe'])\ndef test_make_blobs(nrows,\n ncols,\n centers,\n cluster_std,\n dtype,\n nparts,\n cluster,\n output):\n\n c = Client(cluster)\n try:\n from cuml.dask.datasets import make_blobs\n\n X, y = make_blobs(nrows, ncols,\n centers=centers,\n cluster_std=cluster_std,\n dtype=dtype,\n n_parts=nparts,\n output=output)\n\n assert X.npartitions == nparts\n assert y.npartitions == nparts\n\n X = X.compute()\n y = y.compute()\n\n assert X.shape == (nrows, ncols)\n assert y.shape == (nrows, 1)\n\n if output == 'dataframe':\n assert len(y[0].unique()) == centers\n assert X.dtypes.unique() == [dtype]\n\n elif output == 'array':\n import cupy as cp\n assert len(cp.unique(y)) == centers\n assert y.dtype == dtype\n\n finally:\n c.close()\n","repo_name":"Pranjal31/cuml","sub_path":"python/cuml/test/dask/test_datasets.py","file_name":"test_datasets.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43053937530","text":"import boto, os, sys\nimport boto.ec2\nimport time\n#import config\n\nCOREOS_OCT22 = \"ami-00000025\"\n#COREOS_IMAGE = \"ami-00000003\"\n#COREOS_SNAP = \"ami-0000001d\"\n#COREOS_CONT = \"ami-0000001e\"\n#COREOS_IMAGE = \"56eed2c2-40b6-4a52-bd55-823457f0ee66\"\nEC2_ENDPOINT = \"130.240.233.106\"\nEC2_ACCESS_KEY=\"58433a1650b14ac2a62a9ad06b9cf1c9\"\nEC2_SECRET_KEY=\"2fa63408610b427a8e5a080126d70e0e\"\n\nboto_region = boto.ec2.regioninfo.RegionInfo(name=\"nova\", endpoint=EC2_ENDPOINT)\nboto_connection = boto.connect_ec2(\n aws_access_key_id=EC2_ACCESS_KEY,\n aws_secret_access_key=EC2_SECRET_KEY,\n is_secure=False,\n region=boto_region,\n port=8773,\n path=\"/services/Cloud\")\n\ndef add_instance():\n try:\n response = boto_connection.run_instances(\n #image_id=\"56eed2c2-40b6-4a52-bd55-823457f0ee66\",\n #COREOS_CONT, \n COREOS_OCT22,\n key_name=\"web_sync2\", \n instance_type=\"m1.tiny\", \n security_groups=[\"default\"]\n #min_count=instances_count,\n #max_count=instances_count\n )\n\n for instance in response.instances: #waiting for the instance to ge an ip\n while instance.private_ip_address == \"\":\n instance.update()\n inst = response.instances[0]\n return inst\n\n\n\n except Exception as e:\n raise e\n #print \"Exception when creating node: \"+ str(e) \n\n\ndef remove_instance(instance,ip): #removes the instance provided\n instances = [instance]\n if (boto_connection.disassociate_address(ip)): #disassociate ip from instance before removing to avoid errors\n boto_connection.terminate_instances(instances)\n boto_connection.release_address(ip)\n #return \"vm at \"+ ip +\" removed\"\n #boto_connection.terminate_instances(instances)\n #time.sleep(1) #sleep because instance needs to be terminated before ip is removed\n #ip = (str(addr).split(\":\"))[1]\n #boto_connection.release_address(ip)\n #return \"something went wrong when removing vm.. :<\"\n\ndef get_floating():\n addr = boto_connection.allocate_address()\n return (str(addr).split(\":\"))[1]\n\n\ndef assign_floating(instance, ip):\n boto_connection.associate_address(instance,ip)\n\n\n","repo_name":"Gegga87/WebSync","sub_path":"manager/openstack_manager.py","file_name":"openstack_manager.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5004393343","text":"# https://www.acmicpc.net/problem/2178\n# DFS/ BFS\nimport time\nimport sys\n# input = sys.stdin.readline\n# N = 100\n# M = 100\n# time :1s => 40,000,000\n# 시간 복잡도 : O(?)\nfrom collections import deque\n\ndef bfs(si,sj):\n q = deque()\n q.append((si,sj))\n visited[si][sj] = 1\n\n while q:\n ci,cj = q.popleft()\n for di,dj in (-1,0),(1,0),(0,1),(0,-1):\n ni,nj = ci+di, cj+dj\n if 0<=ni Response:\n return self._client.get(url, params=params, timeout=timeout) # type: ignore\n\n def post(\n self,\n url: str,\n data: Union[bytes, Dict[str, Any]],\n timeout: float = REQUEST_TIMEOUT_SECONDS,\n ) -> Response:\n return self._client.post(url, data=data, timeout=timeout) # type: ignore\n\n def put(\n self,\n url: str,\n data: Union[bytes, Dict[str, Any]],\n timeout: float = REQUEST_TIMEOUT_SECONDS,\n ) -> Response:\n return self._client.put(url, data=data, timeout=timeout) # type: ignore\n\n def patch(\n self,\n url: str,\n data: Union[bytes, Dict[str, Any]],\n timeout: float = REQUEST_TIMEOUT_SECONDS,\n ) -> Response:\n return self._client.patch(url, data=data, timeout=timeout) # type: ignore\n\n def _login_with_retries(\n self, max_attempts: int, cancellation_token: Optional[CancellationToken]\n ) -> VimeoClient:\n self._messenger.log_status(\n TaskStatus.RUNNING,\n f\"Connecting to the Vimeo API...\",\n )\n for attempt_num in range(1, max_attempts + 1):\n if cancellation_token is not None:\n cancellation_token.raise_if_cancelled()\n credentials = self._credential_store.get_multiple(\n prompt=\"Enter the Vimeo credentials.\",\n credentials=[\n Credential.VIMEO_ACCESS_TOKEN,\n Credential.VIMEO_CLIENT_ID,\n Credential.VIMEO_CLIENT_SECRET,\n ],\n request_input=(\n InputPolicy.ALWAYS if attempt_num > 1 else InputPolicy.AS_REQUIRED\n ),\n )\n access_token = credentials[Credential.VIMEO_ACCESS_TOKEN]\n client_id = credentials[Credential.VIMEO_CLIENT_ID]\n client_secret = credentials[Credential.VIMEO_CLIENT_SECRET]\n client = VimeoClient(\n token=access_token,\n key=client_id,\n secret=client_secret,\n )\n response: Response = client.get(\"/tutorial\") # type: ignore\n if response.status_code == 200:\n self._messenger.log_status(\n TaskStatus.RUNNING,\n f\"Successfully connected to the Vimeo API.\",\n )\n return client\n else:\n self._messenger.log_debug(\n f\"Vimeo client test request failed (attempt {attempt_num}/{max_attempts}). Response had HTTP status {response.status_code}.\",\n )\n raise RuntimeError(\n f\"Failed to connect to the Vimeo API ({max_attempts} attempts)\"\n )\n\n\ndef get_video_data(\n messenger: Messenger, client: ReccVimeoClient, cancellation_token: CancellationToken\n) -> Tuple[str, str]:\n # Wait for the video to be posted\n while True:\n response = client.get(\n \"/me/videos\",\n params={\n \"fields\": \"created_time,uri,metadata.connections.texttracks.uri\",\n \"per_page\": 1,\n \"sort\": \"date\",\n \"direction\": \"desc\",\n },\n )\n\n if response.status_code != 200:\n raise RuntimeError(\n f\"Vimeo client failed to access GET /videos (HTTP status {response.status_code}).\"\n )\n\n response_body = response.json()\n response_data = response.json()[\"data\"][0]\n if response_body[\"total\"] < 1 or (\n datetime.now(timezone.utc)\n - datetime.fromisoformat(response_data[\"created_time\"])\n > NEW_VIDEO_TIMEDELTA\n ):\n messenger.log_status(\n TaskStatus.RUNNING,\n f\"Video not yet found on Vimeo as of {datetime.now().strftime('%H:%M:%S')}. 
Retrying in {int(RETRY_DELAY.total_seconds())} seconds.\",\n )\n autochecklist.sleep_attentively(RETRY_DELAY, cancellation_token)\n else:\n messenger.log_status(\n TaskStatus.RUNNING,\n f\"Found newly-uploaded Vimeo video at URI '{response_data['uri']}'.\",\n )\n break\n\n video_uri = response_data[\"uri\"]\n texttrack_uri = response_data[\"metadata\"][\"connections\"][\"texttracks\"][\"uri\"]\n return (video_uri, texttrack_uri)\n\n\ndef disable_automatic_captions(\n texttracks_uri: str,\n client: ReccVimeoClient,\n messenger: Messenger,\n cancellation_token: CancellationToken,\n):\n # TODO: This isn't removing the autogenerated captions! Could it be that Vimeo only adds them after a certain amount of time?\n response = client.get(texttracks_uri, params={\"fields\": \"uri,name\"})\n\n if response.status_code != 200:\n raise RuntimeError(\n f\"The Vimeo client failed to get the text tracks for today's video. GET {texttracks_uri} returned HTTP status {response.status_code}.\"\n )\n\n response_data = response.json()[\"data\"]\n for texttrack in response_data:\n cancellation_token.raise_if_cancelled()\n # If we wanted to be sure we weren't disabling captions we want to\n # keep, we could check that the language field contains \"autogen.\"\n # That probably isn't necessary as long as this task is performed\n # before our captions are uploaded and there are never existing\n # captions we want to keep.\n try:\n patch_uri = texttrack[\"uri\"]\n patch_response = client.patch(patch_uri, data={\"active\": False})\n\n if patch_response.status_code != 200:\n raise RuntimeError(\n f\"PATCH {patch_uri} returned HTTP status {patch_response.status_code}.\"\n )\n # Catch exceptions instead of just moving this log statement into the\n # if statement so that, if the client itself throws an exception, it\n # gets caught.\n except Exception as e:\n messenger.log_problem(\n ProblemLevel.WARN,\n f\"The Vimeo client failed to disable text track '{texttrack['name']}' at '{texttrack['uri']}' due to an error: {e}\",\n stacktrace=traceback.format_exc(),\n )\n\n\ndef rename_video(video_uri: str, new_title: str, client: ReccVimeoClient):\n response = client.patch(\n video_uri,\n data={\"name\": new_title},\n )\n if response.status_code != 200:\n raise RuntimeError(\n f\"Vimeo client failed to rename video (HTTP status {response.status_code}).\"\n )\n\n\ndef upload_captions_to_vimeo(\n final_captions_file: Path,\n texttrack_uri: str,\n messenger: Messenger,\n client: ReccVimeoClient,\n):\n # See https://developer.vimeo.com/api/upload/texttracks\n\n # (1) Get text track URI: done in get_vimeo_video_data()\n\n # (2) Get upload link for text track\n (upload_link, uri) = _get_vimeo_texttrack_upload_link(texttrack_uri, client)\n messenger.log_status(\n TaskStatus.RUNNING,\n f\"Found the text track upload link and URI for the Vimeo video.\",\n )\n\n # (3) Upload text track\n _upload_texttrack(final_captions_file, upload_link, client)\n messenger.log_status(\n TaskStatus.RUNNING, \"Uploaded the text track for the Vimeo video.\"\n )\n\n # (4) Mark text track as active\n _activate_texttrack(uri, client)\n messenger.log_status(\n TaskStatus.RUNNING,\n \"Marked the newly-uploaded text track for the Vimeo video as active.\",\n )\n\n\ndef _get_vimeo_texttrack_upload_link(\n texttrack_uri: str, client: ReccVimeoClient\n) -> Tuple[str, str]:\n response = client.post(\n texttrack_uri,\n data={\n \"type\": CAPTIONS_TYPE,\n \"language\": CAPTIONS_LANGUAGE,\n \"name\": CAPTIONS_NAME,\n },\n )\n\n status_code = response.status_code\n if 
status_code != 201:\n raise RuntimeError(\n f\"Failed to get text track upload link for Vimeo video (HTTP status {status_code}).\"\n )\n\n response_body = response.json()\n return (response_body[\"link\"], response_body[\"uri\"])\n\n\ndef _upload_texttrack(\n final_captions_file: Path,\n upload_link: str,\n client: ReccVimeoClient,\n):\n # Read the captions from final.vtt\n # If you don't set the encoding to UTF-8, then Unicode characters get mangled\n with open(final_captions_file, \"r\", encoding=\"utf-8\") as f:\n vtt = f.read()\n\n # If you don't encode the VTT file as UTF-8, then for some reason some characters get dropped at the end of the\n # file (if there are Unicode characters)\n response = client.put(upload_link, data=vtt.encode(\"utf-8\"))\n\n status_code = response.status_code\n if status_code != 200:\n raise RuntimeError(\n f\"Failed to upload text track for Vimeo video (HTTP status {status_code})\"\n )\n\n\ndef _activate_texttrack(texttrack_uri: str, client: ReccVimeoClient):\n response = client.patch(texttrack_uri, data={\"active\": True})\n\n status_code = response.status_code\n if status_code != 200:\n raise RuntimeError(\n f\"Failed to mark text track at link '{texttrack_uri}' as active (HTTP status {status_code}).\"\n )\n","repo_name":"recc-tech/tech","sub_path":"scripts/mcr_teardown/vimeo.py","file_name":"vimeo.py","file_ext":"py","file_size_in_byte":10523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"696113552","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Peidong\n# @Site : \n# @File : eg54.py\n# @Software: PyCharm\n\"\"\"\n题目:列表转换为字典。\n\"\"\"\ni=['a','b']\nj=[1,2]\nk=['c',4]\nprint(dict([i,j,k]))","repo_name":"pd-pony/Python_exercise","sub_path":"eg54.py","file_name":"eg54.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15769157333","text":"from typing import Any, Dict, Optional, Tuple\n\nimport torch\nfrom lightning import LightningDataModule\nfrom torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split\n\nfrom torchvision.transforms import transforms\nfrom torchvision import datasets\nimport numpy\nimport torchvision\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\nfrom src.data.sentences_data import SentencesDataset\n\n\nclass Sentences_Datamodule(LightningDataModule):\n \"\"\"Example of LightningDataModule for Pizza_Steak_Sushi dataset.\n\n A DataModule implements 6 key methods:\n def prepare_data(self):\n # things to do on 1 GPU/TPU (not on every GPU/TPU in DDP)\n # download data, pre-process, split, save to disk, etc...\n def setup(self, stage):\n # things to do on every process in DDP\n # load data, set variables, etc...\n def train_dataloader(self):\n # return train dataloader\n def val_dataloader(self):\n # return validation dataloader\n def test_dataloader(self):\n # return test dataloader\n def teardown(self):\n # called on every process in DDP\n # clean up after fit or test\n\n This allows you to share a full dataset without explaining how to download,\n split, transform and process the data.\n\n Read the docs:\n https://lightning.ai/docs/pytorch/latest/data/datamodule.html\n \"\"\"\n\n def __init__(\n self,\n data_dir: str = \"data/\",\n train_file_path: str = None,\n vocab_file_path: str = None,\n train_val_test_split: Tuple[int, int, int] = (55_000, 5_000, 10_000),\n batch_size: int = 64,\n num_workers: int = 0,\n pin_memory: bool = False,\n seq_len: int = 20,\n n_vocab: int = 40000,\n ):\n super().__init__()\n\n # this line allows to access init params with 'self.hparams' attribute\n # also ensures init params will be stored in ckpt\n self.save_hyperparameters(logger=False)\n\n self.data_train: Optional[Dataset] = None\n self.data_val: Optional[Dataset] = None\n self.data_test: Optional[Dataset] = None\n \n def get_samples(self,number_of_samples = 10):\n \"\"\"Return sample images\n number_of_samples: int: 10\n \"\"\"\n if not self.data_train:\n self.prepare_data()\n self.setup()\n\n sentences_data = SentencesDataset(self.hparams.train_file_path,self.hparams.vocab_file_path,\n self.hparams.seq_len,self.hparams.n_vocab)\n\n sample_count = 0\n text_samples = []\n output_samples = []\n for item in sentences_data:\n if sample_count <= number_of_samples:\n text_samples.append(item[\"input\"])\n output_samples.append(item[\"target\"])\n sample_count += 1 \n else:\n break\n res = dict((v,k) for k,v in sentences_data.vocab.items())\n text_samples = [' '.join([res[i.item()] for i in j]) for j in text_samples]\n output_samples = [' '.join([res[i.item()] for i in j]) for j in output_samples]\n\n\n\n return text_samples,output_samples\n \n def get_sample_images_transformed(self,number_of_samples = 10):\n \"\"\"Return sample images\n number_of_samples: int: 10\n \"\"\"\n if not self.data_train:\n self.prepare_data()\n self.setup()\n\n sentences_data = SentencesDataset(self.hparams.train_file_path,self.hparams.vocab_file_path,\n self.hparams.seq_len,self.hparams.n_vocab)\n\n text_samples = [item[\"input\"] for item in sentences_data[:number_of_samples]] \n\n return text_samples\n\n def prepare_data(self):\n \"\"\"Download data if needed.\n\n Do not use it to assign state (self.x = y).\n \"\"\"\n pass\n\n def setup(self, stage: Optional[str] = None):\n \"\"\"Load data. 
Set variables: `self.data_train`, `self.data_val`, `self.data_test`.\n\n This method is called by lightning with both `trainer.fit()` and `trainer.test()`, so be\n careful not to execute things like random split twice!\n \"\"\"\n # load and split datasets only if not loaded already\n if not self.data_train and not self.data_val:\n trainset = SentencesDataset(self.hparams.train_file_path,self.hparams.vocab_file_path,\n self.hparams.seq_len,self.hparams.n_vocab)\n testset = SentencesDataset(self.hparams.train_file_path,self.hparams.vocab_file_path,\n self.hparams.seq_len,self.hparams.n_vocab)\n # self.data_val, self.data_test = random_split(\n # dataset=testset,\n # lengths=self.hparams.train_val_test_split,\n # generator=torch.Generator().manual_seed(42),\n # )\n self.data_train = trainset\n self.data_val = testset\n self.data_test = testset\n\n def train_dataloader(self):\n return DataLoader(\n dataset=self.data_train,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n pin_memory=self.hparams.pin_memory,\n shuffle=True,\n )\n\n def val_dataloader(self):\n return DataLoader(\n dataset=self.data_val,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n pin_memory=self.hparams.pin_memory,\n shuffle=False,\n )\n\n def test_dataloader(self):\n return DataLoader(\n dataset=self.data_test,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n pin_memory=self.hparams.pin_memory,\n shuffle=False,\n )\n\n def teardown(self, stage: Optional[str] = None):\n \"\"\"Clean up after fit or test.\"\"\"\n pass\n\n def state_dict(self):\n \"\"\"Extra things to save to checkpoint.\"\"\"\n return {}\n\n def load_state_dict(self, state_dict: Dict[str, Any]):\n \"\"\"Things to do when loading checkpoint.\"\"\"\n pass\n\n\nif __name__ == \"__main__\":\n _ = Sentences_Datamodule()\n","repo_name":"abishek-raju/vision_meets_nlp","sub_path":"src/data/sentences_datamodule.py","file_name":"sentences_datamodule.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
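A minimal usage sketch for the `Sentences_Datamodule` above — illustrative only, not part of the dataset record. The file paths are placeholders, and the sketch assumes `SentencesDataset` can read them; it only shows the intended call order and the batch layout implied by `get_samples`:

```python
dm = Sentences_Datamodule(
    train_file_path="data/train_sentences.txt",  # placeholder path
    vocab_file_path="data/vocab.txt",            # placeholder path
    batch_size=32,
    seq_len=20,
    n_vocab=40000,
)
dm.prepare_data()
dm.setup()
batch = next(iter(dm.train_dataloader()))
# Each SentencesDataset item is a dict, so the default collate yields
# batched "input" and "target" tensors.
print(batch["input"].shape, batch["target"].shape)
```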
+{"seq_id":"72911309200","text":"#!/usr/bin/env python3.5\n#-*- coding: utf-8 -*-\n\nimport cryptocompare as crypto\n\n\nCrypto = crypto.get_coin_list(format=True)\n\ndef ListeCrypto():\n\tfor n in Crypto :\n\t\tprint (n)\n\ndef Demande():\n\t\n\tMonnaie = input('Entrez le nom de la monnaie dont vous voulez connaitre le prix ou \"Liste\" pour avoir la liste de toute les Crypto-Monnaies: ')\n\tif Monnaie in Crypto :\n\t\tprint (crypto.get_price([Monnaie],['EUR','GBP']))\n\telif Monnaie == 'Liste': \n\t\tListeCrypto()\n\telse :\n\t\tprint(\"La crypto-monnaie que vous demande n'est pas dans notre base de données \")\n\n\nListeCrypto()\nwhile 1 :\n\tDemande()\n","repo_name":"HadrienMorgana/IOT","sub_path":"TP_Crypto.py","file_name":"TP_Crypto.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6147430832","text":"import io\nimport zipfile\nimport logging\n\nimport click\nimport numpy as np\nimport pandas as pd\n\nfrom .config import get_config\nfrom .database import get_db\n\nconfig = get_config()\n\nlog = logging.getLogger(\"tcad\")\n\ncli = click.Group(name=\"tcad\", help=\"TCAD ETL Helpers\")\n\n\ndef _normalize_csv_schema_columns(columns):\n return [col.strip().lower() for col in columns]\n\n\n@cli.command(\"json\")\n@click.option(\"--table\", type=str, required=True)\n@click.option(\"--out\", type=str, default=\"data/out\")\ndef convert_to_json(table, out):\n \"\"\"Convert .mdb table export to json\"\"\"\n from rich.console import Console\n\n console = Console()\n\n table_schema_df = pd.read_csv(f\"data/schema/{table}.csv\", index_col=False)\n table_schema_df.columns = _normalize_csv_schema_columns(table_schema_df.columns)\n\n table_schema = table_schema_df.set_index(\"column_name\")\n table_schema = table_schema[~table_schema.index.isin([\"filler\"])]\n\n def _column_type_map(column_type):\n column_type = column_type.strip()\n\n if column_type.startswith(\"char\"):\n return str\n\n if column_type.startswith(\"int\"):\n return int\n\n if column_type.startswith(\"numeric\"):\n return str\n\n return str\n\n table_schema[\"column_type\"] = table_schema[\"column_type\"].map(_column_type_map)\n\n json_schema = table_schema[~table_schema.index.isin([\"filler\"])].to_dict(\n orient=\"index\"\n )\n\n col_names = []\n col_spec = []\n dtypes = {}\n\n for col_name, spec in json_schema.items():\n col_names.append(col_name)\n col_spec.append((spec[\"offset_start\"] - 1, spec[\"offset_end\"]))\n\n dtypes[col_name] = spec[\"column_type\"]\n\n tcad_data = pd.read_fwf(\n f\"data/tcad/{table}.txt\",\n names=col_names,\n colspecs=col_spec,\n dtype=dtypes,\n chunksize=config.CSV_LOAD_CHUNKSIZE,\n index_col=[\"prop_id\", \"prop_val_yr\", \"py_owner_id\", \"sup_num\"],\n )\n\n export_archive = f\"{out}/{table}.zip\"\n with zipfile.ZipFile(export_archive, \"w\") as property_file:\n total_row_count = 0\n for idx, df in enumerate(tcad_data):\n\n json_filename = f\"{table}.{idx}.json\"\n with property_file.open(json_filename, \"w\") as property_json:\n row_count = df.shape[0]\n console.print(\n f\"Total Exported: {total_row_count} rows Current File: {json_filename}\", # noqa\n end=\"\\r\", # noqa\n )\n console.print(\"\", end=\"\\r\")\n\n json_io = io.TextIOWrapper(property_json)\n df.to_json(json_io, orient=\"table\")\n\n total_row_count += row_count\n\n console.print(\n f\":white_check_mark: Exported: {total_row_count} rows\", emoji=True\n )\n\n console.print(\n f\"Export of {table} to {export_archive} complete! :tada:\", emoji=True\n )\n\n\n@cli.command(\"load_json\")\n@click.option(\"--table\", type=str, required=True)\ndef load_json(table):\n \"\"\"\n Import db table to db\n \"\"\"\n from rich import print\n\n load_json_to_db(table)\n\n print(f\"{table} import complete! 
:tada:\")\n\n\ndef load_json_to_db(\n table_name,\n filename=None,\n data_path=\"data/out\",\n chunksize=config.CSV_LOAD_CHUNKSIZE,\n schema=\"tcad\",\n):\n import pathlib\n import sqlalchemy as sa\n from rich.console import Console\n from rich.progress import track\n\n console = Console()\n\n if not filename:\n filename = f\"{table_name}.zip\"\n\n data_zip_path = pathlib.Path(data_path) / pathlib.Path(filename)\n\n db = get_db(schema=schema)\n\n with zipfile.ZipFile(data_zip_path, \"r\") as data_file, db as transaction:\n\n table = transaction.get_table(table_name.lower())\n\n table.delete()\n\n for json_file in track(\n [f for f in data_file.namelist() if f.endswith(\".json\")],\n description=f\"Loading {table_name}...\",\n ):\n with data_file.open(json_file, \"r\") as table_json:\n json_io = io.TextIOWrapper(table_json)\n\n tcad_data = pd.read_json(json_io, orient=\"table\")\n tcad_index = tcad_data.index.to_frame()\n\n col_types = {}\n\n for col, dtype in tcad_index.dtypes.to_dict().items():\n col_types[col] = sa.Text\n if dtype == np.int64:\n col_types[col] = sa.BigInteger\n\n col_types[\"data\"] = sa.dialects.postgresql.JSONB\n\n table.insert_many(\n list(_yield_rows(tcad_data, tcad_index)),\n ensure=True,\n types=col_types,\n )\n\n console.print(f\"Load of {table_name} table complete! :tada:\", emoji=True)\n\n\ndef _yield_rows(tcad_data, tcad_index):\n for index, values in tcad_data.iterrows():\n row = tcad_index.loc[index, :].to_dict()\n row[\"data\"] = values.to_dict()\n yield row\n","repo_name":"almostprod/property-app","sub_path":"src/property_etl/tcad.py","file_name":"tcad.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"13418987726","text":"import numpy as np\n\nclass Adagrad:\n def __init__(self, parameters, learn_rate=1e-3, k=.9):\n self.k = k\n self.learn_rate = learn_rate\n self.parameters = parameters\n\n self.gs = []\n\n for parameter_dictionary in parameters:\n array_shape = parameter_dictionary[\"array\"].shape\n self.gs.append(np.zeros(array_shape))\n\n def update_weights(self):\n counter = 0\n\n for parameter in self.parameters:\n self.gs[counter] = self.gs[counter] * self.k + parameter[\"grad\"] ** 2\n parameter[\"array\"] = parameter[\"array\"] - self.learn_rate / (np.sqrt(self.gs[counter]) + 1e-99) * parameter[\"grad\"]\n\n counter += 1","repo_name":"yeff-bridges/CodeSamples","sub_path":"MachineLearning/FromScratch/Optimizers/Adagrad.py","file_name":"Adagrad.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43593834074","text":"#테스트 케이스 입력받음\r\nT = int(input())\r\n\r\n#델타값 선정\r\ndx = [0,0,-1,1]\r\ndy = [-1,1,0,0]\r\n\r\n#BFS 메서드 작성\r\ndef BFS(x,y):\r\n #queue는 1이 있는 곳을 좌표값으로 받을 예정(각 지점을 스코프로 지나가므로 좌표로 탐색)\r\n queue = [(x,y)]\r\n \r\n #방문 지점을 체크 처리하기\r\n graph[x][y] = 0\r\n \r\n #queue가 비워질 때까지 반복\r\n while queue:\r\n \r\n #queue의 첫번째 좌표 값을 x,y에 할당\r\n x,y = queue.pop(0)\r\n \r\n #델타탐색(좌,우,하,상)4번\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n \r\n #만약 그래프의 델타탐색 방향이 범위(N,M)를 넘어가면 pass\r\n if nx < 0 or nx >= N or ny < 0 or ny >= M :\r\n continue\r\n \r\n #델타탐색 좌표가 1이라면 quque에 좌표값(x,y)로 삽입(연결된 부분 모두 0으로 처리해야 하므로)\r\n if graph[nx][ny] == 1:\r\n queue.append((nx,ny))\r\n #해당 지점은 0으로 변경\r\n graph[nx][ny] = 0\r\n \r\n \r\n#테스트 케이스 수 만큼 그래프 작성 및 BFS 호출\r\nfor i in range(T):\r\n \r\n #결과값 선언(애벌레 마릿수)\r\n count = 0\r\n \r\n #그래프의 크기와 배추의 갯수 입력받음\r\n N,M,num = map(int,input().split())\r\n \r\n #기본 그래프 생성(리스트 표현식 미스났었음)\r\n graph = [[0]*M for _ in range(N)]\r\n \r\n #배추심기(x,y)좌표 값으로 받아서 심음\r\n for i in range(num):\r\n x,y = map(int, input().split())\r\n graph[x][y] = 1\r\n \r\n #그래프 탐색 시작(0,0)~(N,M)까지\r\n for a in range(N):\r\n for b in range(M):\r\n \r\n #탐색지점이 1일 경우 BFS 시작, 결과값 변경\r\n if graph[a][b] == 1:\r\n BFS(a,b)\r\n count += 1\r\n print(count)","repo_name":"deep-blue-dream/Solve-it-Baekjoon","sub_path":"백준/Silver/1012. 유기농 배추/유기농 배추.py","file_name":"유기농 배추.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7491569868","text":"from Apple import Apple\n\n\nclass Asus(Apple):\n def __init__(self, model: str, screen_diagonal: int or float, screen_rate: int, cpu: str, gpu: int, ssd: int,\n ram: int, logo: bool, operating_system: str):\n super().__init__(model, screen_diagonal, screen_rate, cpu, gpu, ssd, ram, logo)\n self._operating_system = operating_system\n self.__price = 0\n\n def __model_price(self):\n if self._model not in [\"TUF Gaming\", \"Rog\", \"ProArt\"]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._model == \"TUF Gaming\":\n self.__price += 200\n elif self._model == \"Rog\":\n self.__price += 250\n elif self._model == \"ProArt\":\n self.__price += 300\n\n def __screen_diagonal_price(self):\n if self._screen_diagonal not in [13.4, 15, 16]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._screen_diagonal == 13.4:\n self.__price += 50\n elif self._screen_diagonal == 15:\n self.__price += 100\n elif self._screen_diagonal == 16:\n self.__price += 175\n\n def __screen_rate_price(self):\n if self._screen_rate not in [60, 120, 144]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._screen_rate == 60:\n self.__price += 35\n elif self._screen_rate == 120:\n self.__price += 55\n elif self._screen_rate == 144:\n self.__price += 75\n\n def __cpu_price(self):\n if self._cpu not in [\"Ryzen 7\", \"Intel i7\", \"Ryzen 9\", \"Intel i9\"]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._cpu == \"Ryzen 7\":\n self.__price += 50\n elif self._cpu == \"Intel i7\":\n self.__price += 100\n elif self._cpu == \"Ryzen 9\":\n self.__price += 150\n elif self._cpu == \"Intel i9\":\n self.__price += 200\n\n def __gpu_price(self):\n if self._gpu not in [8, 16, 32]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._gpu == 8:\n self.__price += 75\n elif self._gpu == 16:\n self.__price += 150\n elif self._gpu == 32:\n self.__price += 300\n\n def __ssd_price(self):\n if self._ssd not in [512, 1024, 2048, 4096]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._ssd == 512:\n self.__price += 45\n elif self._ssd == 1024:\n self.__price += 75\n elif self._ssd == 2048:\n self.__price += 105\n elif self._ssd == 4096:\n self.__price += 105\n\n def __ram_price(self):\n if self._ram not in [8, 16, 32, 64]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._ram == 8:\n self.__price += 20\n elif self._ram == 16:\n self.__price += 40\n elif self._ram == 32:\n self.__price += 60\n elif self._ram == 64:\n self.__price += 80\n\n def __add_logo(self):\n if self._logo not in [True, False]:\n raise TypeError(\"Error! Check your data and try again!\")\n elif self._logo:\n self.__price += 15\n\n def __os_price(self):\n if self._operating_system not in [\"DOS\", \"Windows 11\"]:\n raise TypeError(\"Error! 
Check your data and try again!\")\n elif self._operating_system == \"Windows 11\":\n self.__price += 35\n\n @property\n def price(self):\n self.__model_price()\n self.__screen_diagonal_price()\n self.__screen_rate_price()\n self.__cpu_price()\n self.__gpu_price()\n self.__ssd_price()\n self.__ram_price()\n self.__add_logo()\n self.__os_price()\n return f\"Your laptop will be cost: {self.__price} $\\n\"\n\n @staticmethod\n def help():\n print(\"Create your own notebook by entering the parameters from the following:\\n\"\n \"Model: TUF Gaming, Rog, ProArt\\n\"\n \"Screen diagonal: 13.4, 15, 16\\n\"\n \"Screen rate: 60, 120, 144\\n\"\n \"CPU: Ryzen 7, Intel i7, Ryzen 9, Intel i9\\n\"\n \"GPU: 8, 16, 32\\n\"\n \"SSD: 512, 1024, 2048, 4096\\n\"\n \"RAM: 8, 16, 32, 64\\n\"\n \"Add Logo: True or False\\n\"\n \"Operating system: DOS, Windows 11\\n\")\n\n\nif __name__ == '__main__':\n first = Asus(\"TUF Gaming\", 15, 120, \"Intel i7\", 16, 2048, 32, False, \"DOS\")\n first.help()\n print(first.price)\n","repo_name":"Benedict3141592/Hillel_Homework_Groshev","sub_path":"HW_Lesson_10/Asus.py","file_name":"Asus.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43048310142","text":"'''\nEvaluate the value of an arithmetic expression in Reverse Polish Notation.\n\nValid operators are +, -, *, /. Each operand may be an integer or another expression.\n\nNote:\n\nDivision between two integers should truncate toward zero.\nThe given RPN expression is always valid. That means the expression would always evaluate to a result and there won't be any divide by zero operation.\n'''\n\n\n#\n# @lc app=leetcode id=150 lang=python3\n#\n# [150] Evaluate Reverse Polish Notation\n#\n\n# @lc code=start\nfrom collections import deque\n\n\nclass Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n result = None\n q = deque([])\n for i in range(len(tokens)):\n if tokens[i] == \"+\":\n first = int(q.pop())\n second = int(q.pop())\n result = first + second\n q.append(result)\n\n elif tokens[i] == \"-\":\n first = int(q.pop())\n second = int(q.pop())\n result = second - first\n q.append(result)\n\n elif tokens[i] == \"*\":\n first = int(q.pop())\n second = int(q.pop())\n result = first * second\n q.append(result)\n\n elif tokens[i] == \"/\":\n first = int(q.pop())\n second = int(q.pop())\n if first * second < 0 and second % first != 0: # critical: differentiate when negative\n result = second // first + 1 # truncted toward zero\n else:\n result = second // first\n q.append(result)\n\n else:\n q.append(tokens[i])\n\n if result == None: # corner case, no operator found\n return int(tokens[-1])\n return result\n\n # @lc code=end\n\n '''\n [\"3\",\"11\",\"5\",\"+\",\"-\"] expected: -13\n [\"3\",\"11\",\"+\",\"5\",\"-\"] expected: 9\n [\"4\",\"-2\",\"/\",\"2\",\"-3\",\"-\",\"-\"] expected: -7\n\n (1) result also to be appended\n (2) a / b when negative AND non-integer will be farther from zero; when positive closer to zero\n '''\n","repo_name":"joyceyu6/coding_courses","sub_path":"2021_4_19_to372/Stack_150.evaluate-reverse-polish-notation.py","file_name":"Stack_150.evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31848710254","text":"import sys\n\ninput_num = sys.stdin.readline\n\nnum = int(input_num())\na = [0]*num\n\nfor i in range(num):\n a[i] = int(input_num())\n\na.sort()\n\nprint(round(sum(a)/num))\nprint(a[int(num-1)//2])\nc = dict()\n\n \ncount = 1\n\nfor i in range(1, num+1):\n if(i 1:\n lih.sort()\n print(lih[1])\nelse:\n print(lih[0])\nprint(a[-1]-a[0])\n# for _ in range(num):\n# j = int(input_num())\n# print(f\"{a}+//+{j}\")\n# if (j < 0):\n# a[j+9000] += 1 \n# else:\n# a[j] += 1\n# for i in range(a):\n# if(a[i] > 5000):\n# b[i] = \n \n# print(sum(b)/num)\n# if(num%2==0):\n# print(b[num//2])\n# else:\n# print(b[num//2]+1)\n# print(a.indx(max(a)))\n# print(max(a))\n\n# for i in range(num):\n# j = int(input_num())\n# if(j<0):\n# j+=9000\n# a[i] = j\n\n# m = max(a)\n# b = [0]*(max(a)+1)\n# result = [0]*num\n# for i in range(a):\n# b[a[i]] += 1\n# for i in range(1, m+1):\n# b[i] += b[i-1]\n# for i in range(b):\n# result[] \n\n\n \n\n","repo_name":"twkim96/Baekjoon","sub_path":"2108_통계학.py","file_name":"2108_통계학.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10006918934","text":"#!/usr/bin/env python\n\n# Author: Epihaius\n# Date: 2019-09-23\n# Last revision: 2020-10-08\n#\n# This is a basic example of how to use the sizer-based GUI system.\n# It specifically showcases how to handle a DirectScrolledList.\n\nfrom panda3d.core import *\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.gui.DirectGui import *\nfrom gui import *\n\n\nclass MyApp:\n\n def __init__(self):\n\n # initialize the Panda3D showbase\n self.showbase = showbase = ShowBase()\n\n # the root node of all DirectGui widgets needs to be pixel2d in order to work\n # with the automatic layout system\n gui_root = showbase.pixel2d\n\n # initialize the GUI system\n self.gui = gui = GUI(showbase)\n\n # Build the GUI layout\n\n # add a horizontally expanding title bar\n title = \"Panda3D: scrolled list layout example\"\n label = DirectLabel(parent=gui_root, text=title, frameSize=(0, 0, -20, 30),\n text_scale=20, borderWidth=(6, 6), relief=DGG.SUNKEN)\n widget = Widget(label)\n borders = (10, 10, 20, 10)\n # by default, the title bar will take up all of the width and height of its\n # cell (the default value for the `alignments` parameter of the `Sizer.add`\n # method is `(\"expand\", \"expand\")`), but the cell itself still needs to be\n # able to take up the entire width of the window; this is done by setting\n # the horizontal proportion (which gets applied to the cell's column) to a\n # value bigger than zero\n gui.sizer.add(widget, proportions=(1., 0.), borders=borders)\n\n # add a horizontally growable sizer that will be proportionately resized\n # vertically and horizontally\n sizer = Sizer(\"horizontal\")\n borders = (10, 10, 20, 10)\n gui.sizer.add(sizer, proportions=(1., 1.), borders=borders)\n\n # add a vertically growable subsizer to the previous sizer;\n # set the vertical gap between each two of its cells to 10 pixels\n # (this is a more convenient alternative to setting the same borders\n # for all but the last of its cells, i.e. 
`(0, 0, 10, 0)`)\n btn_sizer = Sizer(\"vertical\", gaps=(0, 10))\n borders = (0, 20, 0, 0)\n sizer.add(btn_sizer, borders=borders)\n\n # add a couple of horizontally expanding buttons to the subsizer;\n # they will have the same width, determined by the initially largest button\n text = \"My Button\"\n button = DirectButton(parent=gui_root, text=text, text_scale=20, borderWidth=(2, 2))\n widget = Widget(button)\n btn_sizer.add(widget)\n text = \"Insert list into frame\"\n button = DirectButton(parent=gui_root, text=text, text_scale=20,\n borderWidth=(2, 2), command=self.__insert_list)\n widget = Widget(button)\n btn_sizer.add(widget)\n # add vertical space with a fixed size\n btn_sizer.add((0, 30))\n text = \"A third button\"\n button = DirectButton(parent=gui_root, text=text, text_scale=20, borderWidth=(2, 2))\n widget = Widget(button)\n btn_sizer.add(widget)\n\n # add some horizontally stretching space, so that widgets added after it\n # will be pushed to the right\n sizer.add((0, 0), proportions=(1., 0.))\n\n # add a frame resizable in both directions and taking up two thirds of\n # the available horizontal space (because of the ratio of the proportions\n # used for the frame and the stretching space that was previously added)\n self.frame = frame = DirectFrame(parent=gui_root, frameColor=(.5, .6, .7, 1.))\n widget = Widget(frame)\n sizer.add(widget, proportions=(2., 1.))\n\n # assign a sizer to the frame to manage the layout of its child widgets\n self.frame_sizer = frame_sizer = Sizer(\"vertical\")\n widget.sizer = frame_sizer\n\n # add a horizontally expanding label with right-aligned text to the frame\n text = \"right-aligned text\"\n label = DirectLabel(parent=frame, text=text,\n text_scale=20, text_align=TextNode.A_right)\n widget = Widget(label)\n borders = (10, 10, 20, 10)\n frame_sizer.add(widget, proportions=(1., 0.), borders=borders)\n\n # add a non-resizing, right-aligned button to the frame\n text = \"Button in frame \"\n button = DirectButton(parent=frame, text=text, text_scale=20, borderWidth=(2, 2))\n widget = Widget(button)\n borders = (0, 10, 10, 20)\n frame_sizer.add(widget, alignments=(\"max\", \"min\"), borders=borders)\n\n # add a non-resizing input field, centered horizontally within its cell,\n # which itself is assigned all of the width available to it, by setting\n # its horizontal proportion to 1.0\n field = DirectEntry(parent=gui_root, text_scale=20, focus=1)\n widget = Widget(field)\n gui.sizer.add(widget, proportions=(1., 0.), alignments=(\"center\", \"min\"))\n\n # add a horizontally expanding status bar\n status_text = \"GUI ready and awaiting input\"\n label = DirectLabel(parent=gui_root, text=status_text, text_pos=(20, -10),\n textMayChange=1, frameSize=(0, 0, -10, 10), text_scale=20,\n text_align=TextNode.A_left)\n widget = Widget(label)\n borders = (10, 10, 10, 20)\n gui.sizer.add(widget, proportions=(1., 0.), borders=borders)\n\n # let the GUI system create the layout\n gui.layout()\n\n # run the app\n showbase.run()\n\n def __insert_list(self):\n\n scrolled_list = DirectScrolledList(\n parent=self.frame,\n\n decButton_text=\"Dec\",\n decButton_text_scale=20,\n decButton_borderWidth=(4, 4),\n\n incButton_text=\"Inc\",\n incButton_text_scale=20,\n incButton_borderWidth=(4, 4),\n\n frameColor=(1., 0., 0., .5),\n forceHeight=29 # item height\n )\n\n list_widget = ScrolledListWidget(\n scrolled_list,\n # the width of the scroll buttons will take up 20 % of the available\n # space, since they are surrounded by space that is stretched using\n # a 
proportion of 1.: .25 / (1. + .25) = 1/5 = .2\n scrollbtn_proportion=.25,\n scrollbtn_borders=(5, 5, 10, 10),\n itemframe_borders=(5, 5, 0, 0),\n margins=(10, 10) # left and right borders around the items in the frame\n )\n\n b1 = DirectButton(text=(\"Button1\", \"click!\", \"roll\", \"disabled\"),\n borderWidth=(4, 4), relief=2, text_scale=20)\n\n b2 = DirectButton(text=(\"Feel free to remove me\", \"Goodbye!\",\n \"Yeah I'm still here\", \"Not now\"), borderWidth=(4, 4), relief=2,\n text_scale=20, command=lambda: self.__remove_item(list_widget, b2))\n\n list_widget.add_item(b1)\n list_widget.add_item(b2)\n\n checkbtn = DirectCheckButton(text=\"CheckButton\",\n text_scale=20, boxPlacement=\"right\", borderWidth=(3, 3), indicator_text_scale=20,\n indicator_text_pos=(0, 4), indicator_borderWidth=(2, 2), boxBorder=1)\n list_widget.add_item(checkbtn)\n\n l1 = DirectLabel(text=\"Test1\", text_scale=20)\n l2 = DirectLabel(text=\"Test2\", text_scale=20)\n l3 = DirectLabel(text=\"Test3\", text_scale=20)\n\n list_widget.add_item(l1)\n list_widget.add_item(l2)\n list_widget.add_item(l3)\n\n for fruit in ['apple', 'pear', 'banana', 'orange']:\n l = DirectLabel(text=fruit, text_scale=20)\n list_widget.add_item(l)\n\n borders = (10, 10, 5, 5)\n # add the list to the frame, below the right-aligned text label, using index=1\n self.frame_sizer.add(list_widget, proportions=(1., 1.), borders=borders, index=1)\n\n # update the GUI layout\n self.gui.layout()\n\n def __remove_item(self, list_widget, item):\n\n list_widget.remove_item(item)\n\n # update the GUI layout\n self.gui.layout()\n\n\nMyApp()\n","repo_name":"Epihaius/DirectGui-layout-system","sub_path":"scrolled_list.py","file_name":"scrolled_list.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"22675022084","text":"import argparse\nfrom text_mutation_generation import *\nimport pandas as pd\nimport traceback\nimport re\nfrom tqdm import tqdm\nfrom typing import List, Tuple\n\n\ndef getSentencesToBeProcessed(input_file: str) -> List[str]:\n \"\"\"\n This function will read from the input_file path passed in and return\n a list of all the sentences to be read. Each sentence will have whitespace\n stripped from the end.\n\n Parameters\n ----------\n input_file : str\n The path to the file containing a list of sentences we are interested\n in processing. The file should contain one sentence on each line.\n\n Returns\n -------\n List[str]\n A list of all the sentences in the input file..\n \"\"\"\n with open(input_file, 'r') as f:\n allSentences = []\n for line in f:\n allSentences.append(line.strip())\n\n return allSentences\n\n\ndef getCleanedSentences(sentences: List[str], goal_length: int) -> Tuple[List[str], List[str]]:\n \"\"\"\n This function will take a list of sentences and return two lists. The first\n list will contain goal_length sentences that hav ebeen cleaned for processing,\n while the second list will contain the sentences that will not be ready for\n processing.\n\n Parameters\n ----------\n sentences : List[str]\n A list of sentences to be cleaned.\n goal_length : int\n The length of the sentences we want to return.\n\n Returns\n -------\n Tuple[List[str], List[str]]\n A tuple containing two lists. The first list contains the goal_length\n sentences that have been cleaned for processing, while the second list\n contains the sentences that will not be ready for processing.\n \"\"\"\n print(f\"Getting {goal_length} clean sentences for processing.\")\n cleanedSentences = []\n\n while len(cleanedSentences) < goal_length and len(sentences) > 0:\n currentClean = re.sub(\n \"-*\\s*\\([A-Z][A-Z]\\)\\s*\", \"\", sentences.pop(0).strip()).strip()\n currentClean = re.sub(\"\\s*-\\s\", \" \", currentClean).strip()\n if re.search(\"[0-9]+.\", currentClean) or currentClean.isspace() or len(currentClean) == 0:\n continue\n cleanedSentences.append(currentClean)\n\n assert len(\n cleanedSentences) == goal_length, f'Expected number of cleaned sentences to be {goal_length}, got {len(cleanedSentences)}'\n\n return cleanedSentences, sentences\n\n\ndef processSentences(sentences: List[str], output_file_name: str, input_file: str, max_length: int = 1000) -> None:\n \"\"\"\n This function will process the sentences in the list and write them to 3 output\n files. One output file will contain sentences with substitution mutations,\n one will contain sentences with deletion mutations, and one will contain\n sentences with repetition mutations. The naming convention for the output files\n will be(output_file_name + \"_mutation.csv\"). The output files will contain\n 4 columns: original text, mutated texts and the index tags and mutation tags.\n\n Parameters\n ----------\n sentences: List[str]\n A list of sentences to be processed.\n output_file_name: str\n The name of the output file.\n input_file: str\n The path to the input file. We will rewrie this file with all of\n\n\n \"\"\"\n cols = ['original_text', 'mutated_text', 'index_tags', 'mutation_tags']\n\n try:\n subs_df = pd.read_csv(output_file_name + \"_substitutions.csv\")\n dels_df = pd.read_csv(output_file_name + \"_deletions.csv\")\n reps_df = pd.read_csv(output_file_name + \"_repetitions.csv\")\n except FileNotFoundError:\n print(\"No output file found. 
Creating new output file.\")\n subs_df = pd.DataFrame(\n columns=cols)\n dels_df = pd.DataFrame(\n columns=cols)\n reps_df = pd.DataFrame(\n columns=cols)\n\n # Get max_length sentences to process. We need them to be clean ones.\n cleanedSentences, notCleanedSentences = getCleanedSentences(\n sentences, max_length)\n\n with open(input_file, 'w') as f:\n for sentence in tqdm(notCleanedSentences, desc='Writing remaining sentences back to input file.'):\n f.write(sentence + '\\n')\n\n for cleanedSentence in tqdm(cleanedSentences, desc='Mutating Sentences'):\n try:\n # Keep Generating Deletion Permutations Until we get one\n # with at least one valid word.\n del_tags = []\n while 'O' not in del_tags:\n new_sentence_del, del_index_tags, del_tags = mutate_selectively(\n cleanedSentence, \"del\", del_prob=0.2, remove_punc=False)\n dels_df = pd.concat([dels_df, pd.DataFrame(\n {'original_text': [cleanedSentence], 'mutated_text': [new_sentence_del], 'index_tags': [del_index_tags], 'mutated_tags': [del_tags]})])\n\n # Get Mutated Sentence with Repetititon\n new_sentence_rep, rep_index_tags, rep_tags = mutate_selectively(\n cleanedSentence, \"rep\", rep_prob=0.2, max_reps=3, remove_punc=False)\n reps_df = pd.concat([reps_df, pd.DataFrame(\n {'original_text': [cleanedSentence], 'mutated_text': [new_sentence_rep], 'index_tags': [rep_index_tags], 'mutated_tags': [rep_tags]})])\n\n # Get Mutated Sentence with Substitutions\n new_sentence_subs, sub_index_tags, sub_tags = mutate_selectively(\n cleanedSentence, \"sub\", sub_prob=0.2, remove_punc=False)\n\n # Check that the number of words in new_sentence_subs is the same\n # as the number of tags in sub_tags\n if len(new_sentence_subs.split()) != len(sub_index_tags):\n raise Exception(\n \"Number of words in new_sentence_subs and sub_tags do not match.\")\n\n subs_df = pd.concat([subs_df, pd.DataFrame(\n {'original_text': [cleanedSentence], 'mutated_text': [new_sentence_subs], 'index_tags': [sub_index_tags], 'mutated_tags': [sub_tags]})])\n except IndexError:\n traceback.print_exc()\n print(sentence)\n continue\n\n subs_df.to_csv(output_file_name + \"_substitutions.csv\",\n index=False)\n dels_df.to_csv(output_file_name + \"_deletions.csv\",\n index=False)\n reps_df.to_csv(output_file_name + \"_repetitions.csv\",\n index=False)\n\n\nif __name__ == '__main__':\n # Arg Parsing. This lets us go from the command line.\n # Demo: python create_training_data.py --input_file data/unprocessedSentences.txt --output_file data/output.txt --max_length=5\n parser = argparse.ArgumentParser(\n description='Create training data for the repetition task.')\n parser.add_argument('--input_file', type=str,\n help='Path to the input file.', required=True)\n parser.add_argument('--output_file', type=str,\n help='Path to the output file.', required=True)\n parser.add_argument('--max_length', type=int,\n help='Maximum number of lines to parse.', default=None)\n\n args = parser.parse_args()\n sentences = getSentencesToBeProcessed(args.input_file)\n processed_sentences = processSentences(\n sentences, args.output_file, args.input_file, args.max_length)\n","repo_name":"cehrett/running_records","sub_path":"repetition_data_generation/create_text_training_data.py","file_name":"create_text_training_data.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"27570501975","text":"import os\nimport cv2\nimport yaml\nimport pdfkit\nimport base64\nimport subprocess as sp\nimport json\n\nfrom flask import current_app, render_template\n\n\ndef exif_info(input_video_path, output_dir):\n exif_file_path = f\"{output_dir}/exif.json\"\n exif_tool_cmd = [\"exiftool\", \"-j\", input_video_path,\n \">>\", exif_file_path]\n\n output = sp.run(exif_tool_cmd, capture_output=True)\n json_dict = json.loads(output.stdout.decode(\"utf-8\"))[0]\n\n exif_dict = {}\n key_list = [\"FileType\", \"Duration\", \"FileSize\", \"BitDepth\", \"VideoFrameRate\", \"Rotation\", \"XResolution\", \"YResolution\"] \n\n for key in json_dict:\n if key in key_list:\n exif_dict[key] = json_dict[key]\n\n with open(exif_file_path, 'w') as file:\n file.write(json.dumps(exif_dict, indent=2))\n file.close()\n\n return exif_file_path\n\n\ndef extract_frames_from_video(\n input_video_path, \n output_frames_path, \n fps, \n quality):\n convertCMD = [\"ffmpeg\", '-i', input_video_path,\n '-vf', f'fps={fps}', '-qscale:v', str(quality), output_frames_path]\n\n proc = sp.Popen(convertCMD)\n\n try:\n outs, errs = proc.communicate()\n except TimeoutError:\n proc.kill()\n\n\ndef generate_panorama_img(frames_dir, output_dir, file_name):\n image_names = os.listdir(frames_dir)\n images = []\n\n for image_name in image_names:\n img = cv2.imread(f\"{frames_dir}/{image_name}\")\n img = cv2.resize(img, (0, 0), None, 0.2, 0.2)\n images.append(img)\n\n stitcher = cv2.Stitcher.create(cv2.STITCHER_PANORAMA)\n # stitcher.setPanoConfidenceThresh(0.0)\n\n (status, result) = stitcher.stitch(images)\n\n error = ''\n if status == cv2.STITCHER_OK:\n print(\"Panorama success\")\n cv2.imwrite(f\"{output_dir}/{file_name}\", result)\n else:\n if status == cv2.STITCHER_ERR_NEED_MORE_IMGS:\n error = \"Need more images - ERR_NEED_MORE_IMGS\"\n elif status == cv2.STITCHER_ERR_HOMOGRAPHY_EST_FAIL:\n error = \"Failed - ERR_NEED_MORE_IMGS\"\n elif status == cv2.STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL:\n error = \"Failed - STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL\"\n\n return error\n\ndef generate_yaml_params(params_dict, folder_name):\n yaml_file_path = f\"{current_app.config['OUTPUT_FOLDER']}/{folder_name}/params.yaml\"\n\n with open(yaml_file_path, 'w') as file:\n documents = yaml.dump(params_dict, file, sort_keys=False)\n\n return yaml_file_path\n\n\ndef image_file_path_to_base64_string(filepath: str) -> str:\n with open(filepath, 'rb') as f:\n return base64.b64encode(f.read()).decode()\n\n\ndef generate_pdf_report(\n panorama_img_path,\n folder_name,\n original_uploaded_file_name,\n param_frame_rate,\n param_output_format,\n param_quality,\n param_is_exif_info_captured,\n created_at):\n exif_json = f\"{current_app.config['OUTPUT_FOLDER']}/{folder_name}/exif.json\"\n exif_json_dict = {}\n if param_is_exif_info_captured:\n with open(exif_json, 'r') as file:\n exif_json_dict = json.loads(file.read())\n\n key_list = [\"FileType\", \"Duration\", \"FileSize\", \"BitDepth\", \"VideoFrameRate\", \"Rotation\", \"XResolution\", \"YResolution\"]\n rendered = render_template(\n \"report_template.html\",\n img_string=image_file_path_to_base64_string(panorama_img_path),\n logo=image_file_path_to_base64_string(\"templates/pg-icon.png\"),\n original_uploaded_file_name=original_uploaded_file_name,\n param_frame_rate=param_frame_rate,\n param_output_format=param_output_format,\n param_is_exif_info_captured=param_is_exif_info_captured,\n param_quality=param_quality,\n exif_json_dict=exif_json_dict,\n key_list=key_list,\n 
created_at=created_at)\n pdf = pdfkit.from_string(\n rendered, f\"{current_app.config['OUTPUT_FOLDER']}/{folder_name}/report.pdf\")\n","repo_name":"saravanaselvan/pg-video-convertor-api","sub_path":"services/video_report/video_report.py","file_name":"video_report.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"800439824","text":"from test import testEqual\ndef longest_substring(s):\n '''\n Function that will find the longest substring in alphabetical order\n '''\n start = 0\n end = 1\n new_start = 0\n new_end = 1\n max_len = 1\n \n for i in range(len(s)-1):\n if s[i]>s[i+1]:\n if (new_end - new_start) > max_len:\n max_len = (new_end - new_start)+1\n start = new_start\n end = new_end\n new_start = i+1\n new_end = i+2\n else:\n new_end +=1\n if (new_end - new_start) > max_len:\n max_len = (new_end - new_start)+1\n start = new_start\n end = new_end\n\n return s[start:end]\n\ns = 'azcbobobegghakl' \ntestEqual(longest_substring(s),'beggh')\ntestEqual(longest_substring('c'),'c')\ntestEqual(longest_substring('ahtajkybcdeg'),'bcdeg')\n\n\n\n","repo_name":"banarasi/codingProblems","sub_path":"longestSubString.py","file_name":"longestSubString.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12332569353","text":"from ..parser import load_file\nfrom . import paths\nfrom .paths import (\n key_sets,\n UoD,\n all_paths,\n possibilities,\n sources,\n known,\n)\nimport sys\nfrom ..commands import register_commands\n\n\ndef handle_refinement(path, Q, P, verbose=False):\n \"\"\"\n Given a specification file and protocol names Q and P, check whether Q refines P\n\n Args:\n path: File containing protocols\n Q: Name of protocol that should refine P\n P: Name of protocol that should be refined by Q\n verbose: Print more details\n \"\"\"\n spec = load_file(path)\n Q = spec.protocols[Q]\n P = spec.protocols[P]\n\n result = refines(UoD(), P.public_parameters.keys(), Q, P, verbose=verbose)\n if result[\"ok\"] == True:\n print(\" {} Refines {}\".format(Q.name, P.name))\n return True\n else:\n print(result)\n return False\n\n\nregister_commands({\"refinement\": handle_refinement})\n\n\ndef subsumes(U, params, a, b, verbose=False):\n \"\"\"Path a subsumes path b\"\"\"\n if verbose:\n print(\"path a: \", a)\n print(\"path b: \", b)\n for p in params:\n sources_a = sources(a, p)\n sources_b = sources(b, p)\n if sources_a != sources_b:\n if verbose:\n print(\"sources don't match: {} != {}\".format(sources_a, sources_b))\n return False\n\n for r in U.roles:\n for keys in key_sets(a):\n if verbose:\n print(keys)\n known_a = known(a, keys, r).intersection(params)\n known_b = known(b, keys, r).intersection(params)\n if known_a != known_b:\n if verbose:\n print(\n \"{}'s knowledge doesn't match: {} != {}\".format(\n r.name, known_a, known_b\n )\n )\n return False\n elif verbose:\n print(\"{} knows: {}\".format(r.name, known_a))\n if len(b) > 1:\n b2 = b[:-1]\n return any(subsumes(U, params, a[:end], b2, verbose) for end in range(len(a)))\n else:\n return True\n\n\ndef refines(U, params, Q, P, verbose=False):\n \"\"\"Check that Q refines P\"\"\"\n\n U_Q = U + UoD.from_protocol(Q)\n U_P = U + UoD.from_protocol(P)\n\n p_keys = set()\n q_keys = set()\n for m in U_P.messages:\n p_keys.update(m.keys)\n for m in U_Q.messages:\n q_keys.update(m.keys)\n if not p_keys >= q_keys:\n return {\n \"ok\": False,\n \"p_keys\": p_keys,\n \"q_keys\": q_keys,\n \"diff\": p_keys.symmetric_difference(q_keys),\n \"reason\": \"{} uses keys that do not appear in {}\".format(Q.name, P.name),\n }\n\n paths_Q = all_paths(U_Q, verbose=verbose, reduction=False)\n paths_P = all_paths(U_P, verbose=verbose, reduction=False)\n\n longest_Q = longest_P = []\n for q in paths_Q:\n if len(q) > len(longest_Q):\n longest_Q = q\n for p in paths_P:\n if len(p) > len(longest_P):\n longest_P = p\n\n if verbose:\n print(\"{}: {} paths, longest path: {}\".format(P.name, len(paths_P), longest_P))\n # print(paths_P)\n print(\"{}: {} paths, longest path: {}\".format(Q.name, len(paths_Q), longest_Q))\n # print(paths_Q)\n\n checked = 0\n for q in paths_Q:\n # print(\"q: \", q)\n match = None\n for p in paths_P:\n # print(\"p: \", p)\n if subsumes(U_P, params, q, p, False):\n match = p\n # print(\"p branches: \", branches(U_P, p))\n # print(\"q branches: \", branches(U_Q, q))\n if not possibilities(U_P, p) or possibilities(U_Q, q):\n break # only try again if p has branches but q doesn't\n if match == None:\n return {\n \"ok\": False,\n \"path\": q,\n \"reason\": \"{} has path that does not subsume any path in {}\".format(\n Q.name, P.name\n ),\n }\n if possibilities(U_P, match) and not possibilities(U_Q, q):\n # subsumes(U_P, params, q, match, True)\n return {\n \"ok\": False,\n \"path\": q,\n \"match\": match,\n \"reason\": \"path in {} has 
branches, but path in {} does not\".format(\n P.name, Q.name\n ),\n }\n checked += 1\n if verbose:\n print(\n \"\\r checked: {} of {} paths ({:.1f}%)\".format(\n checked, len(paths_Q), checked / len(paths_Q) * 100\n ),\n end=\"\",\n )\n if verbose:\n print()\n return {\"ok\": True}\n","repo_name":"SimonHolzknecht/Masters_Dissertation","sub_path":"src/bspl/verification/refinement.py","file_name":"refinement.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15635757542","text":"from django.shortcuts import render, redirect\nfrom projects.models import Project\nfrom projects.forms import ProjectForm\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef list_projects(request):\n projects_list = Project.objects.filter(members=request.user)\n context = {\n \"projects_list\": projects_list,\n }\n return render(request, \"projects/list.html\", context)\n\n\n@login_required\ndef show_project(request, pk):\n project_detail = Project.objects.get(pk=pk)\n context = {\n \"project_detail\": project_detail,\n }\n return render(request, \"projects/detail.html\", context)\n\n\n@login_required\ndef create_project(request):\n if request.method == \"POST\":\n form = ProjectForm(request.POST)\n if form.is_valid():\n project = form.save()\n return redirect(\"show_project\", pk=project.id)\n else:\n form = ProjectForm()\n\n context = {\"form\": form}\n\n return render(request, \"projects/create.html\", context)\n","repo_name":"kariscourey/hr-project-alpha","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13814154103","text":"'''\nN개 자연수의 자릿수의 합을 구하고, 그 합이 최대인 자연수를 출력 하는 프로그램을 작성하시오.\n \n** details\n첫 줄에 자연수의 개수 N(3<=N<=100)이 주어지고, 그 다음 줄에 N개의 자연수가 주어진다.\n각 자연수의 크기는 10,000,000를 넘지 않는다.\n\n** input\n3\n125 15232 97\n\n**output\n97\n(9+7=16으로 최대이므로 출력)\n'''\n\n# make func : digit_sum\n# 방법_1 : int를 활용\ndef digit_sum(number):\n sum = 0\n\n while number > 0:\n sum += number%10\n number = number//10\n\n return sum\n\n\n# 방법_2 : string을 활용\ndef digit_sum_by_str(strNumber):\n sum = 0\n\n for i in str(strNumber):\n sum+=int(i)\n\n return sum\n\n# --------------------------------------------\n\nnInput = int(input())\nInputNumList = list(map(int, input().split()))\n\nmaxSum = -1\nmaxNumber = -1\n\nfor number in InputNumList:\n digitSum = digit_sum(number)\n if digitSum > maxSum:\n maxSum = digitSum\n maxNumber = number\n\nprint(maxNumber)\n","repo_name":"donggyuu/algorithm","sub_path":"algorithm-python/chp1_sum_of_digit.py","file_name":"chp1_sum_of_digit.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9942038893","text":"import sys\nimport random as rng\nimport numpy as np\n\nboard = [[0]*3]*3\nboard = np.array(board)\ntur = 2\n\n\ndef winner(board,a):\n board_check_1 = board == a\n row = np.any(np.sum(board_check_1,axis = 0) == 3)\n column = np.any(np.sum(board_check_1,axis = 1) == 3)\n diag = np.trace(board_check_1) == 3\n off_diag = np.trace(np.flip(board_check_1,1)) == 3\n if row or column or diag or off_diag == True:\n return a\n elif np.all(board != 0):\n return 'Tie'\n\ndef cheater(i,j):\n if board[i,j] != 0:\n return True\n return False\n\ni = 0\nj = 0\n\ndef player1(board,tur):\n if np.all(board == 0):\n return rng.randint(0,2),rng.randint(0,2)\n for i in [2,1,0]:\n for j in [2,1,0]:\n if cheater(i,j) == False:\n return i,j\n\ndef player2(board,tur):\n if np.all(board == 0):\n return 0,0\n board_check_1 = board == 1\n row = np.any(np.sum(board_check_1,axis = 0) == 2)\n column = np.any(np.sum(board_check_1,axis = 1) == 2)\n diag = np.trace(board_check_1) == 2\n off_diag = np.trace(np.flip(board_check_1,1)) == 2\n if row or column or diag or off_diag == True:\n return a\n \n\ndef The_game(player1,player2):\n tur = 1\n while winner(board,1) == None and winner(board,2) == None:\n if tur ==1:\n tur = 2\n elif tur == 2:\n tur = 1\n if tur == 1:\n i,j = player1(board,tur)\n if tur == 2:\n i,j = player2(board,tur)\n\n if cheater(i,j) == True:\n return tur, False\n board[i,j] = tur\n print(board)\n if winner(board,1) == True:\n return 1, True\n elif winner(board,2) == True:\n return 2, True\n return 0, True\n \n\nprint(The_game(player1,player1))\n","repo_name":"JonathanMelcher/UTTT","sub_path":"Tic_tac_toe/The_arena_ttt.py","file_name":"The_arena_ttt.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8737994820","text":"# Imports\r\nimport pygame\r\nimport random\r\n\r\n# Initialize game engine\r\npygame.init()\r\n\r\n\r\n# Window\r\n\r\nWIDTH = 1600\r\nHEIGHT = 1000\r\nSIZE = (WIDTH, HEIGHT)\r\nTITLE = \"Wonder Woman & The Battle For The Universe\"\r\nscreen = pygame.display.set_mode(SIZE)\r\npygame.display.set_caption(TITLE)\r\n\r\n\r\n# Timer\r\nclock = pygame.time.Clock()\r\nrefresh_rate = 60\r\n\r\n\r\n# Colors\r\nRED = (255, 0, 0)\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nYELLOW = (255, 255, 0)\r\nGREEN = (100, 255, 100)\r\nBLUE = (30, 112, 219)\r\n\r\n\r\n# Fonts\r\nFont = pygame.font.Font\r\nFONT_SM = Font(None, 24)\r\nFONT_MD = Font(None, 32)\r\nFONT_LG = Font(None, 64)\r\nFONT_XL = Font(\"assets/fonts/spacerangerboldital.ttf\", 96)\r\ncomic_font = Font(\"assets/fonts/comic_font.ttf\", 165)\r\nbattle_font = Font(\"assets/fonts/battle_font.ttf\", 98)\r\ncomic2_font = Font(\"assets/fonts/comic_font.ttf\", 80)\r\njedi_font = Font(\"assets/fonts/Starjedi.ttf\", 35)\r\n\r\n# Images\r\nload = pygame.image.load\r\nwonder_woman_img = load(\"assets/images/wonder_woman.png\").convert_alpha()\r\nlaser_img = load(\"assets/images/laserGreen.png\").convert_alpha()\r\nenemy_img = load(\"assets/images/shipYellow_manned.png\").convert_alpha()\r\nenemy2_img = load(\"assets/images/shipPink_manned.png\").convert_alpha()\r\nenemy3_img = load(\"assets/images/shipGreen_manned.png\").convert_alpha()\r\nbomb_img = load(\"assets/images/laserRed.png\").convert_alpha()\r\ntitle_img = load(\"assets/images/wonder_woman_title.png\").convert_alpha()\r\nbackground = load(\"assets/images/space_background.png\").convert_alpha()\r\nwhole_background_img = load(\"assets/images/wonder_woman_background1.png\").convert_alpha()\r\nheart_img = load(\"assets/images/heart_img.png\").convert_alpha()\r\nalien_heart_img = load(\"assets/images/alien_life_img.png\").convert_alpha()\r\nlost_background_img = load(\"assets/images/lost_background.png\").convert_alpha()\r\nleft_wonder_woman_img = load(\"assets/images/left_wonder_woman.png\").convert_alpha()\r\nmega_mob_img = load(\"assets/images/small_mega_mob_img.gif\").convert()\r\nmega_bomb_img = load(\"assets/images/mega_bomb.png\").convert_alpha()\r\nwin_background_img = load(\"assets/images/win_background.png\").convert_alpha()\r\nbonus_img = load(\"assets/images/alien_life_img.png\").convert_alpha()\r\npink_damaged_img = load(\"assets/images/shipPink_damage.png\").convert_alpha()\r\ngreen_damaged_img = load(\"assets/images/shipGreen_damage2.png\").convert_alpha()\r\nyellow_damaged_img = load(\"assets/images/shipYellow_damage2.png\").convert_alpha()\r\n\r\n# Sounds\r\nsound = pygame.mixer.Sound\r\nEXPLOSION = sound('assets/sounds/explosion.ogg')\r\npew = sound('assets/sounds/pew.ogg')\r\nhurt = sound('assets/sounds/hurt.ogg')\r\n\r\nplaying_music = \"assets/sounds/background.ogg\"\r\n\r\nlost_music = \"assets/sounds/my_own_music.ogg\"\r\nstarting_music = \"assets/sounds/starting_music.ogg\"\r\nwinning_music = \"assets/sounds/winning_music.ogg\"\r\n\r\n\r\n# Stages\r\nSTART = 0\r\nPLAYING = 1\r\nLOST = 2\r\nWON = 3\r\nEND = 4\r\n\r\n# Game classes\r\nclass Wonder_woman(pygame.sprite.Sprite):\r\n def __init__(self, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n \r\n\r\n self.speed = 3\r\n\r\n def move_left(self):\r\n self.rect.x -= self.speed\r\n \r\n def move_right(self):\r\n self.rect.x += self.speed\r\n\r\n def shoot(self):\r\n print()\r\n 
print(\"Pew!\")\r\n\r\n laser = Laser(laser_img)\r\n laser.rect.centerx = self.rect.centerx\r\n laser.rect.centery = self.rect.top\r\n lasers.add(laser)\r\n \r\n def update(self):\r\n global stage\r\n '''check screen edges'''\r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n elif self.rect.right > WIDTH: \r\n self.rect.right = WIDTH\r\n\r\n '''check bombs'''\r\n hit_list = pygame.sprite.spritecollide(self, bombs, True, pygame.sprite.collide_mask)\r\n \r\n if len(hit_list) > 0:\r\n print()\r\n print(\"LOST A LIFE!\")\r\n hurt.play()\r\n player.score += -2\r\n player.strength_bar += -1\r\n\r\n '''check powerups'''\r\n bonus_hit_list = pygame.sprite.spritecollide(self, powerups, True, pygame.sprite.collide_mask)\r\n \r\n for hit in bonus_hit_list:\r\n print()\r\n print(\"Gained your lives\")\r\n hit.apply(self)\r\n\r\n \r\n '''check mobs'''\r\n mob_hit_list = pygame.sprite.spritecollide(self, mobs, False, pygame.sprite.collide_mask)\r\n\r\n \r\n for hit in mob_hit_list:\r\n player.score += -3\r\n self.kill()\r\n stage = LOST\r\n\r\n \r\nclass Laser(pygame.sprite.Sprite):\r\n def __init__(self, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.speed = 5\r\n\r\n\r\n\r\n def update(self):\r\n self.rect.y -= self.speed\r\n\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\n \r\nclass Bomb(pygame.sprite.Sprite):\r\n def __init__(self, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.speed = 3\r\n \r\n\r\n\r\n def update(self):\r\n self.rect.y += self.speed\r\n hit_list = pygame.sprite.spritecollide(self, lasers, True, pygame.sprite.collide_mask)\r\n \r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\n if len(hit_list) > 0:\r\n self.kill()\r\n print()\r\n print(\"You got a bomb!\")\r\n\r\n \r\nclass Mob(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n \r\n def drop_bomb(self):\r\n print()\r\n print(\"Bwwamp!\")\r\n\r\n bomb = Bomb(bomb_img)\r\n bomb.rect.centerx = self.rect.centerx\r\n bomb.rect.centery = self.rect.top\r\n bombs.add(bomb)\r\n\r\n def update(self):\r\n hit_list = pygame.sprite.spritecollide(self, lasers, True, pygame.sprite.collide_mask)\r\n if len(hit_list) > 0:\r\n player.score += 2\r\n self.kill()\r\n print()\r\n print(\"BOOM!\")\r\n \r\n \r\nclass Fleet():\r\n def __init__(self, mobs):\r\n self.mobs = mobs\r\n self.speed = 5\r\n self.drop = 30\r\n self.moving_right = True\r\n self.drop_speed = 20\r\n self.bomb_rate = 60 # lower the number = faster the bomb\r\n self.cleared = False\r\n self.defeated = False\r\n\r\n \r\n def move(self):\r\n hits_edge = False\r\n \r\n for m in mobs:\r\n if self.moving_right:\r\n m.rect.x += self.speed\r\n\r\n if m.rect.right >= WIDTH:\r\n hits_edge = True\r\n else:\r\n m.rect.x -= self.speed\r\n\r\n if m.rect.left <= 0:\r\n hits_edge = True\r\n \r\n if hits_edge:\r\n self.reverse()\r\n self.move_down()\r\n \r\n def reverse(self):\r\n self.moving_right = not self.moving_right\r\n \r\n def move_down(self):\r\n for m in mobs:\r\n m.rect.y += self.drop\r\n \r\n def choose_bomber(self):\r\n rand = random.randrange(self.bomb_rate)\r\n mob_list = mobs.sprites()\r\n\r\n if len(mob_list) > 0 and rand == 0:\r\n bomber = random.choice(mob_list)\r\n 
bomber.drop_bomb()\r\n\r\n def change_speed(self):\r\n if len(mobs) == 12:\r\n for m in mobs:\r\n self.speed = 10\r\n \r\n if len(mobs) == 6:\r\n for m in mobs:\r\n self.speed = 17\r\n \r\n if len(mobs) == 1:\r\n for m in mobs:\r\n self.speed = 35\r\n for l in lasers:\r\n self.speed = 10\r\n \r\n def update(self):\r\n self.move()\r\n self.choose_bomber()\r\n self.change_speed()\r\n\r\nclass HealthPowerUp(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n self.speed = 7\r\n\r\n def apply(self, wonder_woman):\r\n player.strength_bar = 3\r\n\r\n def update(self):\r\n self.rect.y += self.speed\r\n\r\n if self.rect.top > HEIGHT:\r\n self.kill()\r\n \r\n \r\n# Game helper functions\r\ndef show_title_screen():\r\n '''text'''\r\n space_txt = comic_font.render(\"Press Space\", True, WHITE)\r\n\r\n\r\n '''blit images/text'''\r\n screen.blit(whole_background_img, [0,0])\r\n screen.blit(space_txt, [WIDTH/2 - space_txt.get_width()/2 , HEIGHT/2 - space_txt.get_height()/2])\r\n\r\n\r\ndef show_won_screen():\r\n '''text'''\r\n won_end_txt = comic_font.render(\"YOU WON!\", True, WHITE)\r\n score_txt = battle_font.render(\"SCORE = \" + str(player.score), 1, WHITE)\r\n end_time_txt = battle_font.render(\"TIME = \" + str(ticks//refresh_rate), 1, WHITE)\r\n high_score_txt1 = jedi_font .render(\"Your perfect score will be recorded. Enter your name.\", True, WHITE)\r\n\r\n '''screen blit'''\r\n screen.blit(win_background_img, [0,0])\r\n screen.blit(won_end_txt, [WIDTH/2 - won_end_txt.get_width()/2 , HEIGHT/6 - won_end_txt.get_height()/6])\r\n screen.blit(score_txt, [WIDTH/2 - score_txt.get_width()/2 , 660])\r\n screen.blit(end_time_txt, [WIDTH/2 - end_time_txt.get_width()/2 , 760])\r\n show_high_score()\r\n stage = END\r\n\r\ndef show_lost_screen():\r\n '''text'''\r\n lost_end_txt = comic2_font.render(\"THE ALIENS HAVE WON!\", True, WHITE)\r\n score_txt = battle_font.render(\"SCORE = \" + str(player.score), 1, WHITE)\r\n end_time_txt = battle_font.render(\"TIME = \" + str(ticks//refresh_rate), 1, WHITE)\r\n high_score_txt2 = jedi_font .render(\"Your score will not be recorded.\", True, WHITE)\r\n\r\n '''screen blit'''\r\n screen.blit(lost_background_img, [0,0])\r\n screen.blit(lost_end_txt, [WIDTH/2 - lost_end_txt.get_width()/2 , HEIGHT/6 - lost_end_txt.get_height()/6])\r\n screen.blit(score_txt, [WIDTH/2 - score_txt.get_width()/2 , HEIGHT/3 - score_txt.get_height()/3])\r\n screen.blit(end_time_txt, [WIDTH/2 - end_time_txt.get_width()/2 , HEIGHT/2 - end_time_txt.get_height()/2])\r\n screen.blit(high_score_txt2, [WIDTH/2 - high_score_txt2.get_width()/2 , 870])\r\n stage = END\r\n \r\ndef show_stats():\r\n timer = ticks//refresh_rate \r\n \r\n '''text'''\r\n timer_txt = battle_font.render(str(timer), 1, BLUE)\r\n score_txt = battle_font.render(str(player.score), 1, RED)\r\n \r\n '''blit text'''\r\n screen.blit(timer_txt, [1500, 20])\r\n screen.blit(score_txt, [10, 20])\r\n \r\n '''Changing elements'''\r\n if player.strength_bar == 3:\r\n screen.blit(heart_img, [100,10])\r\n screen.blit(heart_img, [200, 10])\r\n screen.blit(heart_img, [300, 10])\r\n\r\n elif player.strength_bar == 2:\r\n screen.blit(heart_img, [100,10])\r\n screen.blit(heart_img, [200, 10])\r\n\r\n elif player.strength_bar == 1:\r\n screen.blit(heart_img, [100,10])\r\n \r\ndef record_high_score():\r\n if player.score == 38:\r\n input_file = 
open(\"high_score.txt\",\"a\")\r\n name = input(\"enter your name: \")\r\n print(\"Your Highscore has been recorded\")\r\n print(name,file=input_file)\r\n input_file.close()\r\n file = open('high_score.txt', 'r') \r\n names = file.readlines()\r\n file.close()\r\n\r\ndef show_high_score():\r\n file = open('high_score.txt', 'r') \r\n names = file.read().splitlines()\r\n perfect_players_txt = jedi_font.render(\"Last five perfect scorers: \" + str(names[-5:]), True, WHITE)\r\n screen.blit(perfect_players_txt, [WIDTH/3 - perfect_players_txt.get_width()/3 , 890])\r\n file.close()\r\n\r\ndef set_music(track):\r\n if pygame.mixer.music.get_busy():\r\n pygame.mixer.music.stop()\r\n\r\n if track != None: \r\n pygame.mixer.music.load(track)\r\n pygame.mixer.music.play(-1)\r\n\r\ndef setup():\r\n global stage, done, ticks\r\n global player, wonder_woman, lasers, mobs, fleet, bombs, powerups\r\n \r\n ''' Make game objects '''\r\n wonder_woman = Wonder_woman(wonder_woman_img)\r\n wonder_woman.rect.centerx = WIDTH/2\r\n wonder_woman.rect.bottom = HEIGHT\r\n \r\n ''' Make sprite groups '''\r\n player = pygame.sprite.GroupSingle()\r\n player.add(wonder_woman)\r\n\r\n lasers = pygame.sprite.Group()\r\n bombs = pygame.sprite.Group()\r\n \r\n mob1 = Mob(0, 400, enemy_img)\r\n mob2 = Mob(200, 400, enemy_img)\r\n mob3 = Mob(400, 400, enemy_img)\r\n mob4 = Mob(600, 400, enemy_img)\r\n mob5 = Mob(800, 400, enemy_img)\r\n mob6 = Mob(1000, 400, enemy_img)\r\n mob7 = Mob(1200, 400, enemy_img)\r\n '''enemy 2'''\r\n mob8 = Mob(100, 200, enemy2_img)\r\n mob9 = Mob(300, 200, enemy2_img)\r\n mob10 = Mob(500, 200, enemy2_img)\r\n mob11 = Mob(700, 200, enemy2_img)\r\n mob12 = Mob(900, 200, enemy2_img)\r\n mob13 = Mob(1100, 200, enemy2_img)\r\n '''enemy 3'''\r\n mob14 = Mob(200, 0, enemy3_img)\r\n mob15 = Mob(400, 0, enemy3_img)\r\n mob16 = Mob(600, 0, enemy3_img)\r\n mob17 = Mob(800, 0, enemy3_img)\r\n mob18 = Mob(1000, 0, enemy3_img)\r\n\r\n '''final enemy'''\r\n mega_mob = Mob(800, -400, mega_mob_img)\r\n \r\n mobs = pygame.sprite.Group()\r\n \r\n mobs.add(mob1,mob2,mob3,mob4,mob5,mob6,mob7)\r\n mobs.add(mob8,mob9,mob10,mob11,mob12,mob13)\r\n mobs.add(mob14,mob15,mob16,mob17,mob18, mega_mob)\r\n\r\n fleet = Fleet(mobs)\r\n\r\n '''bonus'''\r\n \r\n powerup1 = HealthPowerUp(800, -2000, bonus_img)\r\n powerups = pygame.sprite.Group()\r\n powerups.add(powerup1)\r\n \r\n '''stats'''\r\n ticks = 0\r\n player.score = 0\r\n player.strength_bar = 3\r\n \r\n ''' set stage '''\r\n stage = START\r\n done = False\r\n '''music'''\r\n \r\n set_music(starting_music)\r\n \r\n\r\n# Game loop\r\nsetup()\r\n\r\nwhile not done:\r\n # Input handling (React to key presses, mouse clicks, etc.)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n elif event.type == pygame.KEYDOWN:\r\n if stage == START:\r\n if event.key == pygame.K_SPACE:\r\n set_music(playing_music)\r\n stage = PLAYING\r\n elif stage == PLAYING:\r\n if event.key == pygame.K_SPACE:\r\n wonder_woman.shoot()\r\n pew.play()\r\n elif stage == LOST:\r\n if event.key == pygame.K_SPACE:\r\n stage = END\r\n setup()\r\n elif stage == WON:\r\n if event.key == pygame.K_SPACE:\r\n stage = END\r\n setup()\r\n elif stage == END():\r\n if event.key == pygame.K_SPACE:\r\n setup()\r\n\r\n \r\n pressed = pygame.key.get_pressed()\r\n \r\n \r\n # Game logic (Check for collisions, update points, etc.)\r\n if stage == PLAYING:\r\n \r\n if pressed[pygame.K_LEFT]:\r\n wonder_woman.move_left()\r\n elif pressed[pygame.K_RIGHT]:\r\n wonder_woman.move_right()\r\n\r\n 
player.update()\r\n lasers.update()\r\n fleet.update()\r\n mobs.update()\r\n bombs.update()\r\n ticks += 1\r\n powerups.update()\r\n \r\n\r\n \r\n if len(mobs) == 0:\r\n set_music(winning_music)\r\n record_high_score()\r\n stage = WON\r\n \r\n if player.strength_bar <= 0:\r\n set_music(lost_music)\r\n stage = LOST\r\n\r\n\r\n \r\n # Drawing code (Describe the picture. It isn't actually drawn yet.)\r\n screen.blit(background, [0,0])\r\n lasers.draw(screen)\r\n bombs.draw(screen)\r\n player.draw(screen)\r\n mobs.draw(screen)\r\n powerups.draw(screen)\r\n \r\n show_stats()\r\n \r\n \r\n if stage == START:\r\n show_title_screen()\r\n elif stage == LOST:\r\n show_lost_screen()\r\n elif stage == WON:\r\n show_won_screen()\r\n\r\n \r\n \r\n # Update screen (Actually draw the picture in the window.)\r\n pygame.display.flip()\r\n\r\n\r\n # Limit refresh rate of game loop \r\n clock.tick(refresh_rate)\r\n\r\n\r\n# Close window and quit\r\npygame.quit()\r\n","repo_name":"colorfulthunder57/wonder-woman-battle","sub_path":"wonder_woman_battle.py","file_name":"wonder_woman_battle.py","file_ext":"py","file_size_in_byte":16437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15908932336","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom common_functions import create_model\nfrom data_preprocessing import create_final_ds, create_tf_dataset\n\n\"\"\"\nThis was used to generate some first models and get a better feel for the data\n\"\"\"\n\n# uses Early Stopping\nEPOCHS = 70\n# MODEL CONFIG\nnodes_lstm = 20\ndropout = 0.1\nlearning_rate = 0.001\nmetric = \"r_square\"\nbatch_size = 32\nseq_length = 1\n\nmodel_filename = f\"rmse_{nodes_lstm}_001_{seq_length}_{batch_size}_01.h5\"\n\n\ntrain_ds, val_ds, test_ds, train_df, test_df, val_df = create_final_ds(\n \"SHA\", [\"SHA\", \"WSH\"], \"SHA_nit\", batch_size=batch_size, seq_length=seq_length, interval=\"24h\")\n\nmodel, early_stopping = create_model(nodes_lstm, None, dropout, metric, learning_rate)\n\n\ndef train_model():\n history = model.fit(train_ds, epochs=EPOCHS,\n validation_data=val_ds)\n\n model.save(model_filename)\n\n # list all data in history\n print(history.history.keys())\n # visualize history for accuracy\n plt.plot(history.history['r_square'])\n plt.plot(history.history['val_r_square'])\n plt.title('model MSE')\n plt.ylabel('MSE')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n # visualize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n\ndef visualize_model():\n model.evaluate(train_ds.take(1))\n model.load_weights(model_filename)\n print(model.evaluate(test_ds))\n print(model.evaluate(train_ds))\n print(model.evaluate(val_ds))\n # visualize specific length of data\n \"\"\"predictions = []\n labels = []\n for batch in test_ds.take(2000):\n prediction = model.predict(batch)\n\n prediction = list(prediction)\n for ind, p in enumerate(prediction):\n predictions.append(np.max(p))\n\n y = [arr.numpy() for arr in batch][1]\n labels.append(y[0])\n\n x = range(0, len(predictions))\n print(predictions)\n print(labels)\"\"\"\n\n for ds, df in ((train_ds, train_df), (val_ds, val_df), (test_ds, test_df)):\n # visualize\n predictions = model.predict(ds).flatten()\n labels = np.array(df[\"SHA_nit\"])\n\n x = range(0, len(predictions))\n x1 = range(0, len(labels))\n\n print(predictions)\n print(labels)\n\n sns.set()\n fig, ax1 = plt.subplots()\n\n ax1.plot(x1, labels, color=\"blue\", label=\"actual\", linewidth=1)\n ax1.plot(x, predictions, color=\"orange\", label=\"Predictions\", linewidth=1.5)\n plt.title(\"Train Data\")\n plt.legend()\n\n plt.show()\n\n\ndef visualize_whole_dataset_model():\n model.evaluate(train_ds.take(1))\n model.load_weights(model_filename)\n\n full_ds = train_ds.concatenate(val_ds)\n full_ds = full_ds.concatenate(test_ds)\n full_df = train_df.append(val_df)\n full_df = full_df.append(test_df)\n # visualize\n predictions = model.predict(full_ds).flatten()\n labels = np.array(full_df[\"SHA_nit\"])\n\n x = range(0, len(predictions))\n x1 = range(0, len(labels))\n\n print(predictions)\n print(labels)\n\n sns.set()\n fig, ax1 = plt.subplots()\n\n ax1.plot(x1, labels, color=\"blue\", label=\"actual\", linewidth=1)\n ax1.plot(x, predictions, color=\"orange\", label=\"Predictions\", linewidth=1.5)\n plt.title(\"Train data | val data | test data\")\n plt.legend()\n\n for i in range(3):\n plt.axvline(len(train_df) + i, color=\"r\")\n for i in range(3):\n 
plt.axvline(len(train_df) + len(val_df) + i, color=\"r\")\n\n plt.show()\n\n\ndef extract_feature_importance():\n model.evaluate(test_ds.take(1))\n model.load_weights(\"RMSE_interval=24h.h5\")\n\n feature_df = pd.DataFrame(columns=[\"Feature removed\", \"loss\", \"RMSE\"])\n\n loss, metric = model.evaluate(train_ds)\n feature_df.loc[0] = [\"Normal\"] + [loss] + [metric]\n\n for ind, feature in enumerate(train_df.columns[1:]):\n # shuffle the feature\n n_df = train_df.copy()\n np.random.shuffle(n_df[feature])\n\n feature_train = n_df.drop([\"nit\"], axis=1)\n\n feature_scaler = MinMaxScaler(feature_range=(0, 1))\n feature_scaler.fit(feature_train.to_numpy())\n feature_train_scaled = feature_scaler.transform(feature_train)\n target_train = np.array(n_df[\"nit\"], ndmin=2).T\n\n feature_dataset = create_tf_dataset(feature_train_scaled, target_train, batch_size=32, seq_length=8)\n\n loss, metric = model.evaluate(feature_dataset)\n feature_df.loc[ind + 1] = [feature] + [loss] + [metric]\n\n feature_df.to_pickle(\"features.pkl\")\n print(feature_df)\n\n\ndef calculate_important_features():\n feature_df = pd.read_pickle(\"features.pkl\")\n\n norm = feature_df[\"RMSE\"][0]\n differences = [norm]\n divided = [norm]\n\n for rmse in feature_df[\"RMSE\"][1:]:\n differences.append(rmse - norm)\n divided.append(rmse / norm)\n\n feature_df[\"Difference\"] = differences\n feature_df[\"Divided\"] = divided\n feature_df.sort_values(\"Difference\", inplace=True, ignore_index=True, ascending=False)\n feature_df.to_pickle(\"feature_importance.pkl\")\n\n\ndef show_important_features():\n feature_df = pd.read_pickle(\"feature_importance.pkl\")\n print(feature_df)\n\n\ntrain_model()\nextract_feature_importance()\ncalculate_important_features()\nshow_important_features()","repo_name":"henri-climber/LSTM-time-series-prediction-Kenya","sub_path":"first_models.py","file_name":"first_models.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2935484666","text":"import pandas as pd\nfrom ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.contract import Contract\nfrom ibapi.order import Order\nimport threading\nimport time\nimport datetime\n\n\nclass TradingApp(EWrapper, EClient):\n def __init__(self):\n EClient.__init__(self, self)\n self.bars = None\n self.nextValidOrderId = None\n self.event_connect = threading.Event()\n self.event_datadone = threading.Event()\n self.event_position_change = threading.Event()\n\n def nextValidId(self, orderId: int):\n super().nextValidId(orderId)\n self.nextValidOrderId = orderId\n self.event_connect.set()\n\n def nextOrderId(self):\n oid = self.nextValidOrderId\n self.nextValidOrderId += 1\n return oid\n\n def error(self, reqId, errorCode, errorString):\n print(\"Error {} {} {}\".format(reqId, errorCode, errorString))\n\n def contractDetails(self, reqId, contractDetails):\n print(\"redID: {}, contract:{}\".format(reqId, contractDetails))\n\n def historicalData(self, reqId, bar):\n self.bars.append((bar.date, bar.open, bar.high, bar.low, bar.close,\n float(bar.volume), float(bar.wap)))\n\n def historicalDataEnd(self, reqId: int, start: str, end: str):\n # df = pd.DataFrame(self.bars, columns=\"date open high low close volume wap\".split()).set_index('date')\n self.event_datadone.set()\n\n def get_barsDF(self, duration: str) -> pd.DataFrame:\n self.event_datadone.clear()\n self.bars = []\n contract = Contract()\n contract.symbol = \"SPY\"\n contract.secType = \"STK\"\n contract.currency = \"USD\"\n contract.exchange = \"SMART\"\n self.reqHistoricalData(reqId=1,\n contract=contract,\n endDateTime=\"\",\n durationStr=duration,\n barSizeSetting='1 min',\n whatToShow='TRADES',\n useRTH=1,\n formatDate=1,\n keepUpToDate=False,\n chartOptions=[])\n self.event_datadone.wait()\n df = pd.DataFrame(self.bars, columns=\"date open high low close volume wap\".split())\n return df\n\n def placeBUYOrder(self, quantity=1) -> int:\n contract = Contract()\n contract.symbol = \"SPY\"\n contract.secType = \"STK\"\n contract.currency = \"USD\"\n contract.exchange = \"SMART\"\n order = Order()\n order.action = \"BUY\"\n order.orderType = \"MKT\"\n order.totalQuantity = 1\n oid = self.nextOrderId()\n self.placeOrder(oid, contract, order)\n return oid\n\n def placeSELLOrder(self, quantity=1) -> int:\n contract = Contract()\n contract.symbol = \"SPY\"\n contract.secType = \"STK\"\n contract.currency = \"USD\"\n contract.exchange = \"SMART\"\n order = Order()\n order.action = \"SELL\"\n order.orderType = \"MKT\"\n order.totalQuantity = quantity\n oid = self.nextOrderId()\n self.placeOrder(oid, contract, order)\n return oid\n\n def position(self, account: str, contract: Contract, position: float, avgCost: float):\n super().position(account, contract, position, avgCost)\n print(\"Position.\", \"Account:\", account, \"Symbol:\", contract.symbol, \"SecType:\",\n contract.secType, \"Currency:\", contract.currency,\n \"Position:\", position, \"Avg cost:\", avgCost)\n\n def positionEnd(self):\n super().positionEnd()\n print(\"PositionEnd\")\n self.event_position_change.set()\n\n\n\nif __name__ == '__main__':\n app = TradingApp()\n\n df = app.get_bars('2700 S')\n print(df)\n\n\n\n time.sleep(5)\n app.disconnect()\n\n\n\n print('DONE DONE DONE')","repo_name":"webclinic017/trade-9","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6466682727","text":"def leader(arr,n):\n max_element=arr[n-1]\n leader_list=[arr[n-1]]\n j=n-1\n while j>=0:\n if arr[j]>max_element:\n leader_list.insert(0,arr[j])\n max_element=arr[j]\n \n j=j-1\n print(*leader_list)\n\nt=int(input())\nwhile t>0:\n n=int(input())\n arr=list(map(int,input().split()))\n leader(arr,n)\n t=t-1\n","repo_name":"bikashg3/GeeksforGeeks","sub_path":"Arrays/7.Leaders_in_array/leader.py","file_name":"leader.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20570514032","text":"import time\n\nfrom selenium import webdriver\n\ndef testmetricks(link):\n driver = webdriver.Chrome(\"chromedriver.exe\")\n driver.get(link)\n time.sleep(3)\n driver.execute_cdp_cmd('Performance.enable', {})\n metrics = driver.execute_cdp_cmd('Performance.getMetrics', {})\n driver.quit()\n t ={}\n for m in metrics[\"metrics\"]:\n if m[\"name\"] in (\n 'ScriptDuration', 'TaskDuration', 'TaskOtherDuration', 'ThreadTime', 'ProcessTime', 'JSHeapUsedSize',\n 'JSHeapTotalSize', 'FirstMeaningfulPaint', 'DomContentLoaded', 'NavigationStart'):\n t[m[\"name\"]] = m[\"value\"]\n return t\n","repo_name":"houmsss/DjangoDiploma","sub_path":"NoCodePlatform/NoCode/static/NoCode/PyScripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29059433117","text":"import decimal\n\nfrom billing.dcsaap.backend.core.utils.check import format_material_thresholds\n\n\nclass TestMaterialThreshold:\n @staticmethod\n def cleanup_threshold(threshold):\n result = {}\n for key, value in threshold.items():\n if value is not None:\n result[key] = value\n return result\n\n def format_thresholds(self, material_threshold):\n thresholds = format_material_thresholds(material_threshold)\n return list(map(self.cleanup_threshold, thresholds))\n\n def test_invalid_threshold(self):\n # invalid expression\n material_threshold = '2a+b'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n assert thresholds[0]['error'] == 'invalid expression: unexpected variable, expression: 2a+b'\n\n # invalid variables count\n material_threshold = '2+2'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n assert thresholds[0]['error'] == 'expression has invalid count of variables: 0'\n\n material_threshold = '2*a+b'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n assert thresholds[0]['error'] == 'expression has invalid count of variables: 2'\n\n # no threshold specified\n material_threshold = 'INvalid' # avoiding parsing `in` as token\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n assert thresholds[0]['error'] == 'no threshold specified'\n\n # invalid min threshold\n material_threshold = 'a:100f,1000f'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n assert thresholds[0]['error'] == 'invalid min threshold'\n\n # invalid max threshold\n material_threshold = 'a:1000f'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n assert thresholds[0]['error'] == 'invalid max threshold'\n\n def test_multiple(self):\n material_threshold = 'a:1 b:2 c:3'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 3\n\n expected = [\n {'name': 'a', 'expression': 'a', 'threshold_max': decimal.Decimal(1)},\n {'name': 'b', 'expression': 'b', 'threshold_max': decimal.Decimal(2)},\n {'name': 'c', 'expression': 'c', 'threshold_max': decimal.Decimal(3)},\n ]\n assert thresholds == expected\n\n def test_min_max(self):\n material_threshold = 'amount:100,1000'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n\n expected = [\n {\n 'name': 'amount',\n 'expression': 'amount',\n 'threshold_min': decimal.Decimal(100),\n 'threshold_max': decimal.Decimal(1000),\n }\n ]\n assert thresholds == expected\n\n def test_expression(self):\n material_threshold = '(amount*30)/10000:100000'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n\n expected = [\n {\n 'name': 'amount',\n 'expression': '(amount*30)/10000',\n 'threshold_max': decimal.Decimal(100000),\n }\n ]\n assert thresholds == expected\n\n def test_unit(self):\n material_threshold = 'amount:100000:\"английские фунты\"'\n thresholds = self.format_thresholds(material_threshold)\n assert len(thresholds) == 1\n\n expected = [\n {\n 'name': 'amount',\n 'expression': 'amount',\n 'threshold_max': decimal.Decimal(100000),\n 'units': 'английские фунты',\n }\n ]\n assert thresholds == 
expected\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/small/core/utils/test_check.py","file_name":"test_check.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73841682962","text":"from tkinter import filedialog\nimport pathlib\nimport logging\nimport os\n\nclass CapBase(object):\n\n def __init__(self, base_location=None):\n logging.basicConfig(level=logging.INFO)\n self.logger = logging.getLogger(__name__)\n #self.logger.setLevel(logging.INFO)\n self.logger.setLevel(logging.DEBUG)\n #self.logger.setLevel(logging.WARNING)\n\n self.packetBase = []\n self.base_loc = str(base_location).strip()\n\n #self.logger.debug(\"Testing logger debug message\")\n #print(\"Logger Level: %s\", str(self.logger.getEffectiveLevel()))\n\n if base_location is None:\n #Check for config file\n p = pathlib.Path('base_loc_config.conf')\n try:\n if os.stat(str(p)).st_size == 0:\n self.logger.warning(\"base_loc_config file is empty\")\n self.base_loc == filedialog.askdirectory(initialdir='', title='Select Base Location home-dir')\n with p.open('a+') as f:\n f.write(self.base_loc)\n f.close()\n else:\n with p.open('r') as rf:\n for line in rf:\n self.base_loc = line.strip()\n if self.base_loc == '':\n self.base_loc == filedialog.askdirectory(initialdir='', title='Select Base Location home-dir')\n else:\n self.logger.debug(\"Loaded CapBase path: %s\", self.base_loc)\n # self.logger.info(\"test info\")\n # self.logger.warning(\"test warning\")\n #print('test')\n except:\n self.logger.warning(\"base_loc_config does not exist. Create base_loc_config file\")\n #If config file doesn't exist create it\n self.base_loc = filedialog.askdirectory(initialdir='')\n with p.open('a+') as f:\n f.write(self.base_loc)\n f.close()\n\n def add_lib_to_base(self, newMetaCapLib):\n self.packetBase.append(newMetaCapLib)\n\n def set_base_location(self, base_location):\n self.base_loc = str(base_location).strip()\n return\n\n def get_base_loc(self):\n return self.base_loc\n","repo_name":"irvinhomem/TunnelFeatureExtractor","sub_path":"CapBase.py","file_name":"CapBase.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"}
+{"seq_id":"8358223476","text":"import ssl\nimport subprocess\nimport sys\nimport json\nfrom cryptography.fernet import Fernet\n\nglobal schedulerapi, variables\n\ndef install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n\n# Import required Python libraries according to Python version\ntry:\n from urllib.request import Request, urlopen # Python 3\nexcept ImportError:\n from urllib2 import Request, urlopen # Python 2\n\ntry:\n import cryptography\nexcept ImportError:\n install('cryptography')\n import cryptography\n\n# Get user credentials and convert to json\nschedulerapi.connect()\nsessionId = str(schedulerapi.getSession())\nconnectionInfo = schedulerapi.getConnectionInfo()\nciLogin = str(connectionInfo.getLogin())\nciPasswd = str(connectionInfo.getPassword())\nciUrl = str(connectionInfo.getUrl())\nuser_credentials = {\n 'sessionId': sessionId,\n 'ciLogin': ciLogin,\n 'ciPasswd': ciPasswd,\n 'ciUrl': ciUrl\n}\nuser_credentials_json = json.dumps(user_credentials)\n\n# Encrypt user data into a binary file\nkey = Fernet.generate_key()\nf = Fernet(key)\nmessage = user_credentials_json.encode()\nencrypted = f.encrypt(message)\nuser_data_file = 'user_data.enc'\nwith open(user_data_file, 'wb') as f:\n f.write(encrypted)\nvariables.put(\"USER_KEY\", key.decode())\nvariables.put(\"USER_DATA_FILE\", user_data_file)\n\n# Get workflows variables\nPA_CATALOG_REST_URL = variables.get(\"PA_CATALOG_REST_URL\")\nPYTHON_ENTRYPOINT = variables.get(\"PYTHON_ENTRYPOINT\")\nYAML_FILE = variables.get(\"YAML_FILE\")\n\nPA_MAAS_RESOURCES_URL = \"/buckets/ai-model-as-a-service/resources/\"\npython_file_url = PA_CATALOG_REST_URL + PA_MAAS_RESOURCES_URL + PYTHON_ENTRYPOINT + \"/raw\"\nyaml_file_url = PA_CATALOG_REST_URL + PA_MAAS_RESOURCES_URL + YAML_FILE + \"/raw\"\nprint(\"python_file_url: \", python_file_url)\nprint(\"yaml_file_url: \", yaml_file_url)\n\n# Download the two configuration file \"ml_service\" for the service definition\nreq_py = Request(python_file_url)\nreq_py.add_header('sessionid', sessionId)\nif python_file_url.startswith('https'):\n context = ssl._create_unverified_context()\n python_file = urlopen(req_py, context=context).read()\nelse:\n python_file = urlopen(req_py).read()\npython_content = python_file.decode('utf-8')\npython_file_name = PYTHON_ENTRYPOINT + \".py\"\nwith open(python_file_name, 'w') as f:\n f.write(python_content)\n\n# Download the configuration file \"ml_service-api\" for the swagger specification\nreq_yaml = Request(yaml_file_url)\nreq_yaml.add_header('sessionid', sessionId)\nif yaml_file_url.startswith('https'):\n context = ssl._create_unverified_context()\n yaml_file = urlopen(req_yaml, context=context).read()\nelse:\n yaml_file = urlopen(req_yaml).read()\nyaml_file_content = yaml_file.decode('utf-8')\nyaml_file_name = YAML_FILE + \".yaml\"\nwith open(yaml_file_name, 'w') as f:\n f.write(yaml_file_content)","repo_name":"ow2-proactive/proactive-examples","sub_path":"ModelAsService/resources/catalog/MaaS_Pre_Script.py","file_name":"MaaS_Pre_Script.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"33899461344","text":"import mod_keys as k\nfrom sys import stdin\n\nMACHINE_SIZE = 300\nMAX_LEGAL_VAL = 299\n\n\ndef read_next_line():\n line = \"\"\n while True:\n line = input()\n if line:\n return line.split()\n\n\ndef increment_cur(cur, incr, list):\n cur += incr\n next_line = None\n if cur > (len(list) - 1):\n next_line = read_next_line()\n list += next_line\n return cur, list\n\n\ndef linker_first_pass():\n\n user_input = []\n\n first_line = read_next_line()\n user_input += first_line\n\n cur = 0\n mod_count = int(user_input[cur])\n cur, user_input = increment_cur(cur, 1, user_input)\n\n sym_table = {}\n base_accum = 0\n\n mods = {k.MOD_COUNT: int(mod_count), k.MODS: []}\n for mod_index in range(mod_count):\n mod = {k.DEF: {}, k.USE: {}, k.INSTRUCTIONS: {}}\n\n def_list, user_input, cur = parse_def(\n user_input, cur, sym_table, base_accum, mod_index)\n mod[k.DEF] = def_list\n\n use_list, user_input, cur = parse_use(user_input, cur)\n mod[k.USE] = use_list\n\n instruction_list, user_input, cur, base_accum = parse_instructions(\n user_input, cur, base_accum, mod_index, mod_count)\n mod[k.INSTRUCTIONS] = instruction_list\n mods[k.MODS].append(mod)\n\n return mods, sym_table\n\n\ndef parse_def(user_input, cur, sym_table, base_accum, mod_index):\n SYM_MULT_DEF_ERR = \"Error: This variable is multiply defined; last value used.\"\n def_count = int(user_input[cur])\n cur, user_input = increment_cur(cur, 1, user_input)\n def_list = {k.DEF_COUNT: def_count, k.DEF_LIST: {}}\n for _ in range(def_count):\n sym = user_input[cur]\n cur, user_input = increment_cur(cur, 1, user_input)\n sym_val = user_input[cur]\n cur, user_input = increment_cur(cur, 1, user_input)\n def_list[k.DEF_LIST][sym] = int(sym_val)\n\n if sym in sym_table:\n sym_table[sym][k.SYM_ERR] = SYM_MULT_DEF_ERR\n else:\n sym_table[sym] = {k.SYM_VAL: None,\n k.SYM_DEF_MOD: None, k.SYM_ERR: None}\n\n sym_table[sym][k.SYM_VAL] = int(sym_val) + base_accum\n sym_table[sym][k.SYM_DEF_MOD] = mod_index\n return def_list, user_input, cur\n\n\ndef parse_use(user_input, cur):\n use_count = int(user_input[cur])\n cur, user_input = increment_cur(cur, 1, user_input)\n use_list = {k.USE_COUNT: use_count, k.USE_LIST: {}}\n for _ in range(use_count):\n sym = user_input[cur]\n cur, user_input = increment_cur(cur, 1, user_input)\n sym_use_rel_addr = user_input[cur]\n cur, user_input = increment_cur(cur, 1, user_input)\n if sym_use_rel_addr in use_list[k.USE_LIST]:\n use_list[k.USE_LIST][sym_use_rel_addr][k.SYM_KEY] = sym\n use_list[k.USE_LIST][sym_use_rel_addr][k.SYM_MULT_USE_FLAG] = True\n else:\n use_list[k.USE_LIST][sym_use_rel_addr] = {\n k.SYM_KEY: sym,\n k.SYM_MULT_USE_FLAG: False\n }\n return use_list, user_input, cur\n\n\ndef parse_instructions(user_input, cur, base_accum, mod_index, mod_count):\n instruction_count = int(user_input[cur])\n cur, user_input = increment_cur(cur, 1, user_input)\n instruction_list = {\n k.INSTRUCTION_COUNT: instruction_count,\n k.BASE: base_accum,\n k.INSTRUCTION_LIST: []\n }\n base_accum += instruction_count\n for inst_index in range(instruction_count):\n inst_type = user_input[cur]\n cur, user_input = increment_cur(cur, 1, user_input)\n inst_word = int(user_input[cur])\n if (mod_index == (mod_count - 1)) and (inst_index == (instruction_count - 1)):\n pass\n else:\n cur, user_input = increment_cur(cur, 1, user_input)\n instruction_list[k.INSTRUCTION_LIST].append({\n k.TYPE: inst_type,\n k.WORD: inst_word,\n k.PROG_SYM_USED_FLAG: False,\n k.PROG_ERR: \"\",\n })\n return instruction_list, user_input, 
cur, base_accum\n\n\ndef process_ext_addr(old_addr, new_addr):\n first_digit = int(str(old_addr)[0])\n return (first_digit * 1000 + new_addr)\n\n\ndef format_sym_table_out(syms):\n syms_out = \"Symbol Table\\n\"\n for sym, sym_info in syms.items():\n syms_out += \"{}={} {}\\n\".format(sym,\n sym_info[k.SYM_VAL], sym_info[k.SYM_ERR])\n return syms_out\n\n\ndef format_mmap_out(mmap, sym_use_stat, sym_table):\n mmap_str = \"Memory Map\\n\"\n for index, item in enumerate(mmap):\n mmap_str += \"{}:\\t{}\\n\".format(str(index), item)\n mmap_str += '\\n'\n for sym in sym_use_stat:\n if sym_use_stat[sym] == False:\n sym_def_loc = sym_table[sym][k.SYM_DEF_MOD]\n warning = \"Warning: {} was defined in {} but never used.\\n\".format(\n sym, sym_def_loc)\n mmap_str += warning\n return mmap_str\n\n\ndef is_symbol_defined(sym, sym_table):\n return True if sym in sym_table else False\n\n\ndef undefined_sym_err(sym):\n USED_NOT_DEFINED_ERR = 'Error: ' + sym + \\\n ' was used but not defined. It has been given the value 111.'\n return USED_NOT_DEFINED_ERR\n\n\ndef resolve_new_addr(is_sym_defined, sym, inst_pair, sym_table, sym_use_stat):\n new_addr = None\n old_addr = inst_pair[k.WORD]\n if is_sym_defined:\n new_addr = sym_table[sym][k.SYM_VAL]\n sym_use_stat[sym] = True\n else:\n inst_pair[k.PROG_ERR] = undefined_sym_err(sym)\n new_addr = 111\n inst_pair[k.WORD] = process_ext_addr(old_addr, new_addr)\n return inst_pair\n\n\ndef modify_word_last_three_digits(word, replacement):\n return int(str(word)[0]) * 1000 + replacement\n\n\ndef process_use_list(use_list, inst_list, sym_table, sym_use_stat):\n MULT_SYM_USAGE_ERR = 'Error: Multiple symbols used here; last one used'\n for addr, sym_info in use_list.items():\n addr = int(addr)\n sym = sym_info[k.SYM_KEY]\n\n is_sym_multibly_used = sym_info[k.SYM_MULT_USE_FLAG]\n if is_sym_multibly_used:\n inst_list[addr][k.PROG_ERR] = MULT_SYM_USAGE_ERR\n\n old_addr = inst_list[addr][k.WORD]\n addr_cur = str(old_addr)\n\n is_sym_defined = is_symbol_defined(sym, sym_table)\n inst_list[addr] = resolve_new_addr(\n is_sym_defined, sym, inst_list[addr], sym_table, sym_use_stat)\n\n new_addr = str(inst_list[addr][k.WORD])[-3:]\n\n while addr_cur[-3:] != '777':\n next_index = int(addr_cur[-3:])\n next_addr = str(inst_list[next_index][k.WORD])\n inst_list[next_index][k.WORD] = process_ext_addr(\n int(next_addr), int(new_addr))\n if is_sym_multibly_used:\n inst_list[addr][k.PROG_ERR] = MULT_SYM_USAGE_ERR\n if not is_sym_defined:\n inst_list[next_index][k.PROG_ERR] = undefined_sym_err(sym)\n addr_cur = next_addr\n\n\ndef process_instructions(inst_list, mmap, base):\n EXCEED_MOD_SIZE_ERR = 'Error: Type R address exceeds module size; 0 (relative) used'\n EXCEED_MACHINE_SIZE_ERR = 'Error: A type address exceeds machine size; max legal value used'\n for progpair in inst_list:\n if progpair[k.TYPE] == 'R':\n if int(str(progpair[k.WORD])[-3:]) >= len(inst_list):\n progpair[k.PROG_ERR] = EXCEED_MOD_SIZE_ERR\n progpair[k.WORD] = modify_word_last_three_digits(\n progpair[k.WORD], 0)\n progpair[k.WORD] += base\n elif progpair[k.TYPE] == 'A':\n if int(str(progpair[k.WORD])[-3:]) >= MACHINE_SIZE:\n progpair[k.PROG_ERR] = EXCEED_MACHINE_SIZE_ERR\n progpair[k.WORD] = modify_word_last_three_digits(\n progpair[k.WORD], MAX_LEGAL_VAL)\n\n mmap.append(str(progpair[k.WORD]) + ' ' + progpair[k.PROG_ERR])\n\n\ndef linker_second_pass(mods, sym_table):\n mmap = []\n sym_use_stat = {}\n for sym in sym_table:\n sym_use_stat[sym] = False\n for mod in mods[k.MODS]:\n use_list = mod[k.USE][k.USE_LIST]\n prog 
= mod[k.INSTRUCTIONS]\n inst_list = prog[k.INSTRUCTION_LIST]\n if use_list:\n process_use_list(use_list, inst_list, sym_table, sym_use_stat)\n process_instructions(inst_list, mmap, prog[k.BASE])\n mmap_out = format_mmap_out(mmap, sym_use_stat, sym_table)\n return mmap_out\n\n\ndef main():\n mods, sym_table = linker_first_pass()\n print('\\n' + format_sym_table_out(sym_table))\n mmap_out = linker_second_pass(mods, sym_table)\n print(mmap_out)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vic-shihang-li/os-two-pass-linker","sub_path":"deliverables/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"26961795325","text":"\"\"\"\n279. Perfect Squares\nGiven a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.\n\nFor example, given n = 12, return 3 because 12 = 4 + 4 + 4; given n = 13, return 2 because 13 = 4 + 9.\n\nCredits:\nSpecial thanks to @jianchao.li.fighter for adding this problem and creating all test cases.\n\nHide Company Tags Google\nShow Tags\nShow Similar Problems\n\n\"\"\"\nclass Solution(object):\n def numSquares(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n\n Solution: This is a bit tricky, you have get the min of all the perfect square combinations\n So be careful in returning the output (Came up).\n\n First time I did a mistake to return the first found combination of perfect squares\n \"\"\"\n dp = [0, 1]\n if n < 2:\n return dp[n]\n\n for i in range(2, n+1):\n temp = i\n for j in range(1, i):\n if i < j*j:\n break\n temp = min(temp, 1+dp[i-(j*j)])\n\n dp.append(temp)\n\n #print dp\n return dp[-1]","repo_name":"tejamupparaju/LeetCode_Python","sub_path":"leet_code279.py","file_name":"leet_code279.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25156522877","text":"import os\nimport subprocess\ndir = '/net/birdstore/Active_Atlas_Data/data_root/pipeline_data/'\nanimals = os.listdir(dir)\nfor animali in animals:\n path = dir+animali\n folders = os.listdir(path)\n for folderi in folders:\n if folderi in ['histogram','www','neuroglancer_data']:\n os.chmod(os.path.join(path,folderi),0o775)\n # subprocess.Popen([\"sudo\", \"chmod\", \"0775\", os.path.join(path,folderi)], stdout=subprocess.PIPE, shell=True)\n else:\n os.chmod(os.path.join(path,folderi),0o770)\n # subprocess.Popen([\"sudo\", \"chmod\", \"0770\", os.path.join(path,folderi)], stdout=subprocess.PIPE, shell=True)","repo_name":"ActiveBrainAtlas2/preprocessing-pipeline","sub_path":"in_development/Will/showcase/bird_store_clean_up/change_permission.py","file_name":"change_permission.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"36111948687","text":"from novaclient import api_versions\nfrom novaclient.tests.unit import utils\nfrom novaclient.tests.unit.v2 import fakes\nfrom novaclient.v2 import flavor_access\n\n\nclass FlavorAccessTest(utils.TestCase):\n def setUp(self):\n super(FlavorAccessTest, self).setUp()\n self.cs = fakes.FakeClient(api_versions.APIVersion(\"2.0\"))\n\n def test_list_access_by_flavor_private(self):\n kwargs = {'flavor': self.cs.flavors.get(2)}\n r = self.cs.flavor_access.list(**kwargs)\n self.assert_request_id(r, fakes.FAKE_REQUEST_ID_LIST)\n self.cs.assert_called('GET', '/flavors/2/os-flavor-access')\n for a in r:\n self.assertIsInstance(a, flavor_access.FlavorAccess)\n\n def test_add_tenant_access(self):\n flavor = self.cs.flavors.get(2)\n tenant = 'proj2'\n r = self.cs.flavor_access.add_tenant_access(flavor, tenant)\n self.assert_request_id(r, fakes.FAKE_REQUEST_ID_LIST)\n\n body = {\n \"addTenantAccess\": {\n \"tenant\": \"proj2\"\n }\n }\n\n self.cs.assert_called('POST', '/flavors/2/action', body)\n for a in r:\n self.assertIsInstance(a, flavor_access.FlavorAccess)\n\n def test_remove_tenant_access(self):\n flavor = self.cs.flavors.get(2)\n tenant = 'proj2'\n r = self.cs.flavor_access.remove_tenant_access(flavor, tenant)\n self.assert_request_id(r, fakes.FAKE_REQUEST_ID_LIST)\n\n body = {\n \"removeTenantAccess\": {\n \"tenant\": \"proj2\"\n }\n }\n\n self.cs.assert_called('POST', '/flavors/2/action', body)\n for a in r:\n self.assertIsInstance(a, flavor_access.FlavorAccess)\n\n def test_repr_flavor_access(self):\n flavor = self.cs.flavors.get(2)\n tenant = 'proj3'\n r = self.cs.flavor_access.add_tenant_access(flavor, tenant)\n\n def get_expected(flavor_access):\n return (\"\" %\n (flavor_access.flavor_id, flavor_access.tenant_id))\n\n for a in r:\n self.assertEqual(get_expected(a), repr(a))\n","repo_name":"openstack/python-novaclient","sub_path":"novaclient/tests/unit/v2/test_flavor_access.py","file_name":"test_flavor_access.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"3"}
+{"seq_id":"71881789840","text":"\n\n\nimport pygame,sys\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.image = pygame.Surface((30,40))\n self.image.fill(\"black\")\n self.rect = self.image.get_rect(center = (100,100))\n \n self.gravity = 0\n \n def apply_gravity(self):\n self.gravity += 0.5\n self.rect.y += self.gravity\n\n if self.rect.bottom >= 500:\n self.rect.bottom = 500\n \n def player_input(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_UP] and self.rect.bottom == 500:\n self.gravity = -15\n if keys[pygame.K_LEFT]:\n self.rect.x -= 5\n if keys[pygame.K_RIGHT]:\n self.rect.x += 5\n \n \n def update(self):\n self.apply_gravity()\n self.player_input()\n \n\nclass Ground(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.image = pygame.Surface((600,100))\n self.image.fill(\"brown\")\n self.rect = self.image.get_rect(topleft = (0,500))\n\n# create ground\n# create 2d player that can jump - simulate gravity\n\npygame.init()\nclock = pygame.time.Clock()\n\nwidth, height = 600, 600\nscreen = pygame.display.set_mode((width,height))\npygame.display.set_caption(\"octorun\")\n\n\nplayer = Player()\nplayer_group = pygame.sprite.Group()\nplayer_group.add(player)\n\nground = Ground()\nground_group = pygame.sprite.Group()\nground_group.add(ground)\n\n\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n \n\n \n screen.fill(\"light blue\")\n\n player_group.draw(screen)\n player_group.update()\n\n ground_group.draw(screen)\n\n pygame.display.flip()\n\n clock.tick(60)\n","repo_name":"monkeMuk/Python","sub_path":"Pygame/Tutorials/Collisions/2D_gravity_basic.py","file_name":"2D_gravity_basic.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4322690768","text":"\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\n\r\nprint('getting data')\r\nurl = 'https://www.webelements.com/'\r\n\r\n\r\nr = requests.get(url).content\r\n\r\nwith open('PThtml.txt','w') as pt: #saves pt html to file so i dont have to request\r\n pt.write(str(r))\r\n\r\n\r\nwith open( 'PThtml.txt','r')as PT:\r\n htm = PT.read()\r\n\r\nsoup = BeautifulSoup(htm, 'html.parser')\r\ntb = soup.table.tbody\r\n\r\nhrefs = []\r\n\r\nfor i in tb.findAll('a', href = True):\r\n hrefs.append(i['href'])\r\n#print(hrefs)\r\n\r\ntable = []\r\nfor i in hrefs:\r\n\r\n r = requests.get(url+i).content\r\n sp = BeautifulSoup(r, 'html.parser')\r\n\r\n for h in sp.findAll('ul', {'class':'ul_facts_table'}):\r\n with open('PTdata.csv','a+') as file:\r\n\r\n for g in h.findAll('li'):\r\n file.write(g.text+',')\r\n file.write('\\n')\r\n\r\n #print(g.text)\r\n\r\n\r\n\r\n#make PT class and element class\r\n#for each row of the pt pass element data to the element class\r\n#populate grid with Main Data\r\n#on click pull up popup\r\n#make argument of gui file chance to use it like api\r\n'''in other file'''\r\n","repo_name":"augustwindham/Periodic-Table-App","sub_path":"Pt_db.py","file_name":"Pt_db.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39660626198","text":"import copy\nimport numpy as np\nuse_cuda = True\nif use_cuda:\n import cupy as cp\n to_cpu = cp.asnumpy\n cp.cuda.set_allocator(cp.cuda.MemoryPool().malloc)\nelse:\n cp = np\n to_cpu = lambda x: x\nimport open3d as o3\nfrom probreg import cpd\nfrom probreg import callbacks\nimport utils\nimport time\nfrom utils import estimate_normals\n\n# load source and target point cloud\nsource_mesh = o3.io.read_triangle_mesh('data/down/pcd_1.obj')\ntarget_mesh = o3.io.read_triangle_mesh('data/down/pcd_2.obj')\n# transform target point cloud\nth = np.deg2rad(30.0)\nsource = o3.geometry.PointCloud()\nsource.points = o3.utility.Vector3dVector(np.asarray(source_mesh.vertices, np.float32))\ntarget = o3.geometry.PointCloud()\ntarget.points = o3.utility.Vector3dVector(np.asarray(target_mesh.vertices, np.float32))\n# transform target point cloud\nth = np.deg2rad(30.0)\n\nsource_pt = cp.asarray(source.points, dtype=cp.float32)\ntarget_pt = cp.asarray(target.points, dtype=cp.float32)\n\nsource2 = source.voxel_down_sample(voxel_size=0.4)\ntarget2 = target.voxel_down_sample(voxel_size=0.4)\n\nsource_pt2 = cp.asarray(source2.points, dtype=cp.float32)\ntarget_pt2 = cp.asarray(target2.points, dtype=cp.float32)\nprint((np.asarray(source_mesh.vertices).shape), len(np.asarray(target_mesh.vertices)))\nprint(len(np.asarray(source.points)), len(np.asarray(target.points)))\nprint(len(np.asarray(source2.points)), len(np.asarray(target2.points)))\n\nprint(\"start reg\")\n\n# compute cpd registration\nacpd = cpd.AffineCPD(source_pt, use_cuda=use_cuda)\ntf_param, _, _ = acpd.registration(target_pt)\nresult = tf_param.transform(source_pt)\n\n# result = tf_param.transform(result)\nmesh = o3.geometry.TriangleMesh()\nnp_vertices = to_cpu(result)\nnp_triangles = np.array(source_mesh.triangles).astype(np.int32)\nmesh.vertices = o3.utility.Vector3dVector(np_vertices)\nmesh.triangles = o3.utility.Vector3iVector(np_triangles)\no3.io.write_triangle_mesh('data/down/pcd_1_rigid.obj', mesh)\n","repo_name":"Pangyk/point-cloud-registration","sub_path":"robot_curve/reg/cpd_rigid.py","file_name":"cpd_rigid.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29179633000","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Date: 2016.08.02\n# Filename: 13.py\n# Author: Timilong\n\n# 输入一个数\nnum = float(input(\"请输入一个数num: \"))\n\n# 判断这个数是奇数还是偶数\nif num %2 == 0:\n print(\"{num}是偶数\".format(num=num))\nelse:\n print(\"{num}是奇数\".format(num=num))\n\n\n# 打印\n","repo_name":"lxl0928/learning_python","sub_path":"code/python_lesson/runoob/13判断奇数偶数.py","file_name":"13判断奇数偶数.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"37793265932","text":"import os\nimport sys\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nimport tensorflow as tf\nimport numpy as np\n\nsys.path.append('./tensorflow1/')\nsys.path.append('./tensorflow1/research')\nsys.path.append('./tensorflow1/research/slim')\n\nfrom object_detection.utils import label_map_util\n\nPATH_TO_FROZEN_GRAPH = './graphs/frozen_inference_graph.pb'\nPATH_TO_LABELS = './graphs/labelmap.pbtxt'\nNUM_CLASSES = 37\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\ndef load_model():\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n with detection_graph.as_default():\n config = tf.ConfigProto()\n #config.gpu_options.allow_growth = True\n #config.gpu_options.per_process_gpu_memory_fraction = 0.4\n #config.log_device_placement = False\n config.intra_op_parallelism_threads = 0\n config.inter_op_parallelism_threads = 2\n config.allow_soft_placement=True\n sess = tf.Session(config=config, graph=detection_graph)\n return sess, detection_graph\n\ndef inference(sess, detection_graph, img_arr, average_distance_error=3):\n # print(img_arr)\n image_np_expanded = np.expand_dims(img_arr, axis=0)\n # Actual detection.\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n # Visualization of the results of a detection.\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n # Bellow we do filtering stuff\n captcha_array = []\n # loop our all detection boxes\n for i,b in enumerate(boxes[0]):\n for Symbol in range(NUM_CLASSES):\n if classes[0][i] == Symbol: # check if detected class equal to our symbols\n if scores[0][i] >= 0.50: # do something only if detected score more han 0.65\n # x-left # x-right\n mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 # find x coordinates center of letter\n # to captcha_array array save detected Symbol, middle X coordinates and detection percentage\n captcha_array.append([category_index[Symbol].get('name'), mid_x, scores[0][i]])\n\n # rearange array acording to X coordinates datected\n for number in range(20):\n for captcha_number in range(len(captcha_array)-1):\n if captcha_array[captcha_number][1] > captcha_array[captcha_number+1][1]:\n temporary_captcha = captcha_array[captcha_number]\n captcha_array[captcha_number] = captcha_array[captcha_number+1]\n captcha_array[captcha_number+1] = temporary_captcha\n\n\n # Find average distance between detected symbols\n average = 0\n captcha_len = len(captcha_array)-1\n while captcha_len > 0:\n average += captcha_array[captcha_len][1]- captcha_array[captcha_len-1][1]\n captcha_len -= 1\n # Increase average distance error\n average = average/(len(captcha_array)+average_distance_error)\n\n \n captcha_array_filtered = list(captcha_array)\n captcha_len = len(captcha_array)-1\n while captcha_len > 
0:\n # if average distance is larger than error distance\n if captcha_array[captcha_len][1]- captcha_array[captcha_len-1][1] < average:\n # check which symbol has higher detection percentage\n if captcha_array[captcha_len][2] > captcha_array[captcha_len-1][2]:\n del captcha_array_filtered[captcha_len-1]\n else:\n del captcha_array_filtered[captcha_len]\n captcha_len -= 1\n\n # Get final string from filtered CAPTCHA array\n captcha_string = \"\"\n for captcha_letter in range(len(captcha_array_filtered)):\n captcha_string += captcha_array_filtered[captcha_letter][0]\n return captcha_string\n","repo_name":"Apidwalin/Server-Client","sub_path":"captchaModel/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
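A minimal usage sketch for the load_model/inference pair above (the image path is hypothetical; it assumes OpenCV is installed and the frozen-graph paths hard-coded in the module exist):

import cv2

sess, detection_graph = load_model()
img = cv2.imread('sample_captcha.png')  # hypothetical CAPTCHA image
text = inference(sess, detection_graph, img)
print('decoded CAPTCHA:', text)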
+{"seq_id":"70231025043","text":"import pytest\nfrom time import sleep\nfrom datetime import (\n\tdate,\n\ttimedelta\n)\nfrom utilities import XLUtility\nfrom pageObjects.common_functions.common_methods import CommonMethods\n\n\n# This test checks the functionality of creating a session\n@pytest.mark.usefixtures(\"one_time_setup\")\nclass Test_TC101_101_CreateSession():\n\n\t@pytest.fixture(autouse=True)\n\tdef classSetup(self, one_time_setup):\n\t\tself.logIn()\n\n\tdef test_create_session(self):\n\t\tcommon = CommonMethods(self.driver)\n\t\tself.log.info(\"starting test {}...\".format(__name__))\n\t\tself.driver.set_window_size(411, 823)\n\n\t\ttoday_date = date.today()\n\t\tcurrent_weekday = today_date.weekday()\n\n\t\t# delete any existing session\n\t\tcommon.mobile_delete_existing_session()\n\n\t\t# delete the sessions from client side\n\t\tclient_name = XLUtility.readData(self.path, 'session_mobile_data', 2, 3)\n\t\tsleep(1)\n\t\tself.login_page_obj.clk_navigation_btn()\n\t\tsleep(1)\n\t\tself.client_page_obj.clk_all_clients_mobile()\n\t\tsleep(1)\n\t\tself.client_page_obj.mobile_sel_client_name(client_name)\n\t\tsleep(1)\n\t\tself.client_page_obj.clk_view_client_mobile()\n\t\tsleep(1)\n\t\tself.notes_page_obj.clk_session_notes()\n\t\tsleep(1)\n\t\tcommon.delete_mobile_prior_session_note()\n\t\tsleep(2)\n\t\tself.login_page_obj.clk_navigation_btn()\n\t\t# Start creating session\n\t\tsleep(1)\n\t\tself.login_page_obj.clk_mobile_calendar()\n\t\tsleep(1)\n\t\tself.calendar_page_obj.click_add_session()\n\t\tsleep(1)\n\t\t# Complete the Session details form\n\n\t\t# Select Client\n\t\tsleep(1)\n\t\tself.calendar_page_obj.input_clientname(client_name)\n\t\tsleep(1)\n\n\t\t# Select Room\n\t\troom = XLUtility.readData(self.path, 'session_mobile_data', 2, 6)\n\t\tself.calendar_page_obj.sel_room(room)\n\n\t\t# Select Date and Time\n\t\tsleep(1)\n\n\t\tif current_weekday < 3:\n\t\t\tN = 3 - current_weekday\n\t\t\tmeeting_date = today_date + timedelta(days=N)\n\t\t\tself.date_time = str(meeting_date) + \" 9:00am\"\n\n\t\tif current_weekday >= 3:\n\t\t\tN = 10 - current_weekday\n\t\t\tmeeting_date = today_date + timedelta(days=N)\n\t\t\tself.date_time = str(meeting_date) + \" 9:00am\"\n\t\tself.calendar_page_obj.txt_date_time(self.date_time)\n\t\tsleep(1)\n\n\t\t# Select Service type (CBT, Counselling, etc.)\n\t\tservice = XLUtility.readData(self.path, 'session_mobile_data', 2, 5)\n\t\tself.calendar_page_obj.sel_service(service)\n\n\t\t# Click on Create Session\n\t\tsleep(1)\n\t\tself.calendar_page_obj.clk_create_session()\n\t\tsleep(1)\n\t\tself.calendar_page_obj.clk_btn_calendar_view()\n\t\tsleep(1)\n\t\tself.calendar_page_obj.clk_btn_calendar_week_view()\n\n\t\tif current_weekday < 3 or current_weekday == 6:\n\t\t\tself.calendar_page_obj.clk_mobile_session_info()\n\n\t\telse:\n\t\t\t# self.calendar_page_obj.clk_move_to_next_week()\n\t\t\tself.calendar_page_obj.clk_mobile_session_info()\n\t\tsleep(1)\n\t\tmobile_session_details = self.calendar_page_obj.mobile_session_details()\n\n\t\t# View the detail of the session created\n\t\tsleep(1)\n\t\tself.calendar_page_obj.clk_mobile_more_information()\n\t\tsleep(1)\n\t\tself.calendar_page_obj.clk_delete_session()\n\t\tsleep(1)\n\t\tself.calendar_page_obj.clk_delete_session_warn()\n\t\texp_date_time = \"Thu, \" + meeting_date.strftime(\"%b %-d\") + \" - 9:00am to 10:00am\"\n\n\t\tif exp_date_time in mobile_session_details:\n\t\t\t\t\tself.log.info(\"{} passed!\".format(__name__))\n\t\t\t\t\tassert 
True\n\t\telse:\n\t\t\t\t\tself.driver.save_screenshot(\n\t\t\t\t\t\tself.pathScreenShot + \"Test_TC101_101_CreateSession\" + self.dateFormat + \".png\"\n\t\t\t\t\t)\n\t\t\t\t\tself.log.info(\"{} failed!\".format(__name__))\n\t\t\t\t\tassert False\n","repo_name":"harry-100/qa-automation-framework","sub_path":"testCases/calendar/full_calendar/mobile/TC_101_101_mobile_create_session_test.py","file_name":"TC_101_101_mobile_create_session_test.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22567970843","text":"import time\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom core.config import Config\n\ndef wait_for_engine(db_url: str, retries: int = 3) -> None:\n engine = None\n while retries > 0:\n try:\n engine = create_engine(db_url)\n engine.connect()\n break\n except Exception:\n retries -= 1\n time.sleep(5)\n if engine is None:\n raise Exception(\"Failed to connect to PostgreSQL\")\n\nengine_url = f\"postgresql://{Config.POSTGRES_USER}:{Config.POSTGRES_PASSWORD}@postgresql:5432/{Config.POSTGRES_DB}\"\nwait_for_engine(engine_url)\n\nengine = create_engine(engine_url)\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\nBase = declarative_base()\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()","repo_name":"acekun141/fastapi-celery-rabbitmq-docker","sub_path":"api/db/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"44178777173","text":"from __future__ import annotations\nimport queue\nimport threading\nfrom typing import NoReturn, Optional, Callable, Any\n\n\nclass ThreadPool:\n def __init__(self, max_workers: Optional[int]):\n self.max_workers = max_workers\n self.tasks = queue.Queue()\n self.workers = []\n\n def submit(self, func: Optional[Callable], *args: Optional[Any],\n thread_name: Optional[str] = None, **kwargs: Optional[Any]) -> ThreadPool:\n self.tasks.put((func, args, kwargs, thread_name))\n return self\n\n def start(self) -> NoReturn:\n for i in range(self.max_workers):\n worker = threading.Thread(target=self._worker)\n worker.daemon = True\n worker.start()\n self.workers.append(worker)\n\n def _worker(self) -> NoReturn:\n while True:\n func, args, kwargs, thread_name = self.tasks.get()\n try:\n if thread_name:\n threading.current_thread().name = thread_name\n func(*args, **kwargs)\n except Exception as e:\n print(e)\n finally:\n self.tasks.task_done()\n\n def wait_completion(self) -> NoReturn:\n self.tasks.join()\n","repo_name":"StrawberryCake-Fish/ClockIn-Py","sub_path":"src/utils/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12617547793","text":"\"\"\"\nThe main function for the XSS linter.\n\"\"\"\n\n\nimport argparse\nimport importlib\nimport os\nimport sys\nfrom functools import reduce\n\nfrom xsslint.reporting import SummaryResults\nfrom xsslint.rules import RuleSet\nfrom xsslint.utils import is_skip_dir\n\n\ndef _load_config_module(module_path):\n cwd = os.getcwd()\n if cwd not in sys.path:\n # Enable config module to be imported relative to wherever the script was run from.\n sys.path.append(cwd)\n return importlib.import_module(module_path)\n\n\ndef _build_ruleset(template_linters):\n \"\"\"\n Combines the RuleSets from the provided template_linters into a single, aggregate RuleSet.\n\n Arguments:\n template_linters: A list of linting objects.\n\n Returns:\n The combined RuleSet.\n \"\"\"\n return reduce(\n lambda combined, current: combined + current.ruleset,\n template_linters,\n RuleSet()\n )\n\n\ndef _process_file(full_path, template_linters, options, summary_results, out):\n \"\"\"\n For each linter, lints the provided file. This means finding and printing\n violations.\n\n Arguments:\n full_path: The full path of the file to lint.\n template_linters: A list of linting objects.\n options: A list of the options.\n summary_results: A SummaryResults with a summary of the violations.\n out: output file\n\n \"\"\"\n num_violations = 0\n directory = os.path.dirname(full_path)\n file_name = os.path.basename(full_path)\n try:\n for template_linter in template_linters:\n results = template_linter.process_file(directory, file_name)\n results.print_results(options, summary_results, out)\n except BaseException as e:\n raise Exception(f\"Failed to process path: {full_path}\") from e\n\n\ndef _process_os_dir(directory, files, template_linters, options, summary_results, out):\n \"\"\"\n Calls out to lint each file in the passed list of files.\n\n Arguments:\n directory: Directory being linted.\n files: All files in the directory to be linted.\n template_linters: A list of linting objects.\n options: A list of the options.\n summary_results: A SummaryResults with a summary of the violations.\n out: output file\n\n \"\"\"\n for current_file in sorted(files, key=lambda s: s.lower()):\n full_path = os.path.join(directory, current_file)\n _process_file(full_path, template_linters, options, summary_results, out)\n\n\ndef _process_os_dirs(starting_dir, template_linters, options, summary_results, out):\n \"\"\"\n For each linter, lints all the directories in the starting directory.\n\n Arguments:\n starting_dir: The initial directory to begin the walk.\n template_linters: A list of linting objects.\n options: A list of the options.\n summary_results: A SummaryResults with a summary of the violations.\n out: output file\n\n \"\"\"\n skip_dirs = options.get('skip_dirs', ())\n for root, dirs, files in os.walk(starting_dir):\n if is_skip_dir(skip_dirs, root):\n del dirs\n continue\n dirs.sort(key=lambda s: s.lower())\n _process_os_dir(root, files, template_linters, options, summary_results, out)\n\n\ndef _lint(file_or_dir, template_linters, options, summary_results, out):\n \"\"\"\n For each linter, lints the provided file or directory.\n\n Arguments:\n file_or_dir: The file or initial directory to lint.\n template_linters: A list of linting objects.\n options: A list of the options.\n summary_results: A SummaryResults with a summary of the violations.\n out: output file\n\n \"\"\"\n\n if file_or_dir is not None and os.path.isfile(file_or_dir):\n _process_file(file_or_dir, template_linters, options, 
summary_results, out)\n else:\n directory = \".\"\n if file_or_dir is not None:\n if os.path.exists(file_or_dir):\n directory = file_or_dir\n else:\n raise ValueError(f\"Path [{file_or_dir}] is not a valid file or directory.\")\n _process_os_dirs(directory, template_linters, options, summary_results, out)\n\n summary_results.print_results(options, out)\n\n\ndef main():\n \"\"\"\n Used to execute the linter. Use --help option for help.\n\n Prints all violations.\n \"\"\"\n epilog = \"For more help using the xss linter, including details on how to\\n\"\n epilog += \"understand and fix any violations, read the docs here:\\n\"\n epilog += \"\\n\"\n # pylint: disable=line-too-long\n epilog += \" https://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/preventing_xss.html#xss-linter\\n\"\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='Checks that templates are safe.',\n epilog=epilog,\n )\n parser.add_argument(\n '--list-files', dest='list_files', action='store_true',\n help='Only display the filenames that contain violations.'\n )\n parser.add_argument(\n '--rule-totals', dest='rule_totals', action='store_true',\n help='Display the totals for each rule.'\n )\n parser.add_argument(\n '--summary-format', dest='summary_format',\n choices=['eslint', 'json'], default='eslint',\n help='Choose the display format for the summary.'\n )\n parser.add_argument(\n '--verbose', dest='verbose', action='store_true',\n help='Print multiple lines where possible for additional context of violations.'\n )\n parser.add_argument(\n '--config', dest='config', action='store', default='xsslint.default_config',\n help='Specifies the config module to use. The config module should be in Python package syntax.'\n )\n parser.add_argument('path', nargs=\"?\", default=None, help='A file to lint or directory to recursively lint.')\n\n args = parser.parse_args()\n config = _load_config_module(args.config)\n options = {\n 'list_files': args.list_files,\n 'rule_totals': args.rule_totals,\n 'summary_format': args.summary_format,\n 'verbose': args.verbose,\n 'skip_dirs': getattr(config, 'SKIP_DIRS', ())\n }\n template_linters = getattr(config, 'LINTERS', ())\n if not template_linters:\n raise ValueError(f\"LINTERS is empty or undefined in the config module ({args.config}).\")\n\n ruleset = _build_ruleset(template_linters)\n summary_results = SummaryResults(ruleset)\n _lint(args.path, template_linters, options, summary_results, out=sys.stdout)\n","repo_name":"openedx/edx-platform","sub_path":"scripts/xsslint/xsslint/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"35023496761","text":"import numpy as np\nimport random\nfrom copy import deepcopy\nfrom settings import _AI_LEVEL, _O_COLOR\nfrom Piece.Bishop import Bishop\nfrom Piece.King import King\nfrom Piece.Queen import Queen\nfrom Piece.Knight import Knight\nfrom Piece.Rook import Rook\nfrom Piece.Pawn import Pawn\n\nclass Board:\n\n def __init__(self, pieceslist):\n self.checkState = False\n self.piecesOnBoard = pieceslist\n self.board = np.array([\"**\" for i in range(64)]).reshape((8, 8))\n self.pieceslist = pieceslist\n\n self.blackscore = 0\n self.whitescore = 0\n\n for all in self.pieceslist:\n self.board[all.position[0], all.position[1]] = all.__repr__()\n if all.color == 'w':\n self.whitescore += all.points\n\n elif all.color == 'b':\n self.blackscore += all.points\n\n def refresh_to(self, pieceslist):\n self.piecesOnBoard = pieceslist\n self.board = np.array([\"**\" for i in range(64)]).reshape((8, 8))\n self.pieceslist = pieceslist\n\n self.blackscore = 0\n self.whitescore = 0\n\n for all in self.pieceslist:\n self.board[all.position[0], all.position[1]] = all.__repr__()\n if all.color == 'w':\n self.whitescore += all.points\n\n elif all.color == 'b':\n self.blackscore += all.points\n\n def update_board(self):\n\n self.board = np.array([\"**\" for i in range(64)]).reshape((8, 8))\n\n for all in self.pieceslist:\n self.board[all.position[0], all.position[1]] = all.__repr__()\n\n def KickfromList_Position(self, pos):\n for pees in self.pieceslist:\n if pees.position == pos:\n\n if pees.color == 'w':\n self.whitescore -= pees.points\n elif pees.color == 'b':\n self.blackscore -= pees.points\n\n self.pieceslist.remove(pees)\n\n return str(pees)\n\n def KickfromList(self, pees):\n\n if pees.color == 'w':\n self.whitescore -= pees.points\n elif pees.color == 'b':\n self.blackscore -= pees.points\n\n self.pieceslist.remove(pees)\n\n return str(pees)\n\n def Add_to_List(self, Piece1):\n self.pieceslist.append(Piece1)\n if Piece1.color == 'w':\n self.whitescore += Piece1.points\n elif Piece1.color == 'b':\n self.blackscore += Piece1.points\n\n '''\n all moves are passed including hit and mmove arrays not advanced mvoes\n return array for both arrays in after check\n '''\n\n def Board_isCheck(self, King1, moveslist, hitslist, ListPieces):\n pass\n\n moves, hits = [], []\n nogo = []\n\n pieces = [\n pieces for pieces in self.pieceslist if pieces.color != King1.color]\n\n pm = []\n for p in pieces:\n parr = p.moves(ListPieces)\n pm.extend(parr[0])\n pm.extend(parr[1])\n\n for m in moveslist:\n if m not in pm:\n moves.append(m)\n else:\n nogo.append(m)\n\n for m in hitslist:\n if m not in pm:\n hits.append(m)\n else:\n nogo.append(m)\n\n return moves, hits, nogo\n\n def Board_isCheck_Non_King_Move(self, piece, newPos, oldPos, hitMove=False):\n\n myKing = None\n for aPiece in self.pieceslist:\n if aPiece.color == piece.color and aPiece.name == \"K\":\n myKing = aPiece\n break\n\n if (myKing == None):\n return True\n\n # # IF KING IS ALREADY BEING ATTACKED WITHOUT MOVING TO NEW POSITION\n enemyPieces = []\n for aPiece in self.pieceslist:\n if aPiece.color != piece.color:\n enemyPieces.append(aPiece)\n\n # for aPiece in enemyPieces:\n # enemyHitMoves = aPiece.moves(self.pieceslist)[1]\n # if myKing.position in enemyHitMoves:\n # return True\n\n # NOW CHECK AFTER MOVING TO NEW POSITION\n piece.position = newPos\n\n canFilter = False\n numHits = 0\n\n for aPiece in enemyPieces:\n enemyHitMoves = aPiece.moves(self.pieceslist)[1]\n if myKing.position in enemyHitMoves:\n\n # CHECKING SCENARIO FOR 
HIT MOVE\n if hitMove == True:\n canFilter = True\n numHits += 1\n\n else:\n piece.position = oldPos\n return False\n\n piece.position = oldPos\n\n if canFilter == True and numHits == 1:\n return True\n elif canFilter == True and numHits > 1:\n return False\n elif canFilter == False:\n return True\n\n def get_board(self):\n return self.board\n\n def mark_moves(self, listmoves):\n for i in listmoves:\n if i != None:\n self.board[i[0], i[1]] = \"$$\"\n\n def get_moves(self, color):\n\n moves = []\n for pieces in self.piecesOnBoard:\n if pieces != None:\n if pieces.color == color:\n moves.append(pieces.moves(self.piecesOnBoard))\n\n return moves\n\n def Get_Heuristic_difference(self, color):\n if color == 'w':\n return self.whitescore - self.blackscore\n\n if color == 'b':\n return self.blackscore - self.whitescore\n\n def createBoardCopy(self, listofpieces, MyBoard):\n\n newList = []\n for one in listofpieces:\n if one.name == \"B\":\n newList.append(Bishop(one.color, one.position))\n elif one.name == \"K\":\n PP = King(one.color, one.position)\n PP.hasMoved = one.hasMoved\n newList.append(PP)\n elif one.name == \"k\":\n newList.append(Knight(one.color, one.position))\n elif one.name == \"P\":\n PP = Pawn(one.color, one.position)\n PP.FirstTurn = one.FirstTurn\n newList.append(PP)\n elif one.name == \"Q\":\n newList.append(Queen(one.color, one.position))\n elif one.name == \"R\":\n PP = Rook(one.color, one.position)\n PP.hasMoved = one.hasMoved\n newList.append(PP)\n\n B = Board(newList)\n\n if B.board.dtype != None:\n B.board = np.array([\"**\" for i in range(64)]).reshape((8, 8))\n B.checkState = MyBoard.checkState\n\n return B\n\n # parrallelizeable\n def generate_all_move_pees(self, pees, board):\n c = pees.color\n\n listofpieces = [all for all in board.pieceslist]\n\n listofboards = []\n Nmoves = pees.moves(listofpieces)\n\n for m in Nmoves[0]:\n B = self.createBoardCopy(listofpieces, board)\n\n for n in B.pieceslist:\n\n if n.position == pees.position:\n n.Move_To(m)\n # B.update_board()\n self.isAIKingDying(B)\n if not B.checkState:\n listofboards.append(B)\n break\n\n for m in Nmoves[1]:\n\n B = self.createBoardCopy(listofpieces, board)\n\n for n in B.pieceslist:\n if n.position == pees.position:\n B.KickfromList_Position(m)\n n.Move_To(m)\n # B.update_board()\n self.isAIKingDying(B)\n if not B.checkState:\n listofboards.append(B)\n break\n\n return listofboards\n\n def isAIKingDying(self, board):\n MYPieces = [all for all in board.pieceslist if all.color == 'b']\n \n for piece in board.pieceslist:\n if piece.color == 'w' and piece.name == 'K':\n enemyKing = piece\n break\n\n for piece in MYPieces:\n allMoves = piece.moves(board.pieceslist)\n\n if allMoves != None:\n if allMoves[1] != None and allMoves[1] != []:\n for mov in allMoves[1]:\n if mov == enemyKing.position:\n board.checkState = True\n return\n\n board.checkState = False\n\n def generate_all_possible_moves(self, color, board):\n\n listofboards = []\n colorpieces = [all for all in board.pieceslist if all.color == color]\n # print(colorpieces)\n\n for p in colorpieces:\n # print(\"yeet\")\n listofboards.extend(board.generate_all_move_pees(p, board))\n\n return listofboards\n # return sorted(listofboards,key = lambda x : x.Get_Heuristic_difference(color))\n\n def Random_AI_Move(self, color, board):\n B = board.generate_all_possible_moves(color, board)\n b = random.choice(B)\n\n return b\n\n def Intelligent_AI_Move(self, color=_O_COLOR, depth=_AI_LEVEL['MOVES_DEPTH']):\n\n newBoard = self.createBoardCopy(self.pieceslist, 
self)\n        return MIN_MAX(depth, self)\n\n    def Game_End(self):\n        if self.whitescore < 1000 or self.blackscore < 1000:\n            return True\n        return False\n\n    def Winner(self):\n        if (self.blackscore > self.whitescore) and _O_COLOR == 'b':\n            print(\"YOU LOSE\")\n\n        if (self.blackscore < self.whitescore) and _O_COLOR == 'b':\n            print(\"YOU WIN!!!!\")\n\n        if (self.blackscore < self.whitescore) and _O_COLOR == 'w':\n            print(\"YOU LOSE\")\n\n        if (self.blackscore > self.whitescore) and _O_COLOR == 'w':\n            print(\"YOU WIN!!!!\")\n\n\ndef MIN_MAX(depth, board, MoveChoice=True, alpha=-np.inf, beta=np.inf):\n    if depth == 0 or board.Game_End():\n        return None, board.Get_Heuristic_difference(_O_COLOR)\n\n    moves = board.generate_all_possible_moves(_O_COLOR, board)\n    random.shuffle(moves)\n    try:\n        BestGeneratedMoves = random.choice(moves)\n    except IndexError:\n        print(\"YOU WON, AI LOST\")\n        exit(0)\n\n    if MoveChoice:  # true for max\n        val_max = -np.inf\n\n        for move in moves:\n            active_value = MIN_MAX(depth-1, move, False, alpha, beta)[1]\n\n            if active_value > val_max:\n                val_max = active_value\n                BestGeneratedMoves = move\n            alpha = max(alpha, active_value)\n            if beta <= alpha:\n                break\n        return BestGeneratedMoves, val_max\n    else:\n        val_min = np.inf\n        for move in moves:\n            active_value = MIN_MAX(depth-1, move, True, alpha, beta)[1]\n\n            if active_value < val_min:\n                val_min = active_value\n                BestGeneratedMoves = move\n            beta = min(beta, active_value)\n            if beta <= alpha:\n                break\n        return BestGeneratedMoves, val_min\n","repo_name":"aliimran2000/Chess-Engine-AI","sub_path":"chess-ai/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":10809,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"}
+{"seq_id":"39250702645","text":"class Student():\n\n @property\n def first_name(self):\n try:\n return self.__first_name\n except AttributeError:\n return 0\n\n @first_name.setter\n def first_name(self, first):\n if type(first) is str:\n self.__first_name = first\n else:\n raise TypeError('please enter a string value for first_name')\n \n @property\n def last_name(self):\n try:\n return self.__last_name\n except AttributeError:\n return 0\n\n @last_name.setter\n def last_name(self, last):\n if type(last) is str:\n self.__last_name = last\n else:\n raise TypeError('please enter a string value for first_name')\n\n @property\n def age(self):\n try:\n return self.__age\n except AttributeError:\n return 0\n\n @age.setter\n def age(self, age):\n if type(age) is int:\n self.__age = age\n else:\n raise TypeError('please enter age as integer')\n\n @property\n def cohort(self):\n try:\n return self.cohort\n except:\n return 0\n \n @cohort.setter\n def cohort(self, cohort_number):\n if type(cohort_number) is int:\n self.__cohort = cohort_number\n else:\n raise TypeError('cohort must be an integer')\n \n @property\n def full_name(self):\n try:\n return f\"{self.first_name} {self.last_name}\"\n except AttributeError:\n return 0\n \n def __str__(self):\n return f\"{self.full_name} is {self.age} years old and is in Cohort {self.cohort}\"\n \n\nmike = Student()\nmike.first_name = \"Mike\"\nmike.last_name = \"Ellis\"\nmike.age = 35\nmike.cohort_number = 39\n\nprint(mike)\n\n \n","repo_name":"mister-michael/classProperties","sub_path":"solidStudent/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36437037965","text":"def droppedRequests(requestTime):\n # Write your code here\n fails = 0\n maps = {}\n for i in range(len(requestTime)):\n\n k = requestTime[i]\n\n #3 a second\n if k not in maps:\n maps[k] = 1\n else:\n maps[k] += 1\n if maps[k] >= 4:\n fails += 1\n\n print(maps[k])\n\n #20 in 10 seconds\n if i >= 20:\n sub = requestTime[(i-20):i+1]\n print(i)\n print(sub)\n if (sub[-1] - sub[0]) < 10:\n fails += 1\n\n #60 in a minute\n if i >= 60:\n sub = requestTime[(i-60):i+1]\n if (sub[-1] - sub[0]) < 60:\n fails += 1\n return fails\n","repo_name":"sinoyuco/leetcode_solutions","sub_path":"array/dropped_requests.py","file_name":"dropped_requests.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73527114321","text":"\"\"\"Выведите на экран все числа от 5 до 25 след обр\nчисла кратные 3 и 6 - пропустить\nкратно 5 напечатать строку Н кратно 5, где Н текущее число\nвсе остальные напечатать как есть\"\"\"\nfor i in range(5, 26):\n if i % 3 == 0 and i % 6 == 0:\n continue\n if i % 5 == 0:\n print(i, \"кратно 5\")\n else:\n print(i)","repo_name":"Zyoger/My-First-Repository","sub_path":"Python/UDEMI lesson/Exercise29.py","file_name":"Exercise29.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"44018636179","text":"# -*- coding: utf-8 -*-\nimport json\nimport sys\n\nkey = sys.argv[1]\nwith open(key, 'r') as f:\n data = f.read()\n\nbb = json.loads(data)\ncc = bb['records']\nfor dd in cc:\n type = dd['type']\n name = dd['name'] + '.' + key\n value = dd['value']\n status = dd['status']\n line = dd['line']\n\n msg = \"%s,%s,%s,%s,%s\\n\" % (name, type, value, status, line)\n out = key + '.log'\n with open(out, 'a+') as f:\n f.write(msg)\n","repo_name":"sydt2014/bigdata-deploy","sub_path":"modules/ansible/scripts/switch_dict.py","file_name":"switch_dict.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24444375054","text":"from turtle import Turtle, Screen\r\nimport random\r\n\r\ncolors = ['red', 'green', 'blue', 'orange', 'yellow', 'pink']\r\nscreen = Screen()\r\nuser_bet = screen.textinput(title='Make your bet!', prompt=\"Which colour of turtle will win? : ('red',\"\r\n \" 'green', 'blue', 'orange', 'yellow', 'pink')\")\r\nscreen.setup(800, 800)\r\nclo = -1\r\nposition = -50\r\nturtles = []\r\n\r\nfor _ in range(0, 6):\r\n clo += 1\r\n position += 50\r\n tur = Turtle(shape='turtle')\r\n tur.color(colors[clo])\r\n tur.penup()\r\n tur.goto(x=-350, y=-125 + position)\r\n turtles.append(tur)\r\n\r\nis_on = True\r\nwhile is_on:\r\n for tur in turtles:\r\n distance = random.randint(0, 10)\r\n tur.forward(distance)\r\n if tur.xcor() > 350:\r\n winner = tur.pencolor()\r\n is_on = False\r\n if user_bet != winner:\r\n Turtle().write(f\"The winner is {winner} turtle.\\nYour choice is {user_bet} turtle.You lose!\",\r\n align=\"center\", font=(\"Arial\", 20, \"normal\"))\r\n else:\r\n Turtle().write(f\"The winner is {winner} turtle.\\nYou win!\",\r\n align=\"center\", font=(\"Arial\", 20, \"normal\"))\r\nTurtle().hideturtle()\r\n\r\nscreen.exitonclick()\r\n","repo_name":"robenleaos30/Turtle-Running-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"7976364362","text":"import os\nimport random\n\n\ndef nvu_split(im_dir:str, save_path:str,train_per=0.9):\n f = open(os.path.join(save_path, 'split.csv'), mode='w')\n im_list = os.listdir(im_dir)\n im_list_len = len(im_list)\n random.shuffle(im_list)\n train_num = int(im_list_len * train_per)\n for i in range(train_num):\n s = \"%s,%d\\n\"%(im_list[i], 0)\n f.write(s)\n for i in range(train_num, im_list_len):\n s = \"%s,%d\\n\" % (im_list[i], 1)\n f.write(s)\n f.close()\n\nif __name__ == '__main__':\n nvu_split(\"K:/Dehaze/nyu_depth_v2/rgb\", \"K:\\Dehaze/nyu_depth_v2\")","repo_name":"mikuzip01/DOC-Net","sub_path":"utils/NVUV2_train_test_split.py","file_name":"NVUV2_train_test_split.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"40770800036","text":"\"\"\"empty message\n\nRevision ID: e6999daf4674\nRevises: a07ccb02144f\nCreate Date: 2021-11-17 13:09:08.372978\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e6999daf4674'\ndown_revision = 'a07ccb02144f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('items', sa.Column('discount', sa.Integer(), nullable=False))\n op.add_column('items', sa.Column('condition', sa.String(length=15), nullable=False))\n op.add_column('items', sa.Column('count', sa.Integer(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('items', 'count')\n op.drop_column('items', 'condition')\n op.drop_column('items', 'discount')\n # ### end Alembic commands ###\n","repo_name":"benthere914/Acquire-Market-Place","sub_path":"migrations/versions/20211117_130908_.py","file_name":"20211117_130908_.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"11931923442","text":"#!/usr/bin/env python3\nimport click\nimport os\n\ndef is_next_to_open_parenth(line, idx):\n for i in range(idx, len(line)):\n if line[i] == ' ':\n continue\n if line[i] == '(':\n return True\n else:\n return False\n\ndef correct_print(fpath):\n fpath_out = fpath + '_out'\n with open(fpath, 'r') as fi:\n with open(fpath_out, 'w') as fo:\n for line in fi:\n idx = line.find('print')\n if idx==-1:\n fo.write(line)\n continue\n # deal with 'print'\n idx += len('print')\n if is_next_to_open_parenth(line, idx):\n fo.write(line)\n continue\n # need to insert parenthese\n line_out = line[:idx] + '(' + line[idx:-1] + ')' + line[-1]\n fo.write(line_out)\n\n@click.command()\n@click.argument('filepath', type=click.STRING)\ndef main(filepath):\n if not os.path.exists(filepath):\n click.echo(f'invalid file path: {filepath}')\n return\n correct_print(filepath)\n\nif __name__ == '__main__':\n main()","repo_name":"seanwu-ec/misc_utilities","sub_path":"print_to_py3.py","file_name":"print_to_py3.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21971683928","text":"# implement a function called main that prompts the user for input, calls convert on that input, and prints the result. You’re welcome, but not required, to prompt the user explicitly, as by passing a str of your own as an argument to input. Be sure to call main at the bottom of your file.\n\n# from lib2to3.pytree import convert\n\n# main()\ndef main():\n str = input()\n str = convert(str)\n print(str)\n\n# fuction to convert all :) ans :( to smile or sad emoji\ndef convert(str):\n # replace all occurance of emoticons\n while \":)\" in str or \":(\" in str :\n if (\":)\" in str):\n str = str.replace(\":)\", \"🙂\")\n if (\":(\" in str):\n str = str.replace(\":(\", \"🙁\")\n\n return str\n\n# calling main()\nmain()","repo_name":"Ank1taS/CS50-python","sub_path":"pset0/makingFace.py","file_name":"makingFace.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29221118179","text":"def fib(n):\r\n \"\"\"Calculates the nth Fibonacci number.\"\"\"\r\n if n < 0:\r\n raise Exception(f\"Input {n} must be non-negative\")\r\n elif n == 0 or n == 1:\r\n return n\r\n else:\r\n return fib(n - 1) + fib(n - 2)\r\n\r\n\r\ndef fib_step_count(n):\r\n \"\"\"Calculates the number of steps required to calculate the nth Fibonacci number using the recursive approach.\"\"\"\r\n if n < 0:\r\n raise Exception(f\"Input {n} must be non-negative\")\r\n elif n == 0 or n == 1:\r\n return 1\r\n else:\r\n return fib_step_count(n - 1) + fib_step_count(n - 2) + 1\r\n\r\n\r\n# Example usage:\r\nif __name__ == \"__main__\":\r\n num = int(input(\"Enter Number : \"))\r\n print(f\"Fibonacci({num}) = {fib(num)}\")\r\n print(f\"Total step count: {fib_step_count(num)}\")\r\n\r\n\r\n'''\r\nEnter Number : 6\r\nFibonacci(6) = 8\r\nTotal step count: 25\r\n'''\r\n \r\n\r\n\r\n","repo_name":"Chinmayk2002/DAABTML","sub_path":"ASSIG1_PART1.py","file_name":"ASSIG1_PART1.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42590324191","text":"import random\r\n\r\n\r\nnumber =random.randint(1,10)\r\n\r\n\r\nwhile True:\r\n try :\r\n guess = int(input('Guess the the number :'))\r\n if(1 List[List[str]]:\n # Use two hash tables to keep track of name and emails\n # We need two because same name might not be same emails - aka key conflict\n # So we have one hash with key/val of a number and an array of respective emails\n # And a second hash with key/val of a name and an array of numbers, representing key for first hash\n # Maybe a third hash to keep track of seen emails and which number they went to\n \n # Helper func to combine lists\n def combine(arr):\n # arr will give us the keys to combine in sorted order.\n # We will combine everything with the first occurrence and delete the rest\n n = len(arr)\n ptr = 1\n \n while ptr < n:\n # Add everything from current pointer's email list to the one at 0th index\n for email in email_list[arr[ptr]]:\n # Add email to lowest key value\n email_list[arr[0]].append(email)\n # Change tracker\n tracker[email] = arr[0]\n \n # Now delete all traces of that key\n del email_list[arr[ptr]]\n del mapper[arr[ptr]]\n \n # Increment\n ptr += 1\n \n \n \n # Hash one - key is an ID with an array of emails that corresponds to that specific ID\n email_list = collections.defaultdict(list)\n \n # Hash two - correlates key values to its respective string name\n mapper = {}\n \n # Hash three - Keeps track of which ID the emails were sent to for quicker lookup\n tracker = {}\n \n # print(\"Hashes initialized\")\n # Iterate through each array in accounts\n curr_key = 0\n for acc in accounts:\n # For each one, we want to separate name from emails\n name = acc[0]\n emails = acc[1:]\n \n # print(\"Account name: {}\\nassociated emails: {}\".format(name, emails)) \n # We need to iterate through every email in emails and see if it's already\n # placed into someone's account. If so, we place that value into key instead\n connect = []\n for email in emails:\n if email in tracker:\n # print(\"Current email, {}, was found associated with another account key: {}\"\n # .format(email, tracker[email]))\n if tracker[email] not in connect:\n connect.append(tracker[email])\n \n \n if len(connect) == 1:\n key = connect[0]\n elif len(connect) > 1:\n connect.sort()\n combine(connect)\n key = connect[0]\n else:\n key = curr_key\n \n # print(\"Our key is currently: {}\".format(key))\n # Now we just need to place everything where it belongs\n # First place each email into email_list\n # As we do so, we'll add it to tracker, too\n for email in emails:\n # If it's in tracker, then we've already added it. No need to do it again\n if email not in tracker:\n email_list[key].append(email)\n tracker[email] = key\n # print(\"Email, {}, added to list of account key: {}. Tracker added\".format(email, key))\n \n # Then map the key to the name\n mapper[key] = name\n # print(\"Added key to map: {}\".format(mapper))\n curr_key += 1\n \n # At end of this for loop, we should have everything in email list and mapper for name\n # Just make our answer array and return it\n ans = []\n # print(\"Formulating final answer array\")\n for key in email_list:\n acc = []\n # First add the name\n acc.append(mapper[key])\n # print(\"Added name: {}\".format(mapper[key]))\n # Then add the emails after sorting\n email_list[key].sort()\n acc += email_list[key]\n # print(\"Adding emails to acc. 
Acc is now: {}\".format(acc))\n # Then add to ans\n ans.append(acc)\n # print(\"Appending to ans\")\n \n return ans\n ","repo_name":"PigsGoMoo/LeetCode","sub_path":"accounts-merge/accounts-merge.py","file_name":"accounts-merge.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12974790715","text":"from django.shortcuts import redirect, render\nfrom django.http import HttpResponse\nfrom .models import *\n# Create your views here.\n\ndef base(request):\n if request.method == \"POST\":\n x=request.POST['todo']\n Data= Todata(data=x)\n print(Data)\n Data.save()\n todata=Todata.objects.all()\n return render(request,'home.html',{'todata':todata})\n\ndef remove(request,i):\n x=Todata.objects.get(id=i)\n x.delete()\n todata=Todata.objects.all()\n return redirect('/')\n\ndef update(request,i):\n t=Todata.objects.get(id=i)# it will fetch id and data\n if request.method==\"POST\":\n x1=request.POST['u1']\n yy=Todata(data=x1)\n yy.save()\n return redirect('/')\n return render(request,'update.html',{'t':t})\n \n","repo_name":"prathaps123/TODO-APP-","sub_path":"todoapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"11577271151","text":"from typing import Dict, List, Union, Tuple\n\nfrom pavilion import parsers\nfrom pavilion import variables\nfrom pavilion.errors import TestConfigError, DeferredError, StringParserError, ParserValueError\n\nDEFERRED_PREFIX = '!deferred!'\nNO_DEFERRED_ALLOWED = [\n 'schedule',\n 'build',\n 'scheduler',\n 'chunk',\n 'only_if',\n 'not_if',\n]\n\n\ndef test_config(config, var_man):\n \"\"\"Recursively resolve the variables in the value strings in the given\n configuration.\n\n Deferred Variable Handling\n When a config value references a deferred variable, it is left\n unresolved and prepended with the DEFERRED_PREFIX. To complete\n these, use deferred().\n\n :param dict config: The config dict to resolve recursively.\n :param variables.VariableSetManager var_man: A variable manager. (\n Presumably a permutation of the base var_man)\n :return: The resolved config,\n \"\"\"\n\n resolved_dict = {}\n\n for section in config:\n try:\n resolved_dict[section] = section_values(\n component=config[section],\n var_man=var_man,\n allow_deferred=section not in NO_DEFERRED_ALLOWED,\n key_parts=(section,),\n )\n except (StringParserError, ParserValueError) as err:\n raise TestConfigError(\"Error parsing '{}' section\".format(section), err)\n\n for section in ('only_if', 'not_if'):\n try:\n if section in config:\n resolved_dict[section] = mapping_keys(\n base_dict=resolved_dict.get(section, {}),\n var_man=var_man,\n section_name=section)\n except (StringParserError, ParserValueError) as err:\n raise TestConfigError(\"Error parsing key '{}' section\".format(section), err)\n\n return resolved_dict\n\n\ndef deferred(config, var_man):\n \"\"\"Resolve only those values prepended with the DEFERRED_PREFIX. All\n other values are presumed to be resolved already.\n\n :param dict config: The configuration\n :param variables.VariableSetManager var_man: The variable manager. This\n must not contain any deferred variables.\n \"\"\"\n\n if var_man.deferred:\n deferred_name = [\n \".\".join([part for part in var_parts if part is not None])\n for var_parts in var_man.deferred\n ]\n\n raise RuntimeError(\n \"The variable set manager must not contain any deferred \"\n \"variables, but contained these: {}\"\n .format(deferred_name)\n )\n\n config = section_values(config, var_man, deferred_only=True)\n for section in ('only_if', 'not_if'):\n if section in config:\n config[section] = mapping_keys(\n base_dict=config.get(section, {}),\n var_man=var_man,\n section_name=section,\n deferred_only=True)\n\n return config\n\n\ndef mapping_keys(base_dict, var_man, section_name, deferred_only=False) -> dict:\n \"\"\"Some sections of the test config can have Pavilion Strings for\n keys. 
Resolve the keys of the given dict.\n\n :param dict[str,str] base_dict: The dict whose keys need to be resolved.\n :param variables.VariableSetManager var_man: The variable manager to\n use to resolve the keys.\n :param str section_name: The name of this config section, for error\n reporting.\n :param bool deferred_only: Resolve only deferred keys, otherwise\n mark deferred keys as deferred.\n :returns: A new dictionary with the updated keys.\n \"\"\"\n\n new_dict = type(base_dict)()\n for key, value in base_dict.items():\n new_key = section_values(\n component=key,\n var_man=var_man,\n allow_deferred=True,\n deferred_only=deferred_only,\n key_parts=(section_name + '[{}]'.format(key),))\n\n # The value will have already been resolved.\n new_dict[new_key] = value\n\n return new_dict\n\n\ndef section_values(component: Union[Dict, List, str],\n var_man: variables.VariableSetManager,\n allow_deferred: bool = False,\n deferred_only: bool = False,\n key_parts: Union[None, Tuple[str]] = None):\n \"\"\"Recursively resolve the given config component's value strings\n using a variable manager.\n\n :param component: The config component to resolve.\n :param var_man: A variable manager. (Presumably a permutation of the\n base var_man)\n :param allow_deferred: Allow deferred variables in this section.\n :param deferred_only: Only resolve values prepended with\n the DEFERRED_PREFIX, and throw an error if such values can't be\n resolved. If this is True deferred values aren't allowed anywhere.\n :param Union[tuple[str],None] key_parts: A list of the parts of the\n config key traversed to get to this point.\n :return: The component, resolved.\n :raises: RuntimeError, TestConfigError\n \"\"\"\n\n if key_parts is None:\n key_parts = tuple()\n\n if isinstance(component, dict):\n resolved_dict = type(component)()\n for key in component.keys():\n resolved_dict[key] = section_values(\n component[key],\n var_man,\n allow_deferred=allow_deferred,\n deferred_only=deferred_only,\n key_parts=key_parts + (key,))\n\n return resolved_dict\n\n elif isinstance(component, list):\n resolved_list = type(component)()\n for i in range(len(component)):\n resolved_list.append(\n section_values(\n component[i], var_man,\n allow_deferred=allow_deferred,\n deferred_only=deferred_only,\n key_parts=key_parts + (i,)\n ))\n return resolved_list\n\n elif isinstance(component, str):\n\n if deferred_only:\n # We're only resolving deferred value strings.\n\n if component.startswith(DEFERRED_PREFIX):\n component = component[len(DEFERRED_PREFIX):]\n\n try:\n resolved = parsers.parse_text(component, var_man)\n except DeferredError:\n raise RuntimeError(\n \"Tried to resolve a deferred config component, \"\n \"but it was still deferred: {}\"\n .format(component)\n )\n except StringParserError as err:\n raise TestConfigError(\n \"Error resolving value '{}' in config at '{}':\\n\"\n \"{}\\n{}\"\n .format(component, '.'.join(map(str, key_parts)),\n err.message, err.context))\n return resolved\n\n else:\n # This string has already been resolved in the past.\n return component\n\n else:\n if component.startswith(DEFERRED_PREFIX):\n # This should never happen\n raise RuntimeError(\n \"Tried to resolve a pavilion config string, but it was \"\n \"started with the deferred prefix '{}'. 
This probably \"\n \"happened because Pavilion called setup.config \"\n \"when it should have called deferred.\"\n .format(DEFERRED_PREFIX)\n )\n\n try:\n resolved = parsers.parse_text(component, var_man)\n except DeferredError:\n if allow_deferred:\n return DEFERRED_PREFIX + component\n else:\n raise TestConfigError(\n \"Deferred variable in value '{}' under key \"\n \"'{}' where it isn't allowed\"\n .format(component, '.'.join(map(str, key_parts))))\n except StringParserError as err:\n raise TestConfigError(\n \"Error resolving value '{}' in config at '{}':\\n\"\n \"{}\\n{}\"\n .format(component,\n '.'.join([str(part) for part in key_parts]),\n err.message, err.context))\n else:\n return resolved\n elif component is None:\n return None\n else:\n raise TestConfigError(\"Invalid value type '{}' for '{}' when \"\n \"resolving strings. Key parts: {}\"\n .format(type(component), component, key_parts))\n\n\ndef cmd_inheritance(test_cfg):\n \"\"\"Extend the command list by adding any prepend or append commands,\n then clear those sections so they don't get added at additional\n levels of config merging.\"\"\"\n\n for section in ['build', 'run']:\n config = test_cfg.get(section)\n if not config:\n continue\n new_cmd_list = []\n if config.get('prepend_cmds', []):\n new_cmd_list.extend(config.get('prepend_cmds'))\n config['prepend_cmds'] = []\n new_cmd_list += test_cfg[section]['cmds']\n if config.get('append_cmds', []):\n new_cmd_list.extend(config.get('append_cmds'))\n config['append_cmds'] = []\n test_cfg[section]['cmds'] = new_cmd_list\n\n return test_cfg\n","repo_name":"hpc/pavilion2","sub_path":"lib/pavilion/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":9314,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"3"}
+{"seq_id":"73071889042","text":"import os\nimport tempfile\n\nimport tensorflow as tf\nimport zipfile\nimport cloudpickle\nimport numpy as np\n\nimport baselines.common.tf_util as U\nfrom baselines.common.tf_util import load_state, save_state\nfrom baselines import logger\nfrom baselines.common.schedules import LinearSchedule\nfrom baselines.common.input import observation_input\n\nfrom baselines import deepq\nfrom baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\nfrom baselines.deepq.utils import ObservationInput\n\n\nclass ActWrapper(object):\n def __init__(self, act, act_params):\n self._act = act\n self._act_params = act_params\n\n @staticmethod\n def load(path):\n with open(path, \"rb\") as f:\n model_data, act_params = cloudpickle.load(f)\n act = deepq.build_act(**act_params)\n sess = tf.Session()\n sess.__enter__()\n with tempfile.TemporaryDirectory() as td:\n arc_path = os.path.join(td, \"packed.zip\")\n with open(arc_path, \"wb\") as f:\n f.write(model_data)\n\n zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)\n load_state(os.path.join(td, \"model\"))\n\n return ActWrapper(act, act_params)\n\n def __call__(self, *args, **kwargs):\n return self._act(*args, **kwargs)\n\n def save(self, path=None):\n \"\"\"Save model to a pickle located at `path`\"\"\"\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)\n\n\ndef load(path):\n \"\"\"Load act function that was returned by learn function.\n\n Parameters\n ----------\n path: str\n path to the act function pickle\n\n Returns\n -------\n act: ActWrapper\n function that takes a batch of observations\n and returns actions.\n \"\"\"\n return ActWrapper.load(path)\n\n\ndef learn(\n env,\n actor_deque,\n action_pipes,\n q_func,\n lr=5e-4,\n max_timesteps=100000,\n buffer_size=50000,\n exploration_fraction=0.1,\n exploration_final_eps=0.02,\n train_freq=1,\n batch_size=32,\n print_freq=100,\n checkpoint_freq=10000,\n checkpoint_path=None,\n learning_starts=1000,\n gamma=1.0,\n target_network_update_freq=500,\n prioritized_replay=False,\n prioritized_replay_alpha=0.6,\n prioritized_replay_beta0=0.4,\n prioritized_replay_beta_iters=None,\n prioritized_replay_eps=1e-6,\n param_noise=False,\n callback=None):\n \"\"\"Train a deepq model.\n\n Parameters\n -------\n env: gym.Env\n environment to train on\n actor_deque: structure is --> (ac_num, obs, action, new_obs, rew, done)\n action_pipes: structure is --> pipes_conn1 = [pipes[i][1] for i in range(0, 2)]\n use --> action_pipes[actor_num].send(s) default is str\n 至于为什么一处为deque,一处为pipe. 
well, the actor needs to receive an action before it can take its next step and blocks until it arrives,\n while the trainer is reactive: it has to compute whenever any actor has data, so deque.empty() is convenient.\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n lr: float\n learning rate for adam optimizer\n max_timesteps: int\n number of env steps to optimize for\n buffer_size: int\n size of the replay buffer\n exploration_fraction: float\n fraction of entire training period over which the exploration rate is annealed\n exploration_final_eps: float\n final value of random action probability\n train_freq: int\n update the model every `train_freq` steps.\n batch_size: int\n size of a batch sampled from replay buffer for training\n print_freq: int\n how often to print out training progress\n set to None to disable printing\n checkpoint_freq: int\n how often to save the model. This is so that the best version is restored\n at the end of the training. If you do not wish to restore the best version at\n the end of the training set this variable to None.\n learning_starts: int\n how many steps of the model to collect transitions for before learning starts\n under the async setup, should this be changed to start based on the amount of data in the replay_buffer?\n gamma: float\n discount factor\n target_network_update_freq: int\n update the target network every `target_network_update_freq` steps.\n prioritized_replay: bool\n if True prioritized replay buffer will be used.\n prioritized_replay_alpha: float\n alpha parameter for prioritized replay buffer\n prioritized_replay_beta0: float\n initial value of beta for prioritized replay buffer\n prioritized_replay_beta_iters: int\n number of iterations over which beta will be annealed from initial value\n to 1.0. If set to None equals to max_timesteps.\n prioritized_replay_eps: float\n epsilon to add to the TD errors when updating priorities.\n callback: (locals, globals) -> None\n function called at every step with state of the algorithm.\n If callback returns true training stops.\n\n Returns\n -------\n act: ActWrapper\n Wrapper over act function. Adds ability to save it and load it.\n See header of baselines/deepq/categorical.py for details on the act function.\n \"\"\"\n # Create all the functions necessary to train the model\n\n sess = tf.Session()\n sess.__enter__()\n\n def make_obs_ph(name):\n return ObservationInput(env.observation_space, name=name)\n\n act, train, update_target, debug = deepq.build_train(\n make_obs_ph=make_obs_ph,\n q_func=q_func,\n num_actions=env.action_space.n,\n optimizer=tf.train.AdamOptimizer(learning_rate=lr),\n gamma=gamma,\n grad_norm_clipping=10,\n param_noise=param_noise\n )\n\n act_params = {\n 'make_obs_ph': make_obs_ph,\n 'q_func': q_func,\n 'num_actions': env.action_space.n,\n }\n\n act = ActWrapper(act, act_params)\n\n # Create the replay buffer\n if prioritized_replay:\n replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)\n if prioritized_replay_beta_iters is None:\n prioritized_replay_beta_iters = max_timesteps\n beta_schedule = LinearSchedule(prioritized_replay_beta_iters,\n initial_p=prioritized_replay_beta0,\n final_p=1.0)\n else:\n replay_buffer = ReplayBuffer(buffer_size)\n beta_schedule = None\n # Create the schedule for exploration starting from 1. 
The exploration rate.\n exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),\n initial_p=1.0,\n final_p=exploration_final_eps)\n\n # Initialize the parameters and copy them to the target network.\n U.initialize()\n update_target()\n\n episode_rewards = [0.0]\n saved_mean_reward = None\n # obs = env.reset()\n reset = True\n done = None\n end = 100 # send an out-of-range action to tell the actors that training has ended\n\n with tempfile.TemporaryDirectory() as td:\n td = checkpoint_path or td\n model_file = os.path.join(td, \"model_tn\")\n model_saved = False\n if tf.train.latest_checkpoint(td) is not None:\n load_state(model_file)\n logger.log('Loaded model from {}'.format(model_file))\n model_saved = True\n\n # train for at most max_timesteps steps\n t = 0\n while t <= max_timesteps:\n if callback is not None:\n if callback(locals(), globals()):\n break\n if actor_deque.empty() is True:\n pass\n # time.sleep()\n else:\n actor_information = actor_deque.get()\n if actor_information[2] is None: # indicates the start of a new episode\n ac_num = actor_information[0]\n new_obs = actor_information[3]\n done = False # important\n # print(\"ac_num \"+str(ac_num)+\" start\")\n else:\n ac_num = actor_information[0]\n obs = actor_information[1]\n action = actor_information[2]\n new_obs = actor_information[3]\n rew = actor_information[4]\n done = actor_information[5]\n replay_buffer.add(obs, action, rew, new_obs, float(done))\n if done: # done and an episode start never occur together\n # obs = env.reset()\n # episode_rewards.append(0.0)\n reset = True\n else:\n # Take action and update exploration to the newest value\n kwargs = {}\n if not param_noise:\n update_eps = exploration.value(t)\n update_param_noise_threshold = 0.\n else:\n update_eps = 0.\n update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))\n kwargs['reset'] = reset\n kwargs['update_param_noise_threshold'] = update_param_noise_threshold\n kwargs['update_param_noise_scale'] = True\n action = act(np.array(new_obs)[None], update_eps=update_eps, **kwargs)[0]\n env_action = action\n reset = False\n action_pipes[ac_num-1].send(env_action) # note: ac_num and the pipe index are not aligned here\n # start training the network only after learning_starts steps (first store a certain amount of data in the buffer)\n # perform one gradient descent step every train_freq steps\n if t > learning_starts and t % train_freq == 0:\n # Minimize the error in Bellman's equation on a batch sampled from replay buffer.\n if prioritized_replay:\n experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) # note how beta is used\n (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience\n else:\n obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)\n # np.ones_like() : Return an array of ones with the same shape and type as a given array.\n weights, batch_idxes = np.ones_like(rewards), None\n td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)\n if prioritized_replay:\n new_priorities = np.abs(td_errors) + prioritized_replay_eps\n replay_buffer.update_priorities(batch_idxes, new_priorities)\n\n if t > learning_starts and t % target_network_update_freq == 0:\n # Update target network periodically.\n update_target()\n\n # the section below logs training info and saves the network parameters\n if print_freq is not None and t % print_freq == 0:\n logger.record_tabular(\"total_steps\", t)\n # logger.record_tabular(\"episodes\", num_episodes)\n # logger.record_tabular(\"mean 20 episode reward\", mean_100ep_reward)\n logger.record_tabular(\"% time spent exploring\", int(100 * exploration.value(t)))\n logger.dump_tabular()\n\n # the model is only saved every checkpoint_freq steps, and when the mean reward has improved\n if checkpoint_freq is not None and t > learning_starts and t % 
checkpoint_freq == 0:\n save_state(model_file)\n model_saved = True\n t += 1\n # at this point, training is finished\n # end = True\n for i in range(0, len(action_pipes)):\n action_pipes[i].send(end) # end = 100\n # save the best model after training finishes\n # if model_saved:\n # if print_freq is not None:\n # logger.log(\"Restored model with mean reward: {}\".format(saved_mean_reward))\n # load_state(model_file)\n # return an ActWrapper, to be used for act.save(\"cartpole_model.pkl\") or other operations\n return act\n","repo_name":"yxBeginner/RL-and-Robot","sub_path":"deepq/asyn_sec/simple_multi_agent.py","file_name":"simple_multi_agent.py","file_ext":"py","file_size_in_byte":13539,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"3"}
+{"seq_id":"27416182053","text":"from work.auxiliary import data_functions\nimport cv2\n\nfrom work.segmentation import segmentation\nimport os\n\nfrom work.auxiliary.logger_settings import configure_logger\nimport logging\n\nLOG_PATH = os.path.abspath('logs')\nDATA_PATH = os.path.abspath('data')\n\nlog_path = data_functions.create_path(LOG_PATH, 'segmentation_logs')\n\nconfigure_logger(name=\"segmentation\",\n console_level='INFO',\n file_level='INFO',\n out_path=log_path)\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n orig_path = os.path.join(DATA_PATH, r'raw_data\\with_maskes\\image')\n #orig_path = os.path.join(DATA_PATH, r'segmentation\\src2')\n mask_path = os.path.join(DATA_PATH,r'raw_data\\with_maskes\\label')\n dest_path = os.path.join(DATA_PATH,r'raw_data\\with_maskes\\label-augmented')\n # mask_path = os.path.join(DATA_PATH,\n # r'unet_data\\training\\2019-10-20_19-26-14\\raw_pred')\n # dest_path = os.path.join(DATA_PATH,\n # r'unet_data\\training\\2019-10-20_19-26-14')\n\n is_binary_mask = True\n\n single_flag = False # segment single image or multiple\n\n ## setings for single\n #img_name = '38357-02789.png.jpg'\n img_name = '74714-32897.png.jpg'\n\n display_flag = True\n save_flag = 'stamped'\n save_segments = False\n\n # settings for multi segmentation\n img_list = None\n\n # img_list = [\n # '38360-00777.png.jpg',\n # '38360-02397.png.jpg',\n # '38360-25986.png.jpg',\n # '38360-27560.png.jpg',\n # '38360-46226.png.jpg',\n # '38360-68930.png.jpg',\n # ]\n\n # general settings for segmentation\n settings_dict = {'threshold': 0.1,\n \"pr_threshold\": 0.3,\n 'seg_type': \"felzenszwalb\",\n 'seg_params': dict(scale=1, sigma=0.8, min_size=40),\n 'gray_scale': False}\n\n # settings_dict = {'threshold': 0.6,\n # \"pr_threshold\": 0.2,\n # 'seg_type': \"slic\",\n # 'seg_params': dict(n_segments=2000,\n # compactness=0.1,\n # max_iter=100,\n # sigma=0,\n # spacing=None,\n # convert2lab=True,\n # enforce_connectivity=True,\n # min_size_factor=0.2,\n # max_size_factor=3,\n # slic_zero=False),\n # 'gray_scale': False}\n\n if single_flag:\n img_path = os.path.join(orig_path, img_name)\n mask_path = os.path.join(mask_path, img_name)\n\n sg = segmentation.SegmentationSingle(img_path=img_path,\n mask_path=mask_path,\n is_binary_mask=is_binary_mask,\n save_path=dest_path,\n create_save_dest_flag=True,\n **settings_dict)\n\n sg.apply_segmentation(save_flag=save_flag, display_flag=display_flag,\n save_segments=save_segments)\n sg.get_ontop_seg(display_flag=display_flag, save_flag=save_flag)\n if display_flag:\n cv2.waitKey(0)\n\n else:\n\n segmentation.segment_multi(img_path=orig_path,\n mask_path=mask_path,\n save_path=dest_path,\n is_binary_mask=is_binary_mask,\n settings_dict=settings_dict,\n img_list=img_list,\n create_stamp = save_flag)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"OrenChikli/Cherry_stem","sub_path":"work/segmentation_main.py","file_name":"segmentation_main.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9883916152","text":"n=int(input(\"this is my input\"))\nfor i in range(1,n+1):\n x=1\n flag=0\n print(\" \"*(n-i),end=' ')\n for j in range(1,2*i):\n print( x ,end=' ')\n if flag==0:\n x+=1\n else:\n x-=1\n if x==i:\n flag=1\n print()\n\n\n","repo_name":"miteshsuthar/pythonfiles","sub_path":"pattern8.py","file_name":"pattern8.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42854468577","text":"from collections import Counter\n\nfrom ..base import TimerOptimizer, Route, Solution, Optimizer\nimport numpy as np\nimport random\nfrom typing import List, Set\n\nfrom .. import LocalInnerEdgeOptimizer\n\n\ndef perturbations_type1(route: Route, n_verticies: int, n_iter: int):\n unused_vertices = set(list(range(0, n_verticies))) - set(route)\n\n for _ in range(n_iter):\n # random vertex replace\n vertex1 = random.choice(list(unused_vertices))\n vertex2 = random.randint(0, len(route))\n\n unused_vertices.add(route[vertex2])\n route[vertex2] = vertex1\n unused_vertices.remove(vertex1)\n\n # random edge replace\n edge_start = random.randint(0, len(route))\n edge_end = random.randint(0, len(route))\n while edge_end == edge_start:\n edge_end = random.randint(0, len(route))\n\n if edge_start < edge_end:\n e = edge_start\n edge_start = edge_end\n edge_end = e\n edge = route[edge_start:edge_end + 1]\n edge.reverse()\n\n new_route = route[0:edge_start] + edge + route[edge_end + 1:]\n route = new_route\n\n return route\n\n\nclass ILS1(TimerOptimizer):\n def _find_solution(self):\n best_solution = Solution(np.inf, self.route)\n route: Route = self.route[:]\n vertices = len(self.distance_matrix)\n opt = LocalInnerEdgeOptimizer(self.distance_matrix, route)\n solution = opt()\n\n while True:\n if solution.cost < best_solution.cost:\n best_solution = Solution(solution.cost, solution.route[:])\n\n route = perturbations_type1(solution.route, vertices, 2)\n opt = LocalInnerEdgeOptimizer(self.distance_matrix, route)\n solution = opt()\n\n yield best_solution\n\n\ndef greedy_cycle(distance_matrix: np.ndarray, route: List[int], unused_vertices: Set, to_restore: int) -> List[int]:\n end_len = len(route) + to_restore\n unused_vertices = list(unused_vertices)\n\n while len(route) != end_len:\n v1 = route[0] # to na indeksach wszystko jest\n v2 = unused_vertices[0]\n v3 = route[1]\n dst = distance_matrix[v1][v2] + distance_matrix[v2][v3] - distance_matrix[v1][v3]\n best_move = (v1, v2, v3, dst)\n\n for i in range(len(route)):\n v1 = route[i - 1]\n v3 = route[i]\n\n for j in range(len(unused_vertices)):\n v2 = unused_vertices[j]\n dst = distance_matrix[v1][v2] + distance_matrix[v2][v3] - distance_matrix[v1][v3]\n\n if dst < best_move[2]:\n best_move = (i, v2, dst)\n\n route.insert(best_move[0], best_move[1])\n unused_vertices.remove(best_move[1])\n\n return route\n\n\ndef perturbations_type2(route: Route, distance_matrix, n_verticies: int, percent: float) -> Route:\n unused_vertices = set(list(range(0, n_verticies))) - set(route)\n before_number = len(route)\n\n destroy_n_verticies = int(n_verticies * percent)\n for _ in range(destroy_n_verticies):\n vertex = random.choice(route)\n unused_vertices.add(vertex)\n route.remove(vertex)\n\n destroy_n_edges = int((len(route) / 2) * percent)\n for _ in range(destroy_n_edges):\n edge_end = random.randint(0, len(route) - 1)\n\n v = route[edge_end]\n v_prev = route[edge_end - 1]\n\n unused_vertices.add(v)\n unused_vertices.add(v_prev)\n\n route.remove(v)\n route.remove(v_prev)\n\n to_restore = before_number - len(route)\n route = greedy_cycle(distance_matrix, route, unused_vertices, to_restore)\n\n return Route(route)\n\n\nclass ILS2b(TimerOptimizer):\n def _find_solution(self):\n route: Route = self.route[:]\n vertices = len(self.distance_matrix)\n best_solution = Solution(np.inf, self.route)\n opt = LocalInnerEdgeOptimizer(self.distance_matrix, route)\n optimal_solution = opt()\n solution = Solution(optimal_solution.cost, 
optimal_solution.route[:])\n\n while True:\n if solution.cost < best_solution.cost:\n best_solution = Solution(solution.cost, solution.route[:])\n\n route = perturbations_type2(optimal_solution.route, opt.distance_matrix, vertices, 0.07)\n cost = self.__calculate_cost(route)\n solution = Solution(cost, route[:])\n\n yield best_solution\n\n def __calculate_cost(self, route):\n return sum(\n self.distance_matrix[a, b]\n if a is not None and b is not None else None\n for a, b in zip(route, route[1:] + [route[0]])\n )\n\n\nclass ILS2a(TimerOptimizer):\n def _find_solution(self):\n vertices = len(self.distance_matrix)\n best_solution = Solution(np.inf, self.route)\n\n while True:\n route = perturbations_type2(best_solution.route, self.distance_matrix, vertices, 0.07)\n cost = self.__calculate_cost(route)\n solution = Solution(cost, route[:])\n\n if solution.cost < best_solution.cost:\n best_solution = Solution(solution.cost, solution.route[:])\n\n yield best_solution\n\n def __calculate_cost(self, route):\n return sum(\n self.distance_matrix[a, b]\n if a is not None and b is not None else None\n for a, b in zip(route, route[1:] + [route[0]])\n )\n","repo_name":"Zerkles/AEM","sub_path":"p4/optimizers/ils_optimizer/ils_optimizer.py","file_name":"ils_optimizer.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5489564347","text":"\"\"\"\nCreated on Sun Nov 1 19:48:48 2020\n@author: John Rachlin\n@file: evo_v4.py: An evolutionary computing framework (version 4)\nAssumes no Solutions class.\n\"\"\"\n\nimport random as rnd\nimport copy\nfrom functools import reduce\nimport csv\nimport pandas as pd\n\nclass Evo:\n\n def __init__(self):\n \"\"\" Population constructor \"\"\"\n self.pop = {} # The solution population eval -> solution\n self.fitness = {} # Registered fitness functions: name -> objective function\n self.agents = {} # Registered agents: name -> (operator, num_solutions_input)\n\n def size(self):\n \"\"\" The size of the current population \"\"\"\n return len(self.pop)\n\n def add_fitness_criteria(self, name, f):\n \"\"\" Register a fitness criterion (objective) with the\n environment. Any solution added to the environment is scored \n according to this objective \"\"\"\n self.fitness[name] = f\n \n def add_agent(self, name, op, k=1):\n \"\"\" Register a named agent with the population.\n The operator (op) function defines what the agent does.\n k defines the number of solutions the agent operates on. \"\"\"\n self.agents[name] = (op, k)\n\n def add_solution(self, sol):\n \"\"\" Add a solution to the population \"\"\"\n #eval = ((obj1, score1), (obj2, score2).....)\n eval = tuple((name, f(sol)) for name, f in self.fitness.items())\n self.pop[eval] = sol\n\n def run_agent(self, name):\n \"\"\" Invoke an agent against the population \"\"\"\n op, k = self.agents[name]\n picks = self.get_random_solutions(k)\n new_solution = op(picks)\n self.add_solution(new_solution)\n\n\n\n def evolve(self, n=1, dom=100, status=100):\n \"\"\" Run n random agents (default=1) \n dom defines how often we remove dominated (unfit) solutions\n status defines how often we display the current population \"\"\"\n\n agent_names = list(self.agents.keys())\n for i in range(n):\n pick = rnd.choice(agent_names)\n self.run_agent(pick)\n\n if i % dom == 0:\n self.remove_dominated()\n\n if i % status == 0:\n self.remove_dominated()\n print(\"Iteration:\", i)\n print(\"Population size:\", self.size())\n df = pd.DataFrame()\n for eval,sol in self.pop.items():\n df = df.append(dict(eval), ignore_index=True)\n df.insert(0, 'teamname', ['Oreo' for _ in range(len(df))], True)\n df.set_index('teamname', inplace=True)\n df.to_csv('solutions_{}.csv'.format(i))\n #print(type(self))\n #df = pd.DataFrame()\n #df['setups'] = list(self.fitness['setups'])\n #df['lowpriority'] = list(self.fitness['lowpriority'])\n #df['delays'] = list(self.fitness['delays'])\n #df.to_csv('solutions_{}.csv'.format(i))\n\n\n # Clean up the population\n self.remove_dominated()\n\n\n def get_random_solutions(self, k=1):\n \"\"\" Pick k random solutions from the population \"\"\"\n if self.size() == 0:\n return []\n else:\n solutions = tuple(self.pop.values())\n return [copy.deepcopy(rnd.choice(solutions)) for _ in range(k)]\n\n @staticmethod\n def _dominates(p, q):\n \"\"\" p = evaluation of solution: ((obj1, score1), (obj2, score2), ... 
)\"\"\"\n pscores = [score for _,score in p]\n qscores = [score for _,score in q]\n score_diffs = list(map(lambda x,y: y-x, pscores, qscores))\n min_diff = min(score_diffs)\n max_diff = max(score_diffs)\n return min_diff >= 0.0 and max_diff > 0.0\n\n\n @staticmethod\n def _reduce_nds(S, p):\n return S - {q for q in S if Evo._dominates(p,q)}\n\n\n def remove_dominated(self):\n \"\"\" Remove dominated solutions \"\"\"\n nds = reduce(Evo._reduce_nds, self.pop.keys(), self.pop.keys())\n self.pop = {k: self.pop[k] for k in nds}\n\n\n def __str__(self):\n \"\"\" Output the solutions in the population \"\"\"\n rslt = \"\"\n for eval,sol in self.pop.items():\n rslt += str(dict(eval))+\":\\t\"+str(sol)+\"\\n\"\n return rslt","repo_name":"EthanRiley/evo-supply-chain","sub_path":"evo.py","file_name":"evo.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22763501491","text":"import pytest\nfrom momox.tests.integration.employees.factories import EmployeeFactory\n\npy_test_mark = pytest.mark.django_db\n\n\nclass TestEmployeeORMModel:\n @py_test_mark\n def test_model_works(self):\n employee = EmployeeFactory()\n assert employee.is_attending\n\n @py_test_mark\n def test_model_constraint_works(self):\n with pytest.raises(Exception) as ex:\n EmployeeFactory(name='')\n assert 'NOT NULL constraint failed' in str(ex)\n EmployeeFactory(is_attending='')\n assert 'NOT NULL constraint failed' in str(ex)\n","repo_name":"eadwinCode/mmx-challenge","sub_path":"momox/tests/integration/employees/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14843670744","text":"import pytest\n\nfrom fixture.application import Application\nfrom model.group import Group\n\napp_fixture = None\n\n\n@pytest.fixture(scope=\"function\")\ndef app():\n \"\"\"\n Before each test method:\n 1. Create an application fixture\n - if it doesn't exist or\n - if fixture doesn't valid (no opened browser)\n 2. Login\n :return: app fixture\n \"\"\"\n global app_fixture\n if not app_fixture or not app_fixture.is_valid():\n app_fixture = Application()\n app_fixture.session.ensure_login(username=\"admin\", password=\"secret\")\n return app_fixture\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef stop(request):\n \"\"\"\n Close session.\n :param request: request\n \"\"\"\n\n def fin():\n app_fixture.session.ensure_logout()\n app_fixture.tear_down()\n\n # Run after last test\n request.addfinalizer(fin)\n\n@pytest.fixture\ndef check_group():\n if not app_fixture.group.count():\n app_fixture.group.create(Group(name=\"test\"))\n","repo_name":"popova-sdet/addressbook_koza","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39896824417","text":"import pandas as pd\n\n\ndef create_stores_demand(demand):\n stores_demand = {}\n\n # Keep track of cities where no store is present\n # Used to still include those cities as possible route nodes\n #cities_named = { city : False for city in CITIES}\n\n for index, row in demand.iterrows():\n city, demands = row[0], row[1:]\n\n if city not in CITIES:\n raise ReferenceError(\"De opgegeven winkelstad bestaat niet in het netwerk!\")\n\n for i, demand in enumerate(demands):\n if demand and demand > 0:\n store = (city, i)\n stores_demand[store] = round(demand)\n\n #cities_named[city] = True\n\n # for city, named in cities_named.items():\n # # if named is False:\n # # fake_store = (city, 0)\n # # stores_demand[fake_store] = 0\n # # print(city)\n\n return stores_demand\n\n\ndef create_edge_dict(nodes, matrix):\n dict = {}\n\n rows = matrix.shape[0]\n columns = matrix.shape[1]\n\n if rows != columns:\n raise ValueError(\"Matrix must be symmetric\")\n\n for i in range(rows):\n for j in range(columns):\n\n if i == j:\n continue\n\n names = (nodes[i], nodes[j])\n edge_val = matrix.iloc[i, j]\n\n if edge_val is None:\n edge_val = matrix.iloc[j, i]\n\n dict[names] = edge_val\n return dict\n\n\ndef create_store_edges(stores, city_matrix, same_city_value=0):\n stores_edges = {}\n\n for store1 in stores:\n for store2 in stores:\n store1_city, store1_branch = store1[0], store1[1]\n store2_city, store2_branch = store2[0], store2[1]\n\n if store1_city == store2_city:\n # Ignore edge to itself\n if store1_branch == store2_branch:\n if store1_branch == 'hub':\n edge_value = 0\n else:\n continue\n else:\n edge_value = same_city_value\n\n else:\n edge_value = city_matrix[(store1_city, store2_city)]\n\n stores_edges[(store1, store2)] = edge_value\n\n return stores_edges\n\n\nCITIES = ['Alkmaar ', 'Amersfort', 'Amsterdam', 'Apeldoorn', 'Arnhem', 'Breda', 'Delft', 'Den Helder', 'Dordrecht',\n 'Ede', 'Eindhoven', 'Enkhuizen', 'Gouda', 'Haarlem', 'Heerhugowaard', 'Hoorn', 'Huizen', 'Leiden',\n 'Medemblik', 'Nieuwegein', 'Nijmegen', 'Oss', 'Rotterdam', 'Schagen', \"'s-Hertogenbosch\", 'The Hague',\n 'Tilburg', 'Utrecht', 'Wageningen', 'Zaandam']\n\nHUB = ('Nijmegen', 'hub')\n\nIN_CITY_TIME = 5\nAMOUNT_OF_TRUCKS = 5\nTROLLEY_CAPACITY = 36\nCOST_PER_KM = 0.8\nCOST_PER_MIN = 10/60\nCOST_PER_TROLLEY = 5.8\nCOST_PER_TRUCK = 110\nUNLOAD_TIME_PER_TROLLEY = 1.4\nTOMATOES_PER_BOX = 50\nBOXES_PER_TROLLEY = 10\n\n# imports\ndf_demand = pd.read_excel('AH-Maandag.xlsx', header=None)\ndf_demand = df_demand.where((pd.notnull(df_demand)), None)\ndf_km = pd.read_excel('km.xlsx')\ndf_km = df_km.where((pd.notnull(df_km)), None)\ndf_min = pd.read_excel('min.xlsx')\ndf_min = df_min.where((pd.notnull(df_min)), None)\n\ndf_cities_km = df_km.iloc[:, 1:]\ndf_cities_min = df_min.iloc[:, 1:]\n\nstores_demand = create_stores_demand(df_demand)\nstores = [*stores_demand.keys()]\n\n# Meant to differentiate the hub from stores\nstores_and_hub = (*stores, HUB)\n\ncities_km = create_edge_dict(CITIES, df_cities_km)\nstores_km = create_store_edges(stores_and_hub, cities_km, same_city_value=0) # same_city stores => 0 km apart\n\ncities_min = create_edge_dict(CITIES, df_cities_min)\nstores_min = create_store_edges(stores_and_hub, cities_min, same_city_value=5) # same_city stores => 5 min apart\n\nstores_and_hub_edges = [*stores_min.keys()]\n\n\nexample_trip = [(('Apeldoorn', 0), ('Apeldoorn', 1)), (('Apeldoorn', 1), ('Apeldoorn', 2)), (('Apeldoorn', 2), ('Apeldoorn', 0)),\n (('Enkhuizen', 0), ('Medemblik', 0)), 
(('Medemblik', 0), ('Enkhuizen', 0)), (('Nijmegen', 2), ('Nijmegen', 'hub')),\n (('Nijmegen', 'hub'),('Nijmegen', 2))]\n","repo_name":"tituspellegrom/IntegrativePractical5","sub_path":"Titus/Gurobi code/Compleet/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2076868873","text":"from project_utils import email\nfrom .model_init import connect_to_DB\n\nconnection = connect_to_DB()\n\n\ndef send_to_teacher(teacher,name, e_mail, phone, content):\n email_content = \"hello,\\n I am {}.\\n {}\\n Thanks! \\n{} \\nphone: {}, mail: {}\".format(name,content,name,phone,e_mail)\n\n with connection.cursor() as cursor:\n query = \"SELECT * FROM Teachers where id ={} \".format(teacher)\n cursor.execute(query)\n result = cursor.fetchone()\n\n email.send(result.get(\"e_mail\"), email_content)\n","repo_name":"Tzofia-Asherov/full_stack_learn_online_project","sub_path":"model/email_model.py","file_name":"email_model.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12015001406","text":"from Products.CMFCore.interfaces import IPropertiesTool\nfrom zope.component import getUtility\n\nfrom collective.plonetruegallery.settings import GallerySettings\nfrom collective.plonetruegallery.interfaces import IFlickrGallerySettings, \\\n IGallerySettings, IHighSlideDisplaySettings\nfrom collective.plonetruegallery.tests import BaseTest\nfrom collective.plonetruegallery.utils import getGalleryAdapter, \\\n getDisplayAdapter\n\nimport unittest2 as unittest\n\n\nclass TestSettings(BaseTest):\n\n def test_settings_should_return_default_value(self):\n settings = GallerySettings(self.get_gallery())\n self.assertEquals(settings.gallery_type,\n IGallerySettings['gallery_type'].default)\n\n def test_added_interface_settings_should_return_default_value(self):\n settings = GallerySettings(self.get_gallery(),\n interfaces=[IHighSlideDisplaySettings])\n self.assertEquals(settings.highslide_outlineType, 'drop-shadow')\n\n def test_should_always_have_IGallerySettings_no_matter_what(self):\n settings = GallerySettings(self.get_gallery(), interfaces=[])\n self.failUnless(IGallerySettings in settings._interfaces)\n\n def test_should_handle_passing_in_single_item(self):\n settings = GallerySettings(self.get_gallery(),\n interfaces=IHighSlideDisplaySettings)\n self.assertEquals(settings.highslide_outlineType, 'drop-shadow')\n\n def test_should_return_default_to_None_if_it_is_not_in_an_interface(self):\n settings = GallerySettings(self.get_gallery())\n self.assertEquals(None, settings.foobar)\n\n def test_should_set_setting_correctly(self):\n settings = GallerySettings(self.get_gallery())\n settings.gallery_type = \"flickr\"\n self.assertEquals(settings.gallery_type, \"flickr\")\n\n def test_should_set_extra_interface_setting(self):\n settings = GallerySettings(\n self.get_gallery(),\n interfaces=[IFlickrGallerySettings]\n )\n settings.flickr_username = \"john\"\n self.assertEquals(settings.flickr_username, \"john\")\n\n\nclass TestUtils(BaseTest):\n\n def test_getGalleryAdapter(self):\n adapter = getGalleryAdapter(self.portal['test_gallery'], self.request)\n self.assertEquals(adapter.name, \"basic\")\n self.assertEquals(adapter.settings.gallery_type, \"basic\")\n\n def test_getDisplayAdapter(self):\n gadapter = getGalleryAdapter(self.portal['test_gallery'],\n self.request)\n displayer = getDisplayAdapter(gadapter)\n self.assertEquals(displayer.name, 'galleria')\n self.assertEquals(gadapter.settings.display_type, 'galleria')\n\n def test_getGalleryAdapter_when_asking_for_non_existant_type(self):\n gadapter = getGalleryAdapter(self.portal['test_gallery'],\n self.request, gallery_type=\"foobar\")\n displayer = getDisplayAdapter(gadapter)\n self.assertEquals(displayer.name, 'galleria')\n self.assertEquals(gadapter.settings.display_type, 'galleria')\n self.assertEquals(gadapter.name, 'basic')\n self.assertEquals(gadapter.settings.gallery_type, 'basic')\n\n\nclass TestPloneAppImagingIntegration(BaseTest):\n\n def test_size_map_for_default_sizes_with_size_upgrades(self):\n props = getUtility(IPropertiesTool)\n imaging_properties = props.get('imaging_properties', None)\n if imaging_properties:\n adapter = getGalleryAdapter(self.portal['test_gallery'],\n self.request)\n self.assertEquals(adapter.sizes['small']['width'], 320)\n self.assertEquals(adapter.sizes['small']['height'], 320)\n self.assertEquals(adapter.sizes['medium']['width'], 576)\n self.assertEquals(adapter.sizes['medium']['height'], 576)\n self.assertEquals(adapter.sizes['large']['width'], 768)\n 
self.assertEquals(adapter.sizes['large']['height'], 768)\n\n\ndef test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"Solgema/collective.plonetruegallery","sub_path":"collective/plonetruegallery/tests/test_various.py","file_name":"test_various.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"71977453201","text":"# -*- coding: utf-8 -*-\n_tdl_no_reimport = True\n\nimport os.path\nimport time\nfrom html.parser import HTMLParser\n\nimport Kittens.utils\n\nimport Purr\n\n_verbosity = Kittens.utils.verbosity(name=\"purrparse\")\ndprint = _verbosity.dprint\ndprintf = _verbosity.dprintf\n\n\nclass LogIndexParser(HTMLParser):\n def reset(self):\n HTMLParser.reset(self)\n self.title = None\n self.timestamp = None\n self.curclass = None\n\n def end(self):\n dprintf(4, \"end, title '%s', timestamp %s\", self.title, self.timestamp)\n\n def handle_starttag(self, tag, attrs):\n dprint(4, \"start tag\", tag, attrs)\n # anchor tag -- data we look for is in here\n if tag == \"a\":\n attrs = dict(attrs)\n tagclass = attrs.get('class')\n if tagclass:\n # whenever we encounter an A tag with a CLASS attribute, and the class\n # has a _handle_start_class method defined, call the handler.\n # The attributes are passed as keywords to the handler\n start_handler = getattr(self, \"_handle_start_%s\" % tagclass, None)\n if callable(start_handler):\n start_handler(**attrs)\n # If the class also has a _handle_end_class method defined, accumulate all text\n # inside the tag (in self.curdata) for handling in the end handler\n if hasattr(self, \"_handle_end_%s\" % tagclass):\n if self.curclass:\n raise ValueError(\"nested class %s inside tag of class %s\" % (tagclass, self.curclass))\n self.curclass = tagclass\n self.curdata = \"\"\n # paragraph tag: add newline to curdata, if accumulating\n elif tag == \"p\":\n if self.curclass and self.curdata:\n self.curdata += \"\\n\"\n elif tag == \"br\":\n if self.curclass and self.curdata:\n self.curdata += \" \"\n\n def _handle_start_TITLE(self, timestamp=0, **kw):\n if self.title is None:\n try:\n self.timestamp = int(float(timestamp))\n except:\n self.timestamp = time.time()\n\n def _handle_end_TITLE(self, data):\n if self.title is None:\n self.title = data\n\n def handle_data(self, data):\n dprintf(4, \"data: {%s}\\n\", data)\n # if curclass is None, we're not accumulating data, just skip it\n if self.curclass is None:\n return\n # is there anything here except whitespace? Append to data, but\n # replace newlines with spaces\n if data.rstrip():\n self.curdata += data.replace(\"\\n\", \" \")\n # else all space. 
Append a single space to curdata, if it doesn't\n # already end in a space\n else:\n if self.curdata and self.curdata[-1] not in \"\\n \":\n self.curdata += \" \"\n\n _entity_dict = dict(lt=\"<\", gt=\">\")\n\n def handle_entityref(self, name):\n dprintf(4, \"entityref: {%s}\\n\", name)\n data = self._entity_dict.get(name, None)\n if data:\n self.handle_data(data)\n\n def handle_endtag(self, tag):\n dprint(4, \"end tag\", tag)\n # if end of A tag with CLASS attribute, pass accumulated data to data handler\n if tag == \"a\":\n if self.curclass:\n getattr(self, \"_handle_end_%s\" % self.curclass)(self.curdata)\n self.curclass = None\n elif tag == \"html\":\n self.end()\n\n\nclass LogEntryIndexParser(LogIndexParser):\n def __init__(self, dirname):\n LogIndexParser.__init__(self)\n self._dirname = dirname\n\n def reset(self):\n LogIndexParser.reset(self)\n self.comments = None\n self.dps = []\n self._new_dp = None\n\n def end(self):\n self._add_data_product()\n LogIndexParser.end(self)\n\n def _handle_start_DP(self, filename=None, src=None, policy=None, quiet=False,\n timestamp=0, comment=None, render=None, **kw):\n # dispence with previous DP tag, if any\n self._add_data_product()\n # setup data for this tag\n comment = comment or \"\"\n try:\n timestamp = float(timestamp)\n except:\n timestamp = time.time()\n if not isinstance(quiet, bool):\n try:\n quiet = bool(int(quiet))\n except:\n quiet = bool(quiet)\n comment = comment.replace(\"<\", \"<\").replace(\">\", \">\")\n self._new_dp = Purr.DataProduct(filename=filename, sourcepath=src,\n timestamp=timestamp, comment=comment,\n fullpath=os.path.join(self._dirname, filename or \"\"),\n policy=policy, render=render, quiet=quiet, archived=True)\n\n def _handle_end_TITLE(self, data):\n self.title = data.replace(\"<\", \"<\").replace(\">\", \">\")\n\n def _handle_end_COMMENTS(self, data):\n self.comments = data.replace(\"<\", \"<\").replace(\">\", \">\")\n\n def _handle_end_DPCOMMENT(self, data):\n if self._new_dp:\n self._new_dp.comment = data.replace(\"<\", \"<\").replace(\">\", \">\")\n self._add_data_product()\n\n def _add_data_product(self):\n if self._new_dp:\n self.dps.append(self._new_dp)\n self._new_dp = None\n","repo_name":"ratt-ru/purr","sub_path":"Purr/Parsers.py","file_name":"Parsers.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"7961133579","text":"import maya.cmds as mc\n\ntry:\n from itertools import izip as zip\nexcept ImportError: # will be 3.x series\n pass\n\nfrom ..ar_functions import find_jnts\nfrom ..ar_functions import sel_joints\nfrom ..ar_tools import fk_ctrl\n\n\n# fk jaw ctrl\nclass face_rig():\n\n def jaw_ctrl(self, parent_to, ctrl_size):\n # find head joint\n head_jnt_temp = find_jnts.find_jnts()\n head_jnt = head_jnt_temp.find_head_jnt()\n # find jaw joint\n jaw_jnt_temp = find_jnts.find_jnts()\n jaw_jnt = jaw_jnt_temp.most_children_jnt(head_jnt)\n # create jaw fk ctrl\n jaw_ctrl_var = fk_ctrl.fk_ctrl()\n # parent jaw control under head ctrl\n jaw_ctrl_info = jaw_ctrl_var.single_fk_ctrl( jnt=jaw_jnt, \n parent_to=parent_to, \n normal=[0,1,0], \n size = ctrl_size,\n colorR=0, \n colorG=1, \n colorB=0)\n # return joint group and then control\n return jaw_ctrl_info[0], jaw_ctrl_info[1]\n\n\n def tongue_ctrls(self, ctrl_size, parent_to):\n # find head joint\n head_jnt_temp = find_jnts.find_jnts()\n head_jnt = head_jnt_temp.find_head_jnt()\n # find jaw joint\n jaw_jnt_temp = find_jnts.find_jnts()\n jaw_jnt = jaw_jnt_temp.most_children_jnt(head_jnt)\n # find jaw joint\n tongue_jnt_temp = find_jnts.find_jnts()\n tongue_jnt = tongue_jnt_temp.most_descendants_jnt(jaw_jnt)\n\n # find tongue joint chain\n tongue_list_var = sel_joints.sel_joints(firstJoint=tongue_jnt)\n\n tongue_list_info = tongue_list_var.sel_jnt_chain()\n\n #create controls and groups for tongue\n tongue_grp_list = []\n tongue_ctrl_list = []\n for jnt in tongue_list_info:\n jnt_var = fk_ctrl.fk_ctrl()\n jnt_var_info = jnt_var.single_fk_curve_ctrl(jnt=jnt, \n parent_to='', \n version='box', \n size=ctrl_size, \n colorR=1, \n colorG=0, \n colorB=0)\n # make grp and ctrl list for tongue ctrls\n tongue_grp_list.append(jnt_var_info[0])\n tongue_ctrl_list.append(jnt_var_info[1])\n # varaiable for top grp to parent\n tongue_top_grp = tongue_grp_list[0]\n\n #remove first and last of lists to correctly parent ctrls and grps together in for loop\n tongue_grp_list.pop(0)\n tongue_ctrl_list.pop(-1)\n\n #parent ctrls and grps together\n for i_grp, i_ctrl in zip(tongue_grp_list, tongue_ctrl_list):\n mc.parent(i_grp, i_ctrl)\n # parent top grp to head ctrl\n mc.parent(tongue_top_grp, parent_to)\n #return tongue top grp (not needed)\n return tongue_top_grp\n\n\n def bot_face_ctrls(self, ctrl_size, parent_to):\n # find head joint\n head_jnt_temp = find_jnts.find_jnts()\n head_jnt = head_jnt_temp.find_head_jnt()\n # find jaw joint\n jaw_jnt_temp = find_jnts.find_jnts()\n jaw_jnt = jaw_jnt_temp.most_children_jnt(head_jnt)\n # find jaw joint\n tongue_jnt_temp = find_jnts.find_jnts()\n tongue_jnt = tongue_jnt_temp.most_descendants_jnt(jaw_jnt)\n\n jaw_jnt_descendants = mc.listConnections(jaw_jnt, type='joint', d=True, s=False)\n\n bot_face_jnts = []\n for i in jaw_jnt_descendants:\n if i != tongue_jnt:\n bot_face_jnts.append(i)\n \n for i in bot_face_jnts:\n bot_face_ctrl = fk_ctrl.fk_ctrl()\n bot_face_ctrl.single_fk_curve_ctrl(jnt=i, \n parent_to=parent_to,\n size=ctrl_size,\n version='box',\n colorR=.5, \n colorG=1, \n colorB=0)\n \n\n #________________________________________________#\n #________________________________________________#\n #top face controls w/ mid ctrls (parented to head)\n def top_face_ctrls(self, ctrl_size, parent_to_head='', parent_to_jaw='', mid_ctrls=0):\n # find head joint\n head_jnt_temp = find_jnts.find_jnts()\n head_jnt = head_jnt_temp.find_head_jnt()\n # find jaw joint\n jaw_jnt_temp = find_jnts.find_jnts()\n jaw_jnt = 
jaw_jnt_temp.most_children_jnt(head_jnt)\n # get immediate descendants of head joint\n head_jnt_descendants = mc.listConnections(head_jnt, type='joint', d=True, s=False)\n # list head joint descendants without jaw joint\n top_head_jnts = []\n for i in head_jnt_descendants:\n if i != jaw_jnt:\n top_head_jnts.append(i)\n #list head joints without ear joints\n top_face_jnts = []\n for i in top_head_jnts:\n i_descendats = mc.listRelatives(i, type='joint', ad=True)\n try:\n if len(i_descendats) >= 1:\n pass\n except:\n top_face_jnts.append(i)\n \n #get position of face jnts\n y_pos_list = []\n for i in top_face_jnts:\n pos = mc.xform(i, q=True , ws=True, t=True, a=True)\n # y pos to find lowest ws value \n y_pos = pos[1]\n y_pos_list.append(y_pos)\n \n # combine y_pos and top face jnt lst\n zip_y_pos = zip(y_pos_list, top_face_jnts)\n #sort lists from smallest to greatest Y pos\n sort_zip_y_pos = sorted(zip_y_pos)\n # create sorted list with just face jnts\n sorted_top_face_jnts = [somVar for i, somVar in sort_zip_y_pos]\n # get just jnts with lowest y positions\n mid_jnt_list = sorted_top_face_jnts[:mid_ctrls]\n # new face jnt list without mid face jnts\n new_top_face_jnts = sorted_top_face_jnts[mid_ctrls:]\n\n \n # create nurbs ctrl for each top face jnt\n for i in new_top_face_jnts:\n top_face_ctrl = fk_ctrl.fk_ctrl()\n top_face_ctrl.single_fk_curve_ctrl(jnt=i, \n parent_to=parent_to_head, \n version='box',\n size=ctrl_size,\n colorR=1, \n colorG=.5, \n colorB=0)\n\n # mid face grp list\n mid_face_ctrl_grps = []\n # create ctrl for the mid face jnts\n for i in mid_jnt_list:\n mid_face_ctrl = fk_ctrl.fk_ctrl()\n mid_face_ctrl_info = mid_face_ctrl.single_fk_curve_ctrl( jnt=i, \n parent_to=parent_to_head, \n version='box',\n size=ctrl_size,\n colorR=0, \n colorG=.5, \n colorB=1)\n # parent constrain mid face ctrl grp between head and jaw\n mc.parentConstraint(parent_to_head, parent_to_jaw, mid_face_ctrl_info[0], mo=1)\n mc.scaleConstraint(parent_to_head, parent_to_jaw, mid_face_ctrl_info[0], mo=1)\n # append grps to list\n mid_face_ctrl_grps.append(mid_face_ctrl_info[0])\n\n return top_face_jnts, mid_jnt_list, mid_face_ctrl_grps\n \n \n #________________________________________________#\n #________________________________________________#\n def ear_ctrls(self, ctrl_size, parent_to):\n # find head joint\n head_jnt_temp = find_jnts.find_jnts()\n head_jnt = head_jnt_temp.find_head_jnt()\n # find jaw joint\n jaw_jnt_temp = find_jnts.find_jnts()\n jaw_jnt = jaw_jnt_temp.most_children_jnt(head_jnt)\n # get immediate descendants of head joint\n head_jnt_descendants = mc.listConnections(head_jnt, type='joint', d=True, s=False)\n # list head joint descendants without jaw joint\n top_head_jnts = []\n for i in head_jnt_descendants:\n if i != jaw_jnt:\n top_head_jnts.append(i)\n # list head joints without face joints\n ear_jnts = []\n for i in top_head_jnts:\n i_descendats = mc.listRelatives(i, type='joint', ad=True)\n try:\n if len(i_descendats) >= 1:\n ear_jnts.append(i)\n except:\n pass\n\n # if ear jnts do exist rig them (top face jnts w/ child/s)\n try:\n # chain for first r ear (should just put For Loop incase more fk chains on head)\n r_ear_list_var = sel_joints.sel_joints(firstJoint=ear_jnts[0])\n\n r_ear_list_info = r_ear_list_var.sel_jnt_chain()\n\n # chain for first l ear\n l_ear_list_var = sel_joints.sel_joints(firstJoint=ear_jnts[1])\n\n l_ear_list_info = l_ear_list_var.sel_jnt_chain()\n\n #create controls and groups for R EAR ___________________________\n r_ear_grp_list = []\n r_ear_ctrl_list 
= []\n for jnt in r_ear_list_info:\n jnt_var = fk_ctrl.fk_ctrl()\n jnt_var_info = jnt_var.single_fk_curve_ctrl(jnt=jnt, \n parent_to='', \n version='box', \n size=ctrl_size, \n colorR=0, \n colorG=0.5, \n colorB=1)\n r_ear_grp_list.append(jnt_var_info[0])\n r_ear_ctrl_list.append(jnt_var_info[1])\n # varaiable for top grp before removed\n r_ear_top_grp = r_ear_grp_list[0]\n\n #remove first and last of lists to correctly parent ctrls and grps together in for loop\n r_ear_grp_list.pop(0)\n r_ear_ctrl_list.pop(-1)\n\n #parent ctrls and grps together\n for i_grp, i_ctrl in zip(r_ear_grp_list, r_ear_ctrl_list):\n mc.parent(i_grp, i_ctrl)\n # parent top grp to head ctrl\n mc.parent(r_ear_top_grp, parent_to)\n\n #create controls and groups for L EAR ___________________________\n l_ear_grp_list = []\n l_ear_ctrl_list = []\n for jnt in l_ear_list_info:\n jnt_var = fk_ctrl.fk_ctrl()\n jnt_var_info = jnt_var.single_fk_curve_ctrl(jnt=jnt, \n parent_to='', \n version='box', \n size=ctrl_size, \n colorR=0, \n colorG=0.5, \n colorB=1)\n l_ear_grp_list.append(jnt_var_info[0])\n l_ear_ctrl_list.append(jnt_var_info[1])\n # varaiable for top grp before removed\n l_ear_top_grp = l_ear_grp_list[0]\n\n #remove first and last of lists to correctly parent ctrls and grps together in for loop\n l_ear_grp_list.pop(0)\n l_ear_ctrl_list.pop(-1)\n\n #parent ctrls and grps together\n for i_grp, i_ctrl in zip(l_ear_grp_list, l_ear_ctrl_list):\n mc.parent(i_grp, i_ctrl)\n # parent top grp to head ctrl\n mc.parent(l_ear_top_grp, parent_to)\n\n return r_ear_top_grp, l_ear_top_grp\n except:\n pass\n","repo_name":"natelollar/maya_auto_rigger_and_tools","sub_path":"character_rigger/ar_rig/face_rig.py","file_name":"face_rig.py","file_ext":"py","file_size_in_byte":12555,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"38454758455","text":"n=int(input())\ns=input()\ns1=s.upper()\ns3=set(s1)\nc=0\nfor i in s3:\n x=ord(i)\n if 65<=x<=90:\n c+=1\nif c==26:\n print(\"YES\")\nelse:\n print(\"No\")\n\n","repo_name":"Ritesh22p1401b/CodeForces-Solutions","sub_path":"Pangram.py","file_name":"Pangram.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15588112103","text":"# Hierarchical Clustering\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Mall_Customers.csv')\nX = dataset.iloc[:, [3, 4]].values\n# y = dataset.iloc[:, 3].values\n\n# Splitting the dataset into the Training set and Test set\n\"\"\"from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\"\"\"\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n# Using the dendrogram to find the optimal number of clusters\nimport scipy.cluster.hierarchy as sch\ndendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))\nplt.title('Dendrogram')\nplt.xlabel('Customers')\nplt.ylabel('Euclidean distances')\nplt.show()\n\n# Fitting Hierarchical Clustering to the dataset\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage = 'ward')\ny_hc = hc.fit_predict(X)\n\n# Visualising the clusters\nplt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = 'red', label = 'Cluster 1')\nplt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')\nplt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = 'green', label = 'Cluster 3')\nplt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')\nplt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')\nplt.title('Clusters of customers')\nplt.xlabel('Annual Income (k$)')\nplt.ylabel('Spending Score (1-100)')\nplt.legend()\nplt.show()\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndataset = pd.read_csv('Mall_Customers.csv')\nX = dataset.iloc[:,[3,4]].values\n\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='ward')\ny_hc = hc.fit_predict(X)\n\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='complete')\ny_hc_lin = hc.fit_predict(X)\n\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='average')\ny_hc_avg = 
hc.fit_predict(X)\n\nplt.scatter(X[y_hc==0,0],X[y_hc==0,1],s=100,c='red',label='clus1')\nplt.scatter(X[y_hc==1,0],X[y_hc==1,1],s=100,c='blue',label='clus2')\nplt.scatter(X[y_hc==2,0],X[y_hc==2,1],s=100,c='cyan',label='clus3')\nplt.scatter(X[y_hc==3,0],X[y_hc==3,1],s=100,c='magenta',label='clus4')\nplt.scatter(X[y_hc==4,0],X[y_hc==4,1],s=100,c='green',label='clus5')\nplt.show()\n\nplt.scatter(X[y_hc_lin==0,0],X[y_hc_lin==0,1],s=100,c='red',label='clus1')\nplt.scatter(X[y_hc_lin==1,0],X[y_hc_lin==1,1],s=100,c='blue',label='clus2')\nplt.scatter(X[y_hc_lin==2,0],X[y_hc_lin==2,1],s=100,c='cyan',label='clus3')\nplt.scatter(X[y_hc_lin==3,0],X[y_hc_lin==3,1],s=100,c='magenta',label='clus4')\nplt.scatter(X[y_hc_lin==4,0],X[y_hc_lin==4,1],s=100,c='green',label='clus5')\nplt.show()\n\nplt.scatter(X[y_hc_avg==0,0],X[y_hc_avg==0,1],s=100,c='red',label='clus1')\nplt.scatter(X[y_hc_avg==1,0],X[y_hc_avg==1,1],s=100,c='blue',label='clus2')\nplt.scatter(X[y_hc_avg==2,0],X[y_hc_avg==2,1],s=100,c='cyan',label='clus3')\nplt.scatter(X[y_hc_avg==3,0],X[y_hc_avg==3,1],s=100,c='magenta',label='clus4')\nplt.scatter(X[y_hc_avg==4,0],X[y_hc_avg==4,1],s=100,c='green',label='clus5')\nplt.show()\n\nfrom sklearn.metrics import adjusted_rand_score\nward_ar_score = adjusted_rand_score(y_hc,y_hc)\n\nfrom sklearn.metrics import adjusted_rand_score\nward_ar_score_avg = adjusted_rand_score(y_hc,y_hc_avg)\n\nfrom sklearn.metrics import adjusted_rand_score\nward_ar_score_com = adjusted_rand_score(y_hc,y_hc_lin)\n\nfrom sklearn import preprocessing\nnormalized_X = preprocessing.normalize(X)\n\nplt.scatter(normalized_X[:,0],normalized_X[:,1],color='red')\nplt.show()\n\nfrom sklearn.preprocessing import normalize\nnormalized_X1 = normalize(X)\n\nfrom scipy.cluster.hierarchy import linkage\nlinkage_type = 'ward'\n\nlinkage_matrix = linkage(X,linkage_type)\n\nfrom scipy.cluster.hierarchy import dendrogram\ndendrogram = dendrogram(linkage_matrix)\nplt.show()\n\nimport seaborn as sns\nsns.clustermap(X,figsize=(18,50),method='ward',cmap='viridis')\nplt.show()\n\n\n#DBSCAN\nimport pandas as pd\n\ndataset = pd.read_csv('Mall_Customers.csv')\nX = dataset.iloc[:, [3, 4]].values\n\nimport dbscan_lab_helper as helper\nfrom sklearn.cluster import DBSCAN\ndbscan = DBSCAN(eps=1,min_samples=3)\nypred_dbscan = dbscan.fit_predict(X)\n\n\nplt.scatter(X[ypred_dbscan==-1,0],X[ypred_dbscan==-1,1],s=100,c='red',label='clus1')\nplt.scatter(X[ypred_dbscan==1,0],X[ypred_dbscan==1,1],s=100,c='blue',label='clus2')\nplt.scatter(X[ypred_dbscan==2,0],X[ypred_dbscan==2,1],s=100,c='cyan',label='clus3')\nplt.scatter(X[ypred_dbscan==3,0],X[ypred_dbscan==3,1],s=100,c='magenta',label='clus4')\nplt.scatter(X[ypred_dbscan==4,0],X[ypred_dbscan==4,1],s=100,c='green',label='clus5')\nplt.show()\n\n","repo_name":"raajeshlr/ML-A-Z-Udemy","sub_path":"Part 4 - Clustering/Section 25 - Hierarchical Clustering/hc.py","file_name":"hc.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24464017342","text":"from flask import (\n Flask, escape, render_template, url_for, request\n)\n\nfrom backend import (\n lucene, Searcher, search_query, read_stopwords, read_pos_translation, pos_list_string, parse_win,\n idxdir_path, file_stopwords, file_pos\n)\n\n## html和css文件都在根目录下\napp = Flask(__name__, template_folder=\"\", static_folder=\"\")\n\n@app.route('/')\ndef index():\n query = request.args.get(\"query\")\n print(f\"### query: {query}\")\n if query:\n ## ---start--- MUST add for lucene functionality\n vm_env = lucene.getVMEnv()\n vm_env.attachCurrentThread()\n ## ---end---\n\n win_str = request.args.get(\"win\")\n win = parse_win(win_str)\n pos = []\n checkbox_state = {}\n if not request.args.get(\"cb_all\"):\n for key in request.args.keys():\n if key.startswith(\"cb_\"):\n pos_t = key[3:]\n pos.append(pos_t)\n for key in request.args.keys():\n if key.startswith(\"cb_\"):\n pos_t = key[3:]\n pos.append(pos_t)\n checkbox_state[key] = True\n print(f\"### pos: {pos}\")\n print(f\"### win: {win}\")\n counter, pos_dict = search_query(query, searcher, pos, win, stopwords)\n ans = counter.most_common(n)\n\n ## display answers\n answer_list = []\n print(f\"### get top {len(ans)} answers\")\n for item in ans:\n ans = (item[0], pos_list_string(pos_dict[item[0]], pos_trans))\n answer_list.append(ans)\n return render_template(\"result.html\", answer_list=answer_list, query_str=query, win=win if win != 0 else \"\", **checkbox_state)\n\n return render_template(\"index.html\")\n\n\n### backend init begin\nprint(\"initiating backend: lucene\")\nlucene.initVM()\nsearcher = Searcher(idxdir_path)\nstopwords = read_stopwords(file_stopwords)\npos_trans = read_pos_translation(file_pos)\nn = 20\nprint(\"backend initiated!\")\n### backend init end\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"atomiechen/CollocationRetrieval","sub_path":"src-flask/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2793312710","text":"from oslo_log import versionutils\nfrom oslo_policy import policy\n\nRULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'\nRULE_ADMIN_API = 'rule:admin_api'\nRULE_DEFAULT = 'rule:default'\n\ndeprecation_msg = (\"The `context_is_admin` check is superseded by more \"\n \"specific check strings that consume system and project \"\n \"scope attributes from keystone tokens.\")\nDEPRECATED_CONTEXT_IS_ADMIN = policy.DeprecatedRule(\n name='context_is_admin',\n check_str='role:admin',\n deprecated_reason=deprecation_msg,\n deprecated_since=versionutils.deprecated.WALLABY\n)\n\n# Generic policy check string for system administrators. These are the people\n# who need the highest level of authorization to operate the deployment.\n# They're allowed to create, read, update, or delete any system-specific\n# resource. They can also operate on project-specific resources where\n# applicable (e.g., cleaning up shares or snapshots).\nSYSTEM_ADMIN = 'rule:system-admin'\n\n# Generic policy check string for system users who don't require all the\n# authorization that system administrators typically have. This persona, or\n# check string, typically isn't used by default, but it's existence it useful\n# in the event a deployment wants to offload some administrative action from\n# system administrator to system members.\nSYSTEM_MEMBER = 'rule:system-member'\n\n# Generic policy check string for read-only access to system-level resources.\n# This persona is useful for someone who needs access for auditing or even\n# support. These uses are also able to view project-specific resources where\n# applicable (e.g., listing all shares in the deployment, regardless of the\n# project they belong to).\nSYSTEM_READER = 'rule:system-reader'\n\n# This check string is reserved for actions that require the highest level of\n# authorization on a project or resources within the project (e.g., resyncing a\n# share replica).\nPROJECT_ADMIN = 'rule:project-admin'\n\n# This check string is the primary use case for typical end-users, who are\n# working with resources that belong to a project (e.g., managing shares or\n# share replicas).\nPROJECT_MEMBER = 'rule:project-member'\n\n# This check string should only be used to protect read-only project-specific\n# resources. 
It should not be used to protect APIs that make writable changes\n# (e.g., updating a share or snapshot).\nPROJECT_READER = 'rule:project-reader'\n\n# The following are common composite check strings that are useful for\n# protecting APIs designed to operate with multiple scopes (e.g., a system\n# administrator should be able to delete any share in the deployment, a\n# project member should only be able to delete shares in their project).\nSYSTEM_ADMIN_OR_PROJECT_ADMIN = (\n '(' + SYSTEM_ADMIN + ') or (' + PROJECT_ADMIN + ')'\n)\nSYSTEM_ADMIN_OR_PROJECT_MEMBER = (\n '(' + SYSTEM_ADMIN + ') or (' + PROJECT_MEMBER + ')'\n)\nSYSTEM_OR_PROJECT_READER = (\n '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ')'\n)\n\nrules = [\n # ***Default OpenStack scoped personas*** #\n policy.RuleDefault(\n name='system-admin',\n check_str='role:admin and '\n 'system_scope:all',\n description='System scoped Administrator',\n scope_types=['system']),\n policy.RuleDefault(\n name='system-member',\n check_str='role:member and '\n 'system_scope:all',\n description='System scoped Member',\n scope_types=['system']),\n policy.RuleDefault(\n name='system-reader',\n check_str='role:reader and '\n 'system_scope:all',\n description='System scoped Reader',\n scope_types=['system']),\n policy.RuleDefault(\n name='project-admin',\n check_str='role:admin and '\n 'project_id:%(project_id)s',\n description='Project scoped Administrator',\n scope_types=['project']),\n policy.RuleDefault(\n name='project-member',\n check_str='role:member and '\n 'project_id:%(project_id)s',\n description='Project scoped Member',\n scope_types=['project']),\n policy.RuleDefault(\n name='project-reader',\n check_str='role:reader and '\n 'project_id:%(project_id)s',\n description='Project scoped Reader',\n scope_types=['project']),\n\n # ***Special personas for Manila*** #\n policy.RuleDefault(\n name='context_is_admin',\n check_str='rule:system-admin',\n description='Privileged users checked via \"context.is_admin\"',\n deprecated_rule=DEPRECATED_CONTEXT_IS_ADMIN,\n scope_types=['system']),\n\n # ***Legacy/deprecated unscoped rules*** #\n # can be removed after \"enforce_scope\" defaults to True in oslo.policy\n policy.RuleDefault(\n name='admin_or_owner',\n check_str='is_admin:True or project_id:%(project_id)s',\n description='Administrator or Member of the project'),\n policy.RuleDefault(\n name='default',\n check_str=RULE_ADMIN_OR_OWNER,\n description='Default rule for most non-Admin APIs'),\n policy.RuleDefault(\n name='admin_api',\n check_str='is_admin:True',\n description='Default rule for most Admin APIs.'),\n]\n\n\ndef list_rules():\n return rules\n","repo_name":"LucasmOliveira059/manila","sub_path":"manila/policies/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"15725662636","text":"A = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\na = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\nn = input(\"Digite a palavra: \")\npos = int(input(\"Quantas posições deseja alterar: \"))\n\nnovo = []\n\nfor i in n:\n if i in A:\n l = A.index(i) + pos\n if l > 25:\n novo.append(A[l - 26])\n else:\n novo.append(A[l])\n \n if i in a:\n s = a.index(i) + pos\n if s > 25:\n novo.append(a[s - 26])\n else:\n novo.append(a[s])\n\nfor nv in novo:\n print(nv, end='')\n","repo_name":"nascimentolds/IFPE","sub_path":"ADS/1-PERIODO/LP/04-Maratona/problema6.py","file_name":"problema6.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"te","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"38108065803","text":"import urllib.request\nimport re\nimport json\nfrom datetime import datetime, timedelta\nfrom requests import get\nfrom shapely.geometry import Polygon\nfrom urllib.request import urlopen\n\n\ndef get_prediction(model):\n\n now = datetime.now()\n today = now.strftime(\"%Y/%m/%d\")\n yesterday = (now - timedelta(days=1)).strftime(\"%Y/%m/%d\")\n\n if model == 'wrf7':\n try:\n res_json = get(\n f'http://ftp.cptec.inpe.br/modelos/tempo/WRF/ams_07km/recortes/grh/json/{today}/00/4704.json').json()\n except:\n res_json = get(\n f'http://ftp.cptec.inpe.br/modelos/tempo/WRF/ams_07km/recortes/grh/json/{yesterday}/00/4704.json').json()\n elif model == 'wrf':\n try:\n res_json = get(\n f'http://ftp.cptec.inpe.br/modelos/tempo/WRF/ams_05km/recortes/grh/json/{today}/00/4704.json').json()\n except:\n res_json = get(\n f'http://ftp.cptec.inpe.br/modelos/tempo/WRF/ams_05km/recortes/grh/json/{yesterday}/00/4704.json').json()\n elif model == 'bam':\n try:\n res_json = get(\n f'http://ftp.cptec.inpe.br/modelos/tempo/BAM/TQ0666L064/recortes/grh/json/{today}/00/4704.json').json()\n except:\n res_json = get(\n f'http://ftp.cptec.inpe.br/modelos/tempo/BAM/TQ0666L064/recortes/grh/json/{yesterday}/00/4704.json').json()\n\n raw_data = res_json['datasets'][0]['data']\n\n x_data_t = {}\n y_data = {}\n\n # Obtenção de datetime corrigido\n initial_date = datetime.fromisoformat(raw_data[0]['date']) # Data inicial devido a divergência entre modelos\n x_data_t[\"precipitacao\"] = [initial_date + timedelta(hours = i['fcst']) for i in raw_data]\n x_data_t[\"precipitacao_acc\"] = [initial_date + timedelta(hours = i['fcst']) for i in raw_data]\n x_data_t[\"temperatura\"] = [initial_date + timedelta(hours = i['fcst']) for i in raw_data]\n x_data_t[\"temperatura_aparente\"] = [initial_date + timedelta(hours = i['fcst']) for i in raw_data]\n x_data_t[\"pressao\"] = [initial_date + timedelta(hours = i['fcst']) for i in raw_data]\n x_data_t[\"umidade_relativa\"] = [initial_date + timedelta(hours = i['fcst']) for i in raw_data]\n\n # Obtenção de dados meteorológicos\n y_data[\"precipitacao\"] = [i['prec'] for i in raw_data]\n y_data[\"temperatura\"] = [i['temp'] for i in raw_data]\n y_data[\"temperatura_aparente\"] = [i['heat_index'] for i in raw_data]\n y_data[\"pressao\"] = [i['press'] for i in raw_data]\n y_data['umidade_relativa'] = [i['ur'] for i in raw_data]\n acc = 0\n for i in raw_data:\n # soma a precipitação atual com a acumulada anterior\n precipitacao_acc = i['prec']+acc\n # atualiza o valor da precipitação acumulada\n acc = precipitacao_acc\n y_data['precipitacao_acc'] = (precipitacao_acc)\n\n return x_data_t, y_data\n\n\ndef extract_data(source_string: str):\n res = json.loads(source_string)\n x_data = [point['x']for point in res]\n x_data_t = [datetime.fromtimestamp(t//1000) for t in x_data]\n y_data = [point['y']for point in res]\n\n return x_data, x_data_t, y_data\n\n\ndef get_SantoAndre_polygon():\n\n path = 'https://raw.githubusercontent.com/tbrugz/geodata-br/master/geojson/geojs-35-mun.json'\n\n with urlopen(path) as response:\n counties = json.load(response)\n SA = [i for i in counties['features'] if i['properties']['name'] == 'Santo André'][0]\n\n return SA\n\n\ndef verify_title_string(t):\n if 'observação' in t.lower() or 'observacao' in t.lower():\n text = 'Aviso de Observação'\n elif 'atenção' in t.lower() or 'atencao' in t.lower():\n text = 'Aviso de Atenção'\n elif 'especial' in t.lower():\n text = 'Aviso Especial'\n elif 'extraordinário' in t.lower() or 'extraordinario' 
in t.lower() or 'risco' in t.lower():\n text = 'Aviso Extraordinário de Risco Iminente'\n elif 'cessado' in t.lower():\n text = 'Aviso Cessado'\n else:\n text = 'Sem Aviso'\n\n return text\n\n\ndef get_polygon():\n\n value_dict = {'Aviso de Observação': 1,\n 'Aviso de Atenção': 2,\n 'Aviso Especial': 3,\n 'Aviso Extraordinário de Risco Iminente': 4,\n 'Aviso Cessado': 5\n }\n\n inverse_value_dict = {\n 0: 'Sem Aviso',\n 1: 'Aviso de Observação',\n 2: 'Aviso de Atenção',\n 3: 'Aviso Especial',\n 4: 'Aviso Extraordinário de Risco Iminente',\n 5: 'Aviso Cessado'}\n\n intersection = {}\n output_dict = {}\n\n SA = get_SantoAndre_polygon()\n SA_polygon = Polygon(SA['geometry']['coordinates'][0])\n SA_layer = dict(sourcetype='geojson',\n source=SA,\n below='',\n type='fill',\n opacity=0.25,\n color='#1c1e2f')\n\n with urllib.request.urlopen('http://tempo.cptec.inpe.br/avisos/') as response:\n html_source = str(response.read())\n\n htmlnow = re.search(r'^(.+?)\\/\\/ 48 horas', html_source).group(1)\n html48 = re.search(r'\\/\\/ 48 horas(.*?)\\/\\/ 72 horas', html_source).group(1)\n html72 = re.search(r'\\/\\/ 72 horas(.*)', html_source).group(1)\n\n for text, html in zip(['Hoje', '48 horas', '72 horas'], [htmlnow, html48, html72]):\n\n intersection[text] = 0\n\n output_dict[text] = {'geom': [],\n 'title': []}\n\n poly_func_string_list = re.findall(r'google.maps.Polygon(.*?)\\)', html)\n poly_func_string = re.search(r'new google.maps.Polygon\\((.*?)\\}\\)', html)\n\n if poly_func_string is None:\n continue\n\n poly_func_string = poly_func_string.group(1)\n\n for poly_func_string in poly_func_string_list:\n\n polygon_string = re.search(r'paths\\: (.*?),\\\\n', poly_func_string).group(1)\n\n polygon_string = polygon_string.replace('lat', '\"lat\"').replace('lng', '\"lng\"')\n polygon_dict = json.loads(polygon_string)\n polygon_points = [(p['lng'], p['lat']) for p in polygon_dict]\n\n title_string = re.search(r'title\\:\\\"(.*?)\"', poly_func_string).group(1)\n title_string = title_string.replace('\\\\xc3\\\\xa7', 'ç').replace('\\\\xc3\\\\xa3', 'ã').replace('\\\\xc3\\\\xa1', 'á')\n\n title_string = verify_title_string(title_string)\n\n # Populate Dict\n output_dict[text]['geom'].append(polygon_points)\n output_dict[text]['title'].append(title_string)\n\n polygon = Polygon(polygon_points)\n\n if SA_polygon.intersects(polygon):\n if value_dict[title_string] > intersection[text]:\n intersection[text] = value_dict[title_string]\n\n for k in intersection.keys():\n output_dict[k]['aviso'] = inverse_value_dict[intersection[k]]\n\n return output_dict, SA_polygon, SA_layer\n","repo_name":"alagamentos/floodprediction","sub_path":"src/Dash/cptec.py","file_name":"cptec.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"21088008160","text":"import numpy as np\nimport torch\n\nfrom util import get_model_detect, get_model_recognition\nfrom util.utils import image_process_for_detect, get_scale_output, get_anchors, decode, decode_landmarks, \\\n non_max_suppression, image_process_for_recognition, l2_norm, retinaface_correct_boxes, alignment\n\n\nclass Embeddings(object):\n def __init__(self, cfg):\n super(Embeddings, self).__init__()\n self.cfg = cfg\n self.retina = get_model_detect(cfg.retina)\n self.recognition = get_model_recognition(cfg.recognition)\n\n def get_embeddings(self, img):\n \"\"\"\n args:\n img: 一张图片,图片是GBR通道(w,h,c)\n return:\n face_embeddings: 如果他返回None 代表整张图片上不存在人脸、如果返回是具体的向量向量形式是(n,v)\n boxes_conf_landmarks: 如果这个返回是None,也代表不存在人脸,如果返回的是具体向量(n,15)\n 上面的n代表的是人脸的数目,v代表的是向量的长度\n\n note:\n 如果进行编码:一张图片只允许包含0或1张人脸\n 如果进行识别:一张图片可以包>=0张人脸\n \"\"\"\n old_image = img.copy()\n scale, scale_for_landmarks = get_scale_output(img)\n anchors = get_anchors(self.cfg.retina)\n img = image_process_for_detect(img, self.cfg)\n with torch.no_grad():\n # img = torch.from_numpy(img).type(torch.FloatTensor)\n img = img.to(self.cfg.retina.device)\n loc, conf, landmarks = self.retina(img)\n loc, conf, landmarks = loc.cpu(), conf.cpu(), landmarks.cpu()\n boxes = decode(loc.data.squeeze(0), anchors, self.cfg.retina.net_cfg['variance'], scale)\n conf = conf.data.squeeze(0)[:, 1:2].numpy()\n landmarks = decode_landmarks(landmarks.data.squeeze(0), anchors, self.cfg.retina.net_cfg['variance'],\n scale_for_landmarks)\n boxes_conf_landmarks = np.concatenate([boxes, conf, landmarks], -1)\n boxes_conf_landmarks = non_max_suppression(boxes_conf_landmarks, self.cfg.retina.confidence)\n if len(boxes_conf_landmarks) < 1:\n return None, None\n boxes_conf_landmarks = retinaface_correct_boxes(boxes_conf_landmarks, np.array(self.cfg.retina.input_shape[:2]), np.array(old_image.shape[:2]))\n face_embeddings = []\n for boxes_conf_landmark in boxes_conf_landmarks:\n boxes_conf_landmark = np.maximum(boxes_conf_landmark, 0)\n recognition_image = image_process_for_recognition(old_image, boxes_conf_landmark, self.cfg)\n with torch.no_grad():\n recognition_image = recognition_image.type(torch.FloatTensor)\n recognition_image = recognition_image.to(self.cfg.recognition.device)\n if self.cfg.recognition.split:\n embeddings = self.recognition(recognition_image)[1]\n else:\n embeddings = self.recognition(recognition_image)\n face_embeddings.append(embeddings.cpu())\n face_embeddings = torch.cat(face_embeddings, 0)\n face_embeddings = l2_norm(face_embeddings, -1)\n return face_embeddings, boxes_conf_landmarks\n","repo_name":"Meng-Sang/FR","sub_path":"util/detect_recognition.py","file_name":"detect_recognition.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"72476675921","text":"#!/usr/bin/env python3\nimport random\nfrom ex2_rpsls_helper import get_selection\n\ndef rpsls_game():\n compRes=0\n userRes=0\n drawRes=0\n while (compRes-userRes)!=2 and (userRes-compRes)!=2:\n userChoice=int(input(\" Please enter your selection: \"+\n \"1 (Rock), 2 (Paper), 3 (Scissors), 4 (Lizard) or 5 (Spock): \"))\n if userChoice>5 or userChoice<1:\n print(\" Please select one of the available options.\\n\")\n else:\n print(\" Player has selected: %s.\" %get_selection(userChoice))\n compChoice=random.randint(1, 5)\n print(\" Computer has selected: %s.\" %get_selection(compChoice))\n\n if userChoice==1: #Rock\n if compChoice==3 or compChoice==4:\n print(\" The winner for this round is: Player\\n\")\n userRes+=1\n elif compChoice==1:\n print(\" This round was drawn\\n\")\n drawRes+=1\n else:\n print(\" The winner for this round is: Computer\\n\")\n compRes+=1\n\n elif userChoice==2: #Paper\n if compChoice==1 or compChoice==5:\n print(\" The winner for this round is: Player\\n\")\n userRes+=1\n elif compChoice==2:\n print(\" This round was drawn\\n\")\n drawRes+=1\n else:\n print(\" The winner for this round is: Computer\\n\")\n compRes+=1\n \n elif userChoice==3: #Scissors\n if compChoice==2 or compChoice==4:\n print(\" The winner for this round is: Player\\n\")\n userRes+=1\n elif compChoice==3:\n print(\" This round was drawn\\n\")\n drawRes+=1\n else:\n print(\" The winner for this round is: Computer\\n\")\n compRes+=1\n\n elif userChoice==4: #Lizard\n if compChoice==2 or compChoice==5:\n print(\" The winner for this round is: Player\\n\")\n userRes+=1\n elif compChoice==4:\n print(\" This round was drawn\\n\")\n drawRes+=1\n else:\n print(\" The winner for this round is: Computer\\n\")\n compRes+=1\n\n else: #Spock\n if compChoice==1 or compChoice==3:\n print(\" The winner for this round is: Player\\n\")\n userRes+=1\n elif compChoice==5:\n print(\" This round was drawn\\n\")\n drawRes+=1\n else: \n print(\" The winner for this round is: Computer\\n\")\n compRes+=1\n \n if compRes>userRes:\n print(\"The winner for this game is: Computer\")\n print(\"Game score: Player %s, Computer %s, draws %s\" % (userRes, compRes, drawRes))\n return -1\n else:\n print(\"The winner for this game is: Player\")\n print(\"Game score: Player %s, Computer %s, draws %s\" % (userRes, compRes, drawRes))\n return 1\n \ndef rpsls_play():\n\n setsCount=0\n winCount=0\n gameCount=1\n userSetCount=0\n compSetCount=0\n print(\"Welcome to the Rock-Scissors-Paper-Lizard-Spock game!\")\n setLen=int(input(\"Select set length: \"))\n setEven=setLen\n userQuit=-1\n while userQuit!=1:\n while setLen>0 or compSetCount==userSetCount or +\\\n (setEven%2==0 and abs(compSetCount-userSetCount)==1):\n print(\"Now beginning game %s\" %gameCount)\n rpslsRes=rpsls_game()\n if rpslsRes==1:\n userSetCount+=1\n elif rpslsRes==-1:\n compSetCount+=1\n setLen-=1\n gameCount+=1\n print(\"Set score: Player %s, Computer %s\" % (userSetCount, compSetCount))\n if setEven%2==1:\n if ((userSetCount>setEven/2) or (compSetCount>setEven/2)) and compSetCount!=userSetCount :\n break\n elif setEven%2==0:\n if (compSetCount>setEven/2 or userSetCount>setEven/2) and +\\\n abs(compSetCount-userSetCount)!=1 and compSetCount!=userSetCount:\n break\n \n if userSetCount>compSetCount:\n print(\"Congratulations! 
You have won in %s games.\" %(gameCount-1))\n winCount+=1\n elif userSetCount 0$\n# \\begin{equation*} \n# r(\\alpha x) = \\max(0, \\alpha x) = \\alpha \\max(0, x) = \\alpha r(x)\n# \\end{equation*}\n# dass auch \n# \\begin{equation*} \n# \\hat{w}_\\alpha = \\big(\\alpha, 0, \\frac{1}{\\alpha}, 0\\big), \\quad \\forall \\alpha > 0\n# \\end{equation*} \n# ein globales Minimum von $l$ ist und analog auch\n# \\begin{equation*} \n# \\hat{w}_\\beta = \\big(1, \\beta, 1, -\\beta \\big), \\quad \\forall \\beta > 0.\n# \\end{equation*} \n\n# Damit kann $l$ nicht strikt konvex sein. Wie das folgende Beispiel zeigt ist $l$ nicht einmal konvex.\n\n# In[2]:\n\n\n#%matplotlib notebook \nget_ipython().run_line_magic('matplotlib', 'inline')\n\nrelu = lambda x : np.maximum(0, x)\n\ndef l(w1, w3):\n fw = 0.0\n for x,y in zip(Xtrain, ytrain):\n fw += (relu(w1 * x) * w3 - y)**2\n return fw/Xtrain.shape[0]\n\nl = np.vectorize(l)\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\nplt.figure(figsize = (10, 10))\nplt.axes(projection='3d')\n\nw1 = np.array([ 0, 2])\nw3 = np.array([-1, 1])\n\n#w1 = np.array([ 0, 1])\n#w3 = np.array([-1, 0])\n\ng1 = np.linspace(*w1)\ng3 = np.linspace(*w3)\n\nww3, ww1 = np.meshgrid(g3, g1)\nww1 = ww1.T\nww3 = ww3.T\nff = l(ww1, ww3)\n\nax = plt.gca()\nax.plot_surface(ww1, ww3, ff, alpha = 0.5, cmap=plt.cm.jet)\ncc = ff.max() * np.linspace(0,1)**2\n#cc = 10*np.linspace(0,1)**5\nax.contour(ww1, ww3, ff, cc)\n\nzoff = -2\nax.contour(ww1, ww3, ff, cc, zdir='z', offset=zoff, cmap=plt.cm.jet)\n#plt.contour(ww1, ww3, f(ww1, ww3), zdir='x', cmap = plt.cm.jet);\n\nl1 = np.linspace(*w1)\nl3 = np.linspace(*w3)\nax.plot3D(l1, l3, l(l1, l3), c = 'r')\n\nl1 = np.linspace(*w1, 2)\nl3 = np.linspace(*w3, 2)\nax.plot3D(l1, l3, zoff * np.ones(l1.shape), 'r')\n\nax.set_zlim(zoff, ff.max())\nax.set_xlabel('$w_1$')\nax.set_ylabel('$w_3$')\nax.set_zlabel('$f$')\nax.view_init(20, 150)\n\n\n# Für $w_2 = w_4 = 0$ sind die Funktionswerte entlang der\n# Strecke $(w_1,w_3) = (0,-1)$ nach $(w_1,w_3) = (2,1)$ dargestellt.\n\n# Die fehlende Konvexität wird uns beim Anpassen der Parameter $w$ noch viel \"Freude\" bereiten.\n# \n# Diese Anpassung werden wir nun mit 3 der gängigsten Software Tools vornehmen.\n\n# ### Scikit-Learn\n\n# Wir passen einen `MLPRegressor` an und benutzen die Default-Einstellungen.\n\n# In[3]:\n\n\nfrom sklearn import neural_network\n\nmlp = neural_network.MLPRegressor(hidden_layer_sizes = [nn], max_iter = 10000, random_state = seed)\nmlp.fit(Xtrain, ytrain.flat)\n\ndef ev(mlp, c = 'r', label=''):\n plt.plot(Xtrain, ytrain, 'b.');\n plt.plot(Xplot, mlp.predict(Xplot), c, label=label);\n print(\"solver = {}, score = {}\".format(mlp.solver, mlp.score(Xtrain, ytrain)))\n\nev(mlp)\n\n\n# Das Ergebnis ist unbrauchbar.\n# \n# Der Startwert für den iterativen Löser wird zufällig gewählt und kann\n# über den Parameter `random_state` beeinflusst werden\n\n# In[4]:\n\n\nmlp = neural_network.MLPRegressor(hidden_layer_sizes = [nn], max_iter = 10000, random_state = 234)\nmlp.fit(Xtrain, ytrain.flat)\nev(mlp)\n\n\n# In[5]:\n\n\nmlp = neural_network.MLPRegressor(hidden_layer_sizes = [nn], max_iter = 10000, random_state = 314159)\nmlp.fit(Xtrain, ytrain.flat)\nev(mlp)\n\n\n# Die Ergebnisse hängen offensichtlich extrem stark vom Startwert ab. 
Die Qualität ist insgesamt sehr dürftig.\n\n# ### Keras-Tensorflow\n\n# In[6]:\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation\n\nmodel = Sequential(\n[\n Dense(units = nn, input_dim = 1),\n #Dense(units = nn, input_dim = 1, kernel_initializer = keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=seed)),\n Activation('relu'),\n #Activation('tanh'),\n Dense(units = 1)\n])\n\nmodel.summary()\n\n\n# In[7]:\n\n\nmodel.compile(loss='mse',\n optimizer='nadam',\n metrics=['accuracy'])\n\nmodel.fit(Xtrain, ytrain, epochs = 100, verbose = 0);\n\ndef kev(model, c = 'r'):\n plt.plot(Xtrain, g(Xtrain), 'b.');\n plt.plot(Xplot, model.predict(Xplot), c);\n print(\"loss = {}\".format(model.evaluate(Xtrain, ytrain)[0]))\n \nkev(model) \n\n\n# ### Pytorch\n\n# In[8]:\n\n\nimport torch\nimport torch.nn as tnn\n\nxt = torch.from_numpy(Xtrain).to(torch.float32)\nyt = torch.from_numpy(ytrain).to(torch.float32)\n\nfrom torch.utils.data import TensorDataset, DataLoader\n\ndst = TensorDataset(xt, yt)\ndlt = DataLoader(dst, batch_size = 5, shuffle=True)\n\nclass Percep(tnn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = tnn.Linear(1, nn)\n self.act1 = tnn.ReLU() \n self.linear2 = tnn.Linear(nn, 1)\n \n def forward(self, x):\n x = self.linear1(x)\n x = self.act1(x)\n x = self.linear2(x)\n return x\n\nmodel = Percep()\n\nmodel.eval()\n\n\n# In[9]:\n\n\nopt = torch.optim.SGD(model.parameters(), lr=1e-5)\n\nimport torch.nn.functional as F\nloss_fn = F.mse_loss\n\ndef fit(num_epochs, model, loss_fn, opt):\n for epoch in range(num_epochs):\n for xb,yb in dlt:\n # Generate predictions\n pred = model(xb)\n loss = loss_fn(pred, yb)\n # Perform gradient descent\n loss.backward()\n opt.step()\n opt.zero_grad()\n print('Training loss: ', loss_fn(model(xt), yt))\n\nfit(100, model, loss_fn, opt)\n\ndef pev(model, c = 'r'):\n plt.plot(Xtrain, g(Xtrain), 'b.');\n plt.plot(xt.data.numpy(), model(xt).data.numpy(), 'r');\n\npev(model)\n\n\n# ## (Sub)Gradient-Descent\n\n# Wir betrachten wieder unser triviales Netz von oben\n# \\begin{equation*} \n# g(x, w) = r(w_1 \\, x + w_2)\\,w_3 + w_4,\n# \\quad\n# r(x) = \\max(0, x),\n# \\end{equation*}\n# mit Trainingsdatensatz\n# \\begin{equation*} \n# x_i = y_i = \\frac{i}{n}, \\quad i = 0,\\ldots,n, \\quad n=10.\n# \\end{equation*}\n# \n# Da $r$ bei $0$ nicht differenzierbar ist, ist eine direkte Anwendung des Gradientenverfahrens zunächst nicht möglich.\n# \n# Man kann dies durch zwei Strategien reparieren:\n# \n# - man ersetzt $r$ durch eine differenzierbare Approximation $\\tilde{r}$\n# \n# - man benutzt statt dem Gradienten den Subgradienten\n# \n# Wir benutzen die zweite Variante. Als Subgradient von $r$ erhalten wir\n# \\begin{equation*} \n# \\partial r(x)=\n# \\begin{cases}\n# 0 & x < 0\\\\\n# [0,1] & x = 0\\\\\n# 1 & 0< x\n# \\end{cases},\n# \\end{equation*}\n# d.h. 
bei $x=0$ müssen wir uns für einen Wert in $[0,1]$ entscheiden.\n# Wie wir später sehen werden, haben wir hier \"freie Auswahl\".\n# Der Einfachheit halber benutzen wir den Wert $\\frac{1}{2}$.\n\n# In[10]:\n\n\nimport autograd.numpy as np\nfrom autograd import grad\n\nrelu = lambda x : np.maximum(0, x)\n\nrelu1 = grad(relu)\nrelu1(-1.0), relu1(0.0), relu1(1.0)\n\n\n# Für verschieden Anfangswerte erhalten wir\n\n# In[11]:\n\n\ng = lambda x,w : relu(w[0] * x + w[1]) * w[2] + w[3]\n\ndef l(w):\n fw = 0.0\n for x,y in zip(Xtrain, ytrain):\n fw += (g(x,w) - y)**2\n return fw[0] / Xtrain.shape[0]\n\nl1 = grad(l)\n\ndef GD(w0, l1, lr = 1e-1, nit = 100):\n w = w0.copy()\n ww = [w]\n for k in range(nit):\n w = w - lr * l1(w)\n ww.append(w)\n return ww\n\ndef ev(w, c = 'r', label=''):\n plt.plot(Xtrain, ytrain, 'b.');\n plt.plot(Xplot, g(Xplot, w[-1]), c);\n plt.figure()\n plt.semilogy(list(map(l, w)), label=label)\n #plt.ylabel('$l$',rotation=0)\n\nw0 = np.zeros(4)\nw = GD(w0, l1, 0.1, 200)\nev(w)\n\n\n# bzw.\n\n# In[12]:\n\n\nw0 = np.ones(4)\nw = GD(w0, l1, 0.1, 200)\nev(w)\n\n\n# ## Accelerated Gradient-Descent (Nesterov)\n\n# Durch eine einfache Modifikation kann man das (Sub)Gradientenverfahren\n# beschleunigen. \n# Man bestimmt die neue Suchrichtung als Kombination aus dem aktuellen\n# negativen Gradienten und der vorherigen Suchrichtung (ähnlich wie beim\n# CG-Verfahren).\n# \n# Die bekannteste Variante stammt von [Nesterov](https://uclouvain.be/fr/repertoires/yurii.nesterov), der auch nachgewiesen hat, dass diese Verfahren in einem\n# gewissen Sinn optimal sind.\n# Die Iterationsvorschrift sieht wie folgt aus:\n# \\begin{align*}\n# w^{(-1)} &= w^{(0)} \\text {gegeben}\\\\\n# k = 1&,2,...\\\\\n# & v^{(k)} = w^{(k-1)} + \\frac{k-2}{k+1} \\big( w^{(k-1)} - w^{(k-2)} \\big) \\\\\n# & w^{(k)} = v^{(k)} - \\alpha^{(k)} l'(v^{(k)})\n# \\end{align*}\n\n# In[13]:\n\n\ndef Nes(w0, l1, lr = 0.1, maxit = 30):\n # Variante von Tibshirani\n w = [w0, w0]\n \n for k in range(1,maxit+1):\n vk = w[-1] + (k-2)/(k+1) * (w[-1] - w[-2])\n wk = vk - lr * l1(vk)\n\n w.append(wk)\n\n return(w[1:])\n\nw0 = np.ones(4)\nwnes = Nes(w0, l1, 0.1, 200)\nev(wnes, label='Nesterov')\nplt.semilogy(list(map(l, w)), label=\"SubGD\")\nplt.legend();\n\n\n# ## Stochastic (Sub)Gradient-Descent\n\n# Wir betrachten noch einmal unsere Zielfunktion $l$\n# \\begin{align*} \n# l(w) \n# &= \\frac{1}{n} \\sum_{i=1}^n \\big(g(x_i, w) -y_i\\big)^2\n# = \\frac{1}{n} \\sum_{i=1}^n l_i(w),\n# \\\\\n# l_i(w) &= \\big(g(x_i, w) -y_i\\big)^2.\n# \\end{align*}\n# In jedem Schritt des (Sub)Gradienten-Verfahren muss\n# \\begin{equation*} \n# \\partial l(w) = \\frac{1}{n} \\sum_{i=1}^n \\partial l_i(w)\n# \\end{equation*}\n# berechnet werden, d.h. der Aufwand skaliert mit der Anzahl\n# $n$ an Trainingsdaten, die zur Bestimmung der Parameter $w$\n# benutzt werden.\n\n# Andererseits ist $\\partial l(w)$ offensichtlich der Mittelwert der einzelnen $\\partial l_i(w)$, so dass es naheliegend ist, diesen Mittelwert durch eine weniger aufwendige Approximation zu nähern, z.B.:\n# \n# - $\\partial l(w) \\approx \\partial l_{\\hat{i}}(w)$ für *ein* $\\hat{i}\\in \\{1,\\ldots,n\\}$\n# \n# - $\\partial l(w) \\approx \\frac{1}{n_B} \\sum_{i\\in B} \\partial l_i(w)$ für eine $n_B$-elementige Teilmenge $B \\subset \\{1,\\ldots,n\\}$ mit $n_B \\le n$\n\n# Den Index $\\hat{i}$ bzw. die Teilmenge $B$ wird in jedem Schritt des Gradienten-Verfahrens zufällig neu bestimmt. 
Das resultierende Verfahren heißt Stochastic Gradient-Descent- bzw.\n# Minibatch Stochastic Gradient-Descent-Verfahren.\n\n# Angewandt auf unser Modellproblem erhalten wir mit $n_B = 1$\n\n# In[14]:\n\n\nli = lambda w, x, y : ((g(x,w) - y)**2)[0]\nli1 = grad(li)\n\ndef SGD(w0, li1, x, y, lr = 1e-1, nit = 100, bs = 1):\n w = w0.copy()\n ww = [w]\n for k in range(nit):\n g = 0.0\n for i in np.random.permutation(x.shape[0])[:bs]:\n g += li1(w, x[i], y[i])\n g /= bs\n w = w - lr * g\n ww.append(w)\n return ww\n\nnp.random.seed(seed)\nw0 = np.ones(4)\nw = SGD(w0, li1, Xtrain, ytrain, 0.1, 200)\nev(w)\n\n\n# Der Abfall der Loss-Funktion ist ähnlich schnell wie beim Standard-Gradienten-Verfahren, aber nicht monoton (\"Rauschen\")\n# \n# Bei $n$ Training-Samples $x_i,y_i$ ist der Aufwand bei SGD pro Iteration\n# um einen Faktor $n$ kleiner.\n\n# Für $n_B = 3$ folgt\n\n# In[15]:\n\n\nnp.random.seed(seed)\nw0 = np.ones(4)\nwb = SGD(w0, li1, Xtrain, ytrain, 0.1, 200, bs = 3)\nev(wb)\n\n\n# Hier ist der Verlauf der Abfall der Loss-Werte etwas weniger \"zitterig\" als im Fall $n_B=1$, allerdings ist der Aufwand pro Iteration auch wesentlich höher.\n\n# Analog kann man auch für das Nesterov-Verfahren eine stochastische Variante\n# aufbauen.\n\n# In[16]:\n\n\ndef SNes(w0, li1, x, y, lr = 0.1, maxit = 30, bs = 1):\n # Variante von Tibshirani\n w = [w0, w0]\n \n for k in range(1,maxit+1):\n vk = w[-1] + (k-2)/(k+1) * (w[-1] - w[-2])\n\n gk = 0.0\n for i in np.random.permutation(x.shape[0])[:bs]:\n gk += li1(vk, x[i], y[i])\n \n wk = vk - lr * gk\n\n w.append(wk)\n\n return(w[1:])\n\nnp.random.seed(seed)\nw0 = np.ones(4)\nwnes = SNes(w0, li1, Xtrain, ytrain, 0.1, 200)\nev(wnes, label='Nesterov')\nplt.semilogy(list(map(l, w)), label=\"SubGD\")\nplt.legend();\n\n\n# ## Backpropagation\n\n# Zuletzt muss noch überlegt werden, wie die (Sub)Gradienten\n# \\begin{equation*} \n# \\partial l_i(w),\n# \\quad \n# l_i(w) = \\big(g(x_i, w) -y_i\\big)^2\n# \\end{equation*}\n# möglichst effizient berechnet werden können.\n# Dadurch dass beim MLP die Parameter $w$ sehr komplex\n# in $g$ eingehen, ist dies nicht trivial.\n\n# Wir betrachten zunächst den trivialen Fall eines einzelnen skalaren Neurons.\n# Zur Vereinfachung der Notation wird der Index $i$ weg gelassen.\n# \\begin{equation*} \n# x \\rightarrow w_1 x =\\colon i_1 \\rightarrow a(i_1) =\\colon o_1 \n# \\end{equation*}\n# mit differenzierbarem Loss $l$.\n# Für den Gradienten von $l$ nach $w_1$ erhalten wir\n# \\begin{equation*} \n# \\partial_{w_1} l(o_1)\n# = l'(o_1)\\partial_{w_1} o_1\n# = l'(o_1)a'(i_1)\\partial_{w_1} i_1\n# = l'(o_1)a'(i_1) x\n# \\end{equation*}\n# Hat man $o_1$ berechnet, so kennt man auch $i_1$ und $\\partial_{w_1} l(o_1)$ ist direkt bestimmbar.\n\n# Betrachten wir nun die analoge Konstellation für zwei Neuronen\n# \\begin{equation*} \n# x \n# \\rightarrow w_1 x =\\colon i_1 \\rightarrow a(i_1) =\\colon o_1 \n# \\rightarrow w_2 o_1 =\\colon i_2 \\rightarrow a(i_2) =\\colon o_2. 
\n# \\end{equation*}\n# Für die Ableitung von $l(o_2)$ nach $w_k$ erhalten wir\n# \\begin{equation*} \n# \\partial_{w_k} l(o_2) \n# = l'(o_2)\\partial_{w_k} o_2 \n# = l'(o_2)a'(i_2)\\partial_{w_k} i_2\n# = l'(o_2)a'(i_2)\\partial_{w_k} (w_2 o_1) \n# \\end{equation*}\n# Für $w_2$ gilt dann\n# \\begin{equation*} \n# \\partial_{w_2} l(o_2)\n# = l'(o_2)a'(i_2)\\partial_{w_2} (w_2 o_1) \n# = l'(o_2)a'(i_2) \\big(o_1 + w_2 \\partial_{w_2} o_1 \\big)\n# \\end{equation*}\n# und da $o_1$ nicht von $w_2$ abhängt folgt\n# \\begin{equation*} \n# \\partial_{w_2} l(o_2) = l'(o_2)a'(i_2) o_1,\n# \\end{equation*}\n# d.h. $\\partial_{w_2} l(o_2)$ kann einfach bestimmt werden.\n\n# Für $\\partial_{w_1} l(o_2)$ erhalten wir \n# \\begin{equation*} \n# \\partial_{w_1} l(o_2)\n# = l'(o_2)a'(i_2)\\partial_{w_1} (w_2 o_1) \n# = l'(o_2)a'(i_2) w_2 \\partial_{w_1} o_1\n# \\end{equation*}\n# und mit $\\partial_{w_1} o_1 = a'(i_1)\\partial_{w_1} i_1 = a'(i_1) x$ folgt\n# \\begin{equation*} \n# \\partial_{w_1} l(o_2) = l'(o_2)a'(i_2) w_2 a'(i_1) x\n# \\end{equation*}\n\n# Analog kann man auch bei komplexeren Netzen beginnend von der Output-Seite hin zur Input-Seite Schritt für Schritt die Ableitungen nach von $l_i$ nach den Parametern der jeweiligen\n# Schicht generieren. Deshalb heißt dieser Zugang *Backpropagation*.\n\n# ## Zusammenfassung\n\n# Die Parameteranpassung bei neuronalen Netzen ist schwierig, da\n# die Zielfunktion oft nicht differenzierbar (z.B. RELU Aktivierung $a(x)=\\max(0,x)$)\n# bzw. nicht konvex ist, so dass die Ergebnisse von gradientenartigen Verfahren sehr stark von der Wahl des Anfangswertes abhängen (Nebenminima).\n# \n# Besonders populär sind stochastische Gradienten-Verfahren, die auch bei großen Trainings-Datensätzen sehr effizient sind. Die benötigten Ableitungen werden dabei in der Regel mit\n# Backpropagation berechnet.\n\n# Man beachte den Unterschied zwischen SGD und Coordinate-Descent. Mit beiden Verfahren minimiert man die Zielfunktion\n# \\begin{equation*} \n# l(w) = \\frac{1}{n} \\sum_{i=1}^n l_i(w),\n# \\quad\n# l_i(w) = \\big(g(x_i, w) -y_i\\big)^2\n# \\end{equation*}\n# durch approximative Gradienten-Updates\n# \\begin{equation*} \n# w^{(k+1)} = w^{(k)} - \\alpha^{(k)} g^{(k)}\n# \\end{equation*}\n# mit\n# \\begin{equation*} \n# g_{SGD}^{(k)} = \\partial_w l_{\\hat{i}}(w)\n# \\end{equation*}\n# bzw.\n# \\begin{equation*} \n# g_{CD}^{(k)} = \\partial_{w_{\\hat{i}}} l(w).\n# \\end{equation*}\n","repo_name":"mre2110/NumMLv042","sub_path":"_build/jupyter_execute/06_Neuronale_Netze.py","file_name":"06_Neuronale_Netze.py","file_ext":"py","file_size_in_byte":20204,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"70883460243","text":"import json\nimport logging\nfrom django.http import HttpResponse\n\nfrom ...recommendation.arxiv_evaluation_handler import ArXivEvaluationListHandler\nfrom ...recommendation.wikipedia_evaluation_handler import WikipediaEvaluationListHandler\nfrom ...recommendation.static_wikidata_handler import StaticWikidataHandler\nfrom ...recommendation.manual_recommendations_handler import ManualRecommendationsHandler\nfrom ...recommendation.formula_concept_db_handler import FormulaConceptDBHandler\nfrom ...views.helper_classes.data_repo_handler import DataRepoHandler\nfrom ...views.helper_classes.cache_handler import CacheHandler\nfrom ...config import *\n\nlogging.basicConfig(level=logging.INFO)\ntoken_clicked_handler_logger = logging.getLogger(__name__)\n\nclass TokenClickedHandler:\n \"\"\"\n This class handles the use case, when the user selects a token (word, identifier or formula) to annotate.\n Depending on the type of the token, different types of data are sent back to the frontend.\n\n Identifier:\n - Wikidata query is made\n - ArXiv evaluation list is checked for matches8\n - Wikipedia evaluation list is checked for matches\n - Word window is computed\n\n Formula:\n - Wikidata query is made\n - Word window is computed\n\n Word (must not necessarily be named entity, as found by tagger):\n - Wikidata query is made\n\n\n For identifier and formulae, additionaly the concatenated results are computed, taking results from each of the\n sources and combining them in one column.\n\n :param request: Request object. Request made by the user through the frontend.\n :return: The rendered response containing the template name, the necessary form and the response data.\n \"\"\"\n\n def __init__(self, items):\n self.items = items\n\n\n def get_recommendations(self):\n\n\n\n recommendations_dict = {'arXivEvaluationItems': [],\n 'wikipediaEvaluationItems': [],\n 'wikidata1Results': [],\n 'wikidata2Results': [],\n 'wordWindow': [],\n 'formulaConceptDB': [],\n 'manual': []}\n\n\n search_string = [k for k in self.items['searchString']][0]\n token_type_dict = self.items['tokenType']\n token_type = [k for k in token_type_dict][0]\n unique_id = [k for k in self.items['uniqueId']][0]\n math_env = self.items['mathEnv']['dummy']\n annotations = self.items['annotations']\n\n token_clicked_handler_logger.info('Type: {}'.format(token_type))\n\n all_manual_recommendations = DataRepoHandler().get_manual_recommendations()\n\n if token_type == 'Identifier':\n recommendations_dict['arXivEvaluationItems'] = ArXivEvaluationListHandler().check_identifiers(search_string)\n recommendations_dict['wikipediaEvaluationItems'] = WikipediaEvaluationListHandler().check_identifiers(search_string)\n recommendations_dict['wikidata1Results'] = StaticWikidataHandler().check_identifiers(search_string)\n\n elif token_type == 'Formula':\n recommendations_dict['wikidata1Results'], recommendations_dict['wikidata2Results'] = StaticWikidataHandler().check_formulae(math_env, annotations)\n recommendations_dict['formulaConceptDB'] = FormulaConceptDBHandler().query_tex_string(math_env)\n #token_clicked_handler_logger.info(recommendations_dict['formulaConceptDB'])\n\n else:\n token_clicked_handler_logger.info('Faulty token_type: {}'.format(token_type))\n\n\n\n\n\n recommendations_dict['wordWindow'] = self.get_word_window(unique_id)\n\n recommendations_dict['manual'] = ManualRecommendationsHandler(\n all_manual_recommendations).check_identifier_or_formula(search_string)\n\n data_repo_handler = DataRepoHandler()\n 
all_wikidata_identifiers = data_repo_handler.get_wikidata_identifiers_by_name()\n all_wikidata_formulae = data_repo_handler.get_wikidata_formulae()\n all_math_items = data_repo_handler.get_math_wikidata_items()\n\n token_clicked_handler_logger.info(type(all_math_items))\n token_clicked_handler_logger.info(all_math_items[\"metabiaugmented hexagonal prism\"])\n\n\n def pp(dict_list, source):\n \"\"\"\n post process: add QID and fill to recommendations limit\n :param dict_list: ditionary list of recommendations from one source\n :return:\n \"\"\"\n def add_qid_identifier(r):\n \"\"\"\n :param r: single recommendation\n :return:\n \"\"\"\n name = r['name']\n if name in all_wikidata_identifiers:\n r['qid'] = all_wikidata_identifiers[name]['qid']\n else:\n r['qid'] = 'N/A'\n #token_clicked_handler_logger.info(r)\n return r\n\n def add_qid_formula(r):\n \"\"\"\n :param r: single recommendation\n :return:\n \"\"\"\n name = r['name']\n if name in all_wikidata_formulae:\n r['qid'] = all_wikidata_formulae[name]['qid']\n else:\n r['qid'] = 'N/A'\n return r\n\n def add_qid_all_math(r):\n\n\n\n if source not in ['wikidata1Results', 'wikidata2Results']:\n\n name = r['name']\n if name in all_math_items:\n r['qid'] = all_math_items[name]\n else:\n r['qid'] = 'N/A'\n r['name'] = r['name'].replace(\"\\'\", '__APOSTROPH__')\n return r\n\n\n dict_list = list(map(add_qid_all_math, dict_list))\n\n\n dict_list += [{'name': ''} for _ in range(recommendations_limit - len(dict_list))]\n return dict_list\n\n recommendations_dict_pp = dict(map(lambda kv: (kv[0], pp(kv[1], kv[0])), recommendations_dict.items()))\n response = HttpResponse(json.dumps(recommendations_dict_pp), content_type='application/json')\n return response, recommendations_dict_pp\n\n def get_word_window(self, unique_id):\n \"\"\"\n This method produces the word window for a selected (by the user) formula or identifier. 
It iteratively takes\n named entities from the lines before and after the selected token(s) to fill the number of named entities as\n specified by the recommendation limit.\n :param unique_id: The unique id if the token (identifier or formula).\n :return: a list of named entities that appear around the selected token.\n \"\"\"\n\n word_window = []\n limit = int(recommendations_limit / 2)\n #dicts = self.cache_to_dicts()\n dicts = CacheHandler().cache_to_dicts()\n identifier_line_dict = dicts['identifiers']\n line_dict = dicts['lines']\n if unique_id in identifier_line_dict:\n line_num = identifier_line_dict[unique_id]\n else:\n return []\n\n i = 0\n while i < limit:\n # lines before\n b = line_num - i\n # lines after\n a = line_num + i\n\n if b in line_dict:\n for word in reversed(line_dict[b]):\n # value not yet in word window\n if not list(filter(lambda d: d['name'] == word.content.lower(), word_window)):\n word_window.append({\n 'name': word.content.lower(),\n #'unique_id': word.unique_id\n })\n i += 1\n if a in line_dict:\n for word in reversed(line_dict[a]):\n # value not yet in word window\n if not list(filter(lambda d: d['name'] in word.content.lower(), word_window)):\n word_window.append({\n 'name': word.content.lower(),\n #'unique_id': word.unique_id\n })\n i += 1\n if not word_window:\n word_window = [{}]\n return word_window[:recommendations_limit]\n\n\n","repo_name":"gipplab/AnnoMathTeX","sub_path":"annomathtex/annomathtex/views/helper_classes/token_clicked_handler.py","file_name":"token_clicked_handler.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"}
+{"seq_id":"40627659756","text":"import numpy as np\nf = float(input(\"Enter the fox growth rate: \")) #0.6\n\nc = float(input(\"Enter the chicken growth rate: \")) #1.2\n\ne= float(input(\"Enter the eat-chicken rate: \")) #0.5\n\nk = float(input(\"Enter kill rate: \")) #0.5\n\niter = int(input(\"Enter number of iterations: \")) #2\n\n#print the time period, chicken and fox population \n\nprint('{:<20} {:<15} {:<15}'.format('Time period', '# foxes', '# chickens'))\n\nprint()\n\n## calculate the result of somulation for i times\nfox = 100\nchicken = 1000\nprint('{:<20} {:<15} {:<15}'.format(0, fox, chicken))\n\nfor i in range(iter):\n result = np.array([[f, e], [-k, c]]).dot(np.array([fox, chicken]))\n fox = result[0]\n chicken = result[1]\n print('{:<20} {:<15} {:<15}'.format(i, fox, chicken))","repo_name":"jiajia20/90819_python_mini","sub_path":"W2/test_2.2.py","file_name":"test_2.2.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29814292932","text":"import os\nfrom abc import ABC, abstractmethod\nimport torch\nfrom transformers import BertConfig,BertPreTrainedModel, BertModel\nfrom datetime import datetime\nimport torch.nn as nn\n\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\ndef tuple_prod(x):\n prod = 1\n for xx in x:\n prod *= xx\n return prod\n\nclass GreenBlock(nn.Module):\n def __init__(self, in_channels, out_channels ,drop_rate=0.4):\n \"\"\"\n green_block(inp, filters, name=None)\n ------------------------------------\n Implementation of the special residual block used in the paper. The block\n consists of two (GroupNorm --> ReLu --> 3x3x3 non-strided Convolution)\n units, with a residual connection from the input `inp` to the output. Used\n internally in the model. Can be used independently as well.\n Note that images must come with dimensions \"c, H, W, D\"\n Parameters\n ----------\n `inp`: An keras.layers.layer instance, required\n The keras layer just preceding the green block.\n `out_channels`: integer, required\n No. of filters to use in the 3D convolutional block. The output\n layer of this green block will have this many no. of channels.\n Returns\n -------\n `out`: A keras.layers.Layer instance\n The output of the green block. Has no. of channels equal to `filters`.\n The size of the rest of the dimensions remains same as in `inp`.\n \"\"\"\n super(GreenBlock, self).__init__()\n self.Drop_Rate = drop_rate\n # Define block\n self.block = nn.Sequential(OrderedDict([\n ('group_norm0', nn.GroupNorm(num_channels=in_channels, num_groups=in_channels // 4)),\n #('norm0', nn.BatchNorm3d(num_features=in_channels)),\n ('relu0', nn.LeakyReLU(inplace=True)),\n ('conv0', nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)),\n ('group_norm1', nn.GroupNorm(num_channels=out_channels, num_groups=in_channels // 4)),\n #('norm1', nn.BatchNorm3d(num_features=out_channels)),\n ('relu1', nn.LeakyReLU(inplace=True)),\n ('conv2', nn.Conv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)),\n ]))\n\n def forward(self, inputs):\n #x_res = self.res(inputs)\n x_res = inputs\n #print('in green block: before')\n x = torch.nn.functional.dropout(self.block(inputs), p=self.Drop_Rate, training=self.training)\n #print('in green block: after')\n #return torch.cat([x, x_res], dim=1)\n return x + x_res\n\n\n\nclass UpGreenBlock(nn.Sequential):\n def __init__(self, in_features, out_features, shape, Drop_Rate):\n super(UpGreenBlock, self).__init__()\n\n self.add_module('conv', nn.Conv3d(in_features, out_features, kernel_size=1, stride=1))\n self.add_module('up', nn.Upsample(size=shape))\n self.add_module('green', GreenBlock(out_features, out_features, Drop_Rate))\nclass BaseModel(nn.Module, ABC):\n def __init__(self):\n super().__init__()\n self.best_loss = 1000000\n self.best_accuracy = 0\n\n @abstractmethod\n def forward(self, x):\n pass\n\n @property\n def device(self):\n return next(self.parameters()).device\n\n def determine_shapes(self,encoder,dim):\n def get_shape(module,input,output):\n module.input_shape = tuple(input[0].shape[-3:])\n module.output_shape = tuple(output[0].shape[-3:])\n hook1 = encoder.down_block1.register_forward_hook(get_shape)\n hook2 = encoder.down_block3.register_forward_hook(get_shape)\n input_shape = (1,2,) + dim #batch,norms,H,W,D,time\n x = torch.ones((input_shape))\n with torch.no_grad():\n encoder(x)\n del x\n self.shapes = {'dim_0':encoder.down_block1.input_shape,\n 'dim_1':encoder.down_block1.output_shape,\n 
'dim_2':encoder.down_block3.input_shape,\n 'dim_3':encoder.down_block3.output_shape}\n hook1.remove()\n hook2.remove()\n\n def register_vars(self,**kwargs):\n intermediate_vec = 2640\n if kwargs.get('task') == 'fine_tune':\n self.dropout_rates = {'input': 0, 'green': 0.35,'Up_green': 0,'transformer':0.1}\n else:\n self.dropout_rates = {'input': 0, 'green': 0.2, 'Up_green': 0.2,'transformer':0.1}\n\n self.BertConfig = BertConfig(hidden_size=intermediate_vec, vocab_size=1,\n num_hidden_layers=kwargs.get('transformer_hidden_layers'),\n num_attention_heads=16, max_position_embeddings=30,\n hidden_dropout_prob=self.dropout_rates['transformer'])\n\n self.label_num = 1\n self.inChannels = 2\n self.outChannels = 1\n self.model_depth = 4\n self.intermediate_vec = intermediate_vec\n self.use_cuda = kwargs.get('cuda')\n self.shapes = kwargs.get('shapes')\n\n\n def load_partial_state_dict(self, state_dict,load_cls_embedding):\n print('loading parameters onto new model...')\n own_state = self.state_dict()\n loaded = {name:False for name in own_state.keys()}\n for name, param in state_dict.items():\n if name not in own_state:\n print('notice: {} is not part of new model and was not loaded.'.format(name))\n continue\n elif 'cls_embedding' in name and not load_cls_embedding:\n continue\n elif 'position' in name and param.shape != own_state[name].shape:\n print('debug line above')\n continue\n param = param.data\n own_state[name].copy_(param)\n loaded[name] = True\n for name,was_loaded in loaded.items():\n if not was_loaded:\n print('notice: named parameter - {} is randomly initialized'.format(name))\n\n\n def save_checkpoint(self, directory, title, epoch, optimizer=None,schedule=None):\n # Create directory to save to\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Build checkpoint dict to save.\n ckpt_dict = {\n 'model_state_dict':self.state_dict(),\n 'optimizer_state_dict':optimizer.state_dict() if optimizer is not None else None,\n 'epoch':epoch}\n\n if schedule is not None:\n ckpt_dict['schedule_state_dict'] = schedule.state_dict()\n ckpt_dict['lr'] = schedule.get_last_lr()[0]\n if hasattr(self,'loaded_model_weights_path'):\n ckpt_dict['loaded_model_weights_path'] = self.loaded_model_weights_path\n\n # Save the file with specific name\n core_name = title\n name = \"{}_last_epoch.pth\".format(core_name)\n torch.save(ckpt_dict, os.path.join(directory, name))\n\n\nclass Encoder(BaseModel):\n def __init__(self,**kwargs):\n super(Encoder, self).__init__()\n self.register_vars(**kwargs)\n self.down_block1 = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv3d(self.inChannels, self.model_depth, kernel_size=3, stride=1, padding=1)),\n ('sp_drop0', nn.Dropout3d(self.dropout_rates['input'])),\n ('green0', GreenBlock(self.model_depth, self.model_depth, self.dropout_rates['green'])),\n ('downsize_0', nn.Conv3d(self.model_depth, self.model_depth * 2, kernel_size=3, stride=2, padding=1))]))\n self.down_block2 = nn.Sequential(OrderedDict([\n ('green10', GreenBlock(self.model_depth * 2, self.model_depth * 2, self.dropout_rates['green'])),\n ('green11', GreenBlock(self.model_depth * 2, self.model_depth * 2, self.dropout_rates['green'])),\n ('downsize_1', nn.Conv3d(self.model_depth * 2, self.model_depth * 4, kernel_size=3, stride=2, padding=1))]))\n self.down_block3 = nn.Sequential(OrderedDict([\n ('green20', GreenBlock(self.model_depth * 4, self.model_depth * 4, self.dropout_rates['green'])),\n ('green21', GreenBlock(self.model_depth * 4, self.model_depth * 4, self.dropout_rates['green'])),\n 
('downsize_2', nn.Conv3d(self.model_depth * 4, self.model_depth * 8, kernel_size=3, stride=2, padding=1))]))\n self.final_block = nn.Sequential(OrderedDict([\n ('green30', GreenBlock(self.model_depth * 8, self.model_depth * 8, self.dropout_rates['green'])),\n ('green31', GreenBlock(self.model_depth * 8, self.model_depth * 8, self.dropout_rates['green'])),\n ('green32', GreenBlock(self.model_depth * 8, self.model_depth * 8, self.dropout_rates['green'])),\n ('green33', GreenBlock(self.model_depth * 8, self.model_depth * 8, self.dropout_rates['green']))]))\n\n def forward(self, x):\n # print('before')\n x = self.down_block1(x)\n # print('after down_block1')\n x = self.down_block2(x)\n # print('after down_block2')\n x = self.down_block3(x)\n # print('after down_block3')\n x = self.final_block(x)\n # print('after final block')\n return x\n\n\nclass BottleNeck_in(BaseModel):\n def __init__(self,**kwargs):\n super(BottleNeck_in, self).__init__()\n self.register_vars(**kwargs)\n self.reduce_dimension = nn.Sequential(OrderedDict([\n ('group_normR', nn.GroupNorm(num_channels=self.model_depth * 8, num_groups=8)),\n # ('norm0', nn.BatchNorm3d(model_depth * 8)),\n ('reluR0', nn.LeakyReLU(inplace=True)),\n ('convR0', nn.Conv3d(self.model_depth * 8, self.model_depth // 2, kernel_size=(3, 3, 3), stride=1, padding=1)),\n ]))\n flat_factor = tuple_prod(self.shapes['dim_3'])\n self.flatten = nn.Flatten()\n if (flat_factor * self.model_depth // 2) == self.intermediate_vec:\n self.into_bert = nn.Identity()\n print('flattened vec identical to intermediate vector...\\ndroppping fully conneceted bottleneck...')\n else:\n self.into_bert = nn.Linear(in_features=(self.model_depth // 2) * flat_factor, out_features=self.intermediate_vec)\n\n def forward(self, inputs):\n x = self.reduce_dimension(inputs)\n x = self.flatten(x)\n x = self.into_bert(x)\n\n return x\n\n\nclass BottleNeck_out(BaseModel):\n def __init__(self,**kwargs):\n super(BottleNeck_out, self).__init__()\n self.register_vars(**kwargs)\n flat_factor = tuple_prod(self.shapes['dim_3'])\n minicube_shape = (self.model_depth // 2,) + self.shapes['dim_3']\n self.out_of_bert = nn.Linear(in_features=self.intermediate_vec, out_features=(self.model_depth // 2) * flat_factor)\n self.expand_dimension = nn.Sequential(OrderedDict([\n ('unflatten', nn.Unflatten(1, minicube_shape)),\n ('group_normR', nn.GroupNorm(num_channels=self.model_depth // 2, num_groups=2)),\n # ('norm0', nn.BatchNorm3d(model_depth * 8)),\n ('reluR0', nn.LeakyReLU(inplace=True)),\n ('convR0', nn.Conv3d(self.model_depth // 2, self.model_depth * 8, kernel_size=(3, 3, 3), stride=1, padding=1)),\n ]))\n\n def forward(self, x):\n x = self.out_of_bert(x)\n return self.expand_dimension(x)\n\nclass Decoder(BaseModel):\n def __init__(self,**kwargs):\n super(Decoder, self).__init__()\n self.register_vars(**kwargs)\n self.decode_block = nn.Sequential(OrderedDict([\n ('upgreen0', UpGreenBlock(self.model_depth * 8, self.model_depth * 4, self.shapes['dim_2'], self.dropout_rates['Up_green'])),\n ('upgreen1', UpGreenBlock(self.model_depth * 4, self.model_depth * 2, self.shapes['dim_1'], self.dropout_rates['Up_green'])),\n ('upgreen2', UpGreenBlock(self.model_depth * 2, self.model_depth, self.shapes['dim_0'], self.dropout_rates['Up_green'])),\n ('blue_block', nn.Conv3d(self.model_depth, self.model_depth, kernel_size=3, stride=1, padding=1)),\n ('output_block', nn.Conv3d(in_channels=self.model_depth, out_channels=self.outChannels, kernel_size=1, stride=1))\n ]))\n\n def forward(self, x):\n x = 
self.decode_block(x)\n return x\n\n\nclass AutoEncoder(BaseModel):\n def __init__(self,dim,**kwargs):\n super(AutoEncoder, self).__init__()\n # ENCODING\n self.task = 'autoencoder_reconstruction'\n self.encoder = Encoder(**kwargs)\n self.determine_shapes(self.encoder,dim)\n kwargs['shapes'] = self.shapes\n # BottleNeck into bert\n self.into_bert = BottleNeck_in(**kwargs)\n\n # BottleNeck out of bert\n self.from_bert = BottleNeck_out(**kwargs)\n\n # DECODER\n self.decoder = Decoder(**kwargs)\n\n def forward(self, x):\n if x.isnan().any():\n print('nans in data!')\n batch_size, Channels_in, W, H, D, T = x.shape\n x = x.permute(0, 5, 1, 2, 3, 4).reshape(batch_size * T, Channels_in, W, H, D)\n encoded = self.encoder(x)\n encoded = self.into_bert(encoded)\n encoded = self.from_bert(encoded)\n reconstructed_image = self.decoder(encoded)\n _, Channels_out, W, H, D = reconstructed_image.shape\n reconstructed_image = reconstructed_image.reshape(batch_size, T, Channels_out, W, H, D).permute(0, 2, 3, 4, 5, 1)\n return {'reconstructed_fmri_sequence': reconstructed_image}\n\n\nclass Transformer_Block(BertPreTrainedModel, BaseModel):\n def __init__(self,config,**kwargs):\n super(Transformer_Block, self).__init__(config)\n self.register_vars(**kwargs)\n self.cls_pooling = True\n self.bert = BertModel(self.BertConfig, add_pooling_layer=self.cls_pooling)\n self.init_weights()\n self.cls_embedding = nn.Sequential(nn.Linear(self.BertConfig.hidden_size, self.BertConfig.hidden_size), nn.LeakyReLU())\n self.register_buffer('cls_id', torch.ones((kwargs.get('batch_size'), 1, self.BertConfig.hidden_size)) * 0.5,persistent=False)\n\n\n def concatenate_cls(self, x):\n cls_token = self.cls_embedding(self.cls_id)\n return torch.cat([cls_token, x], dim=1)\n\n\n def forward(self, x ):\n inputs_embeds = self.concatenate_cls(x=x)\n outputs = self.bert(input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=self.BertConfig.use_return_dict\n )\n\n sequence_output = outputs[0][:, 1:, :]\n pooled_cls = outputs[1]\n\n return {'sequence': sequence_output, 'cls': pooled_cls}\n\n\nclass Encoder_Transformer_Decoder(BaseModel):\n def __init__(self, dim,**kwargs):\n super(Encoder_Transformer_Decoder, self).__init__()\n self.task = 'transformer_reconstruction'\n self.register_vars(**kwargs)\n # ENCODING\n self.encoder = Encoder(**kwargs)\n self.determine_shapes(self.encoder,dim)\n kwargs['shapes'] = self.shapes\n\n # BottleNeck into bert\n self.into_bert = BottleNeck_in(**kwargs)\n\n # transformer\n self.transformer = Transformer_Block(self.BertConfig, **kwargs)\n\n # BottleNeck out of bert\n self.from_bert = BottleNeck_out(**kwargs)\n\n # DECODER\n self.decoder = Decoder(**kwargs)\n\n def forward(self, x):\n batch_size, inChannels, W, H, D, T = x.shape\n x = x.permute(0, 5, 1, 2, 3, 4).reshape(batch_size * T, inChannels, W, H, D)\n encoded = self.encoder(x)\n encoded = self.into_bert(encoded)\n encoded = encoded.reshape(batch_size, T, -1)\n transformer_dict = self.transformer(encoded)\n out = transformer_dict['sequence'].reshape(batch_size * T, -1)\n out = self.from_bert(out)\n reconstructed_image = self.decoder(out)\n reconstructed_image = reconstructed_image.reshape(batch_size, T, self.outChannels, W, H, D).permute(0, 2, 3, 4, 5, 1)\n return {'reconstructed_fmri_sequence': 
reconstructed_image}\n\n","repo_name":"intsystems/CreationOfIntelligentSystems_FMRI_23","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39252639283","text":"\n##LESSSE\n##10 November 2018\n##gmidi\n##____________\n##Methods for pygmidi vizualization adapted from pypianoroll library by Hao-Wen Dong available in https://github.com/salu133445/pypianoroll\n##____________\n\n\"\"\"Module for plotting multi-track and single-track piano-rolls.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport numpy as np\nimport pretty_midi\n\ntry:\n import matplotlib\n from matplotlib import pyplot as plt\n from matplotlib.patches import Patch\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\ntry:\n from moviepy.editor import VideoClip\n from moviepy.video.io.bindings import mplfig_to_npimage\n HAS_MOVIEPY = True\nexcept ImportError:\n HAS_MOVIEPY = False\n\ndef plot_pianoroll(ax, pianoroll, is_drum=False, beat_resolution=None,\n downbeats=None, preset='default', cmap='Blues', xtick='auto',\n ytick='octave', xticklabel=True, yticklabel='auto',\n tick_loc=None, tick_direction='in', label='both',\n grid='both', grid_linestyle=':', grid_linewidth=.5):\n \"\"\"\n Plot a piano-roll given as a numpy array.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes object\n The :class:`matplotlib.axes.Axes` object where the piano-roll will\n be plotted on.\n pianoroll : np.ndarray\n The piano-roll to be plotted. The values should be in [0, 1] when data\n type is float, and in [0, 127] when data type is integer.\n\n - For a 2D array, shape=(num_time_step, num_pitch).\n - For a 3D array, shape=(num_time_step, num_pitch, num_channel),\n where channels can be either RGB or RGBA.\n\n is_drum : bool\n Drum indicator. True for drums. False for other instruments. Default\n to False.\n beat_resolution : int\n Resolution of a beat (in time step). Required and only effective\n when `xticklabel` is 'beat'.\n downbeats : list\n Indices of time steps that contain downbeats., i.e. the first time\n step of a bar.\n preset : {'default', 'plain', 'frame'}\n Preset themes for the plot.\n\n - In 'default' preset, the ticks, grid and labels are on.\n - In 'frame' preset, the ticks and grid are both off.\n - In 'plain' preset, the x- and y-axis are both off.\n\n cmap : `matplotlib.colors.Colormap`\n Colormap to use in :func:`matplotlib.pyplot.imshow`. Default to\n 'Blues'. Only effective when `pianoroll` is 2D.\n xtick : {'auto', 'beat', 'step', 'off'}\n Use beat number or step number as ticks along the x-axis, or\n automatically set to 'beat' when `beat_resolution` is given and set\n to 'step', otherwise. Default to 'auto'.\n ytick : {'octave', 'pitch', 'off'}\n Use octave or pitch as ticks along the y-axis. Default to 'octave'.\n xticklabel : bool\n Indicate whether to add tick labels along the x-axis. Only effective\n when `xtick` is not 'off'.\n yticklabel : {'auto', 'name', 'number', 'off'}\n If 'name', use octave name and pitch name (key name when `is_drum`\n is True) as tick labels along the y-axis. If 'number', use pitch\n number. If 'auto', set to 'name' when `ytick` is 'octave' and\n 'number' when `ytick` is 'pitch'. Default to 'auto'. Only effective\n when `ytick` is not 'off'.\n tick_loc : tuple or list\n List of locations to put ticks. Availables elements are 'bottom',\n 'top', 'left' and 'right'. If None, default to ('bottom', 'left').\n tick_direction : {'in', 'out', 'inout'}\n Put ticks inside the axes, outside the axes, or both. Default to\n 'in'. Only effective when `xtick` and `ytick` are not both 'off'.\n label : {'x', 'y', 'both', 'off'}\n Add label to the x-axis, y-axis, both or neither. 
Default to 'both'.\n grid : {'x', 'y', 'both', 'off'}\n Add grid to the x-axis, y-axis, both or neither. Default to 'both'.\n grid_linestyle : str\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'\n argument.\n grid_linewidth : float\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'\n argument.\n\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError(\"matplotlib package is required for plotting \"\n \"supports.\")\n\n if pianoroll.ndim not in (2, 3):\n raise ValueError(\"`pianoroll` must be a 2D or 3D numpy array\")\n if pianoroll.shape[1] != 128:\n raise ValueError(\"The shape of `pianoroll` must be (num_time_step, \"\n \"128)\")\n if xtick not in ('auto', 'beat', 'step', 'off'):\n raise ValueError(\"`xtick` must be one of {'auto', 'beat', 'step', \"\n \"'none'}\")\n if xtick == 'beat' and beat_resolution is None:\n raise ValueError(\"`beat_resolution` must be a number when `xtick` \"\n \"is 'beat'\")\n if ytick not in ('octave', 'pitch', 'off'):\n raise ValueError(\"`ytick` must be one of {octave', 'pitch', 'off'}\")\n if not isinstance(xticklabel, bool):\n raise TypeError(\"`xticklabel` must be of bool type\")\n if yticklabel not in ('auto', 'name', 'number', 'off'):\n raise ValueError(\"`yticklabel` must be one of {'auto', 'name', \"\n \"'number', 'off'}\")\n if tick_direction not in ('in', 'out', 'inout'):\n raise ValueError(\"`tick_direction` must be one of {'in', 'out',\"\n \"'inout'}\")\n if label not in ('x', 'y', 'both', 'off'):\n raise ValueError(\"`label` must be one of {'x', 'y', 'both', 'off'}\")\n if grid not in ('x', 'y', 'both', 'off'):\n raise ValueError(\"`grid` must be one of {'x', 'y', 'both', 'off'}\")\n\n # plotting\n if pianoroll.ndim > 2:\n to_plot = pianoroll.transpose(1, 0, 2)\n else:\n to_plot = pianoroll.T\n if (np.issubdtype(pianoroll.dtype, np.bool_)\n or np.issubdtype(pianoroll.dtype, np.floating)):\n ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=0, vmax=1,\n origin='lower', interpolation='none')\n elif np.issubdtype(pianoroll.dtype, np.integer):\n ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=0, vmax=127,\n origin='lower', interpolation='none')\n else:\n raise TypeError(\"Unsupported data type for `pianoroll`\")\n\n # tick setting\n if tick_loc is None:\n tick_loc = ('bottom', 'left')\n if xtick == 'auto':\n xtick = 'beat' if beat_resolution is not None else 'step'\n if yticklabel == 'auto':\n yticklabel = 'name' if ytick == 'octave' else 'number'\n\n if preset == 'plain':\n ax.axis('off')\n elif preset == 'frame':\n ax.tick_params(direction=tick_direction, bottom=False, top=False,\n left=False, right=False, labelbottom=False,\n labeltop=False, labelleft=False, labelright=False)\n else:\n ax.tick_params(direction=tick_direction, bottom=('bottom' in tick_loc),\n top=('top' in tick_loc), left=('left' in tick_loc),\n right=('right' in tick_loc),\n labelbottom=(xticklabel != 'off'),\n labelleft=(yticklabel != 'off'),\n labeltop=False, labelright=False)\n\n # x-axis\n if xtick == 'beat' and preset != 'frame':\n num_beat = pianoroll.shape[0]//beat_resolution\n xticks_major = beat_resolution * np.arange(0, num_beat)\n xticks_minor = beat_resolution * (0.5 + np.arange(0, num_beat))\n xtick_labels = np.arange(1, 1 + num_beat)\n ax.set_xticks(xticks_major)\n ax.set_xticklabels('')\n ax.set_xticks(xticks_minor, minor=True)\n ax.set_xticklabels(xtick_labels, minor=True)\n ax.tick_params(axis='x', which='minor', width=0)\n\n # y-axis\n if ytick == 'octave':\n ax.set_yticks(np.arange(0, 128, 12))\n if yticklabel == 'name':\n 
ax.set_yticklabels(['C{}'.format(i - 2) for i in range(11)])\n elif ytick == 'step':\n ax.set_yticks(np.arange(0, 128))\n if yticklabel == 'name':\n if is_drum:\n ax.set_yticklabels([pretty_midi.note_number_to_drum_name(i)\n for i in range(128)])\n else:\n ax.set_yticklabels([pretty_midi.note_number_to_name(i)\n for i in range(128)])\n\n # axis labels\n if label == 'x' or label == 'both':\n if xtick == 'step' or not xticklabel:\n ax.set_xlabel('time (step)')\n else:\n ax.set_xlabel('time (beat)')\n\n if label == 'y' or label == 'both':\n if is_drum:\n ax.set_ylabel('key name')\n else:\n ax.set_ylabel('pitch')\n\n # grid\n if grid != 'off':\n ax.grid(axis=grid, color='k', linestyle=grid_linestyle,\n linewidth=grid_linewidth)\n\n # downbeat boarder\n if downbeats is not None and preset != 'plain':\n for step in downbeats:\n ax.axvline(x=step, color='k', linewidth=1)\n\n\ndef plot_track(track, filepath=None, beat_resolution=None, downbeats=None,\n preset='default', cmap='Blues', xtick='auto', ytick='octave',\n xticklabel=True, yticklabel='auto', tick_loc=None,\n tick_direction='in', label='both', grid='both',\n grid_linestyle=':', grid_linewidth=.5):\n \"\"\"\n Plot the piano-roll or save a plot of the piano-roll.\n\n Parameters\n ----------\n filepath :\n The filepath to save the plot. If None, default to save nothing.\n beat_resolution : int\n Resolution of a beat (in time step). Required and only effective\n when `xtick` is 'beat'.\n downbeats : list\n Indices of time steps that contain downbeats., i.e. the first time\n step of a bar.\n\n preset : {'default', 'plain', 'frame'}\n Preset themes for the plot.\n\n - In 'default' preset, the ticks, grid and labels are on.\n - In 'frame' preset, the ticks and grid are both off.\n - In 'plain' preset, the x- and y-axis are both off.\n\n cmap : `matplotlib.colors.Colormap`\n Colormap to use in :func:`matplotlib.pyplot.imshow`. Default to\n 'Blues'. Only effective when `pianoroll` is 2D.\n xtick : {'auto', 'beat', 'step', 'off'}\n Use beat number or step number as ticks along the x-axis, or\n automatically set to 'beat' when `beat_resolution` is given and set\n to 'step', otherwise. Default to 'auto'.\n ytick : {'octave', 'pitch', 'off'}\n Use octave or pitch as ticks along the y-axis. Default to 'octave'.\n xticklabel : bool\n Indicate whether to add tick labels along the x-axis. Only effective\n when `xtick` is not 'off'.\n yticklabel : {'auto', 'name', 'number', 'off'}\n If 'name', use octave name and pitch name (key name when `is_drum`\n is True) as tick labels along the y-axis. If 'number', use pitch\n number. If 'auto', set to 'name' when `ytick` is 'octave' and\n 'number' when `ytick` is 'pitch'. Default to 'auto'. Only effective\n when `ytick` is not 'off'.\n tick_loc : tuple or list\n List of locations to put ticks. Availables elements are 'bottom',\n 'top', 'left' and 'right'. If None, default to ('bottom', 'left').\n tick_direction : {'in', 'out', 'inout'}\n Put ticks inside the axes, outside the axes, or both. Default to\n 'in'. Only effective when `xtick` and `ytick` are not both 'off'.\n label : {'x', 'y', 'both', 'off'}\n Add label to the x-axis, y-axis, both or neither. Default to 'both'.\n grid : {'x', 'y', 'both', 'off'}\n Add grid to the x-axis, y-axis, both or neither. 
Default to 'both'.\n grid_linestyle : str\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'\n argument.\n grid_linewidth : float\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'\n argument.\n\n Returns\n -------\n fig : `matplotlib.figure.Figure` object\n A :class:`matplotlib.figure.Figure` object.\n ax : `matplotlib.axes.Axes` object\n A :class:`matplotlib.axes.Axes` object.\n\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError(\"matplotlib package is required for plotting \"\n \"supports.\")\n\n fig, ax = plt.subplots()\n plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution,\n downbeats, preset=preset, cmap=cmap, xtick=xtick,\n ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel,\n tick_loc=tick_loc, tick_direction=tick_direction,\n label=label, grid=grid, grid_linestyle=grid_linestyle,\n grid_linewidth=grid_linewidth)\n\n if filepath is not None:\n plt.savefig(filepath)\n\n return fig, ax\n\ndef plot_multitrack(multitrack, filepath=None, mode='stacked',\n track_label='name', preset='frame', cmaps=None,\n xtick='off', ytick='octave', xticklabel=False,\n yticklabel='auto', tick_loc=None, tick_direction='in',\n label='y', grid='off', grid_linestyle=':',\n grid_linewidth=.5, background=np.array([1,1,1])):\n \"\"\"\n Plot the piano-rolls or save a plot of them.\n\n Parameters\n ----------\n filepath : str\n The filepath to save the plot. If None, default to save nothing.\n mode : {'separate', 'stacked', 'hybrid'}\n Plotting modes. Default to 'separate'.\n\n - In 'separate' mode, all the tracks are plotted separately.\n - In 'stacked' mode, a color is assigned based on `cmaps` to the\n piano-roll of each track and the piano-rolls are stacked and\n plotted as a colored image with RGB channels.\n - In 'hybrid' mode, the drum tracks are merged into a 'Drums' track,\n while the other tracks are merged into an 'Others' track, and the\n two merged tracks are then plotted separately.\n\n track_label : {'name', 'program', 'family', 'off'}\n Add track name, program name, instrument family name or none as\n labels to the track. When `mode` is 'hybrid', all options other\n than 'off' will label the two track with 'Drums' and 'Others'.\n preset : {'default', 'plain', 'frame'}\n Preset themes for the plot.\n\n - In 'default' preset, the ticks, grid and labels are on.\n - In 'frame' preset, the ticks and grid are both off.\n - In 'plain' preset, the x- and y-axis are both off.\n\n cmaps : tuple or list\n List of `matplotlib.colors.Colormap` instances or colormap codes.\n\n - When `mode` is 'separate', each element will be passed to each\n call of :func:`matplotlib.pyplot.imshow`. Default to ('Blues',\n 'Oranges', 'Greens', 'Reds', 'Purples', 'Greys').\n - When `mode` is stacked, a color is assigned based on `cmaps` to\n the piano-roll of each track. Default to ('hsv').\n - When `mode` is 'hybrid', the first (second) element is used in the\n 'Drums' ('Others') track. Default to ('Blues', 'Greens').\n\n xtick : {'auto', 'beat', 'step', 'off'}\n Use beat number or step number as ticks along the x-axis, or\n automatically set to 'beat' when `beat_resolution` is given and set\n to 'step', otherwise. Default to 'auto'.\n ytick : {'octave', 'pitch', 'off'}\n Use octave or pitch as ticks along the y-axis. Default to 'octave'.\n xticklabel : bool\n Indicate whether to add tick labels along the x-axis. 
Only effective\n when `xtick` is not 'off'.\n yticklabel : {'auto', 'name', 'number', 'off'}\n If 'name', use octave name and pitch name (key name when `is_drum`\n is True) as tick labels along the y-axis. If 'number', use pitch\n number. If 'auto', set to 'name' when `ytick` is 'octave' and\n 'number' when `ytick` is 'pitch'. Default to 'auto'. Only effective\n when `ytick` is not 'off'.\n tick_loc : tuple or list\n List of locations to put ticks. Availables elements are 'bottom',\n 'top', 'left' and 'right'. If None, default to ('bottom', 'left').\n tick_direction : {'in', 'out', 'inout'}\n Put ticks inside the axes, outside the axes, or both. Default to\n 'in'. Only effective when `xtick` and `ytick` are not both 'off'.\n label : {'x', 'y', 'both', 'off'}\n Add label to the x-axis, y-axis, both or neither. Default to 'both'.\n grid : {'x', 'y', 'both', 'off'}\n Add grid to the x-axis, y-axis, both or neither. Default to 'both'.\n grid_linestyle : str\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'\n argument.\n grid_linewidth : float\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'\n argument.\n\n Returns\n -------\n fig : `matplotlib.figure.Figure` object\n A :class:`matplotlib.figure.Figure` object.\n axs : list\n List of :class:`matplotlib.axes.Axes` object.\n\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError(\"matplotlib package is required for plotting \"\n \"supports.\")\n\n def get_track_label(track_label, track=None):\n \"\"\"Convenient function to get track labels\"\"\"\n if track_label == 'name':\n return track.name if track.name != \"\" else pretty_midi.program_to_instrument_class(track.program)\n elif track_label == 'program':\n return pretty_midi.program_to_instrument_name(track.program)\n elif track_label == 'family':\n return pretty_midi.program_to_instrument_class(track.program)\n elif track is None:\n return track_label\n\n def add_tracklabel(ax, track_label, track=None):\n \"\"\"Convenient function for adding track labels\"\"\"\n if not ax.get_ylabel():\n return\n ax.set_ylabel(get_track_label(track_label, track) + '\\n\\n'\n + ax.get_ylabel())\n\n multitrack.check_validity()\n if not multitrack.tracks:\n raise ValueError(\"There is no track to plot\")\n if mode not in ('separate', 'stacked', 'hybrid'):\n raise ValueError(\"`mode` must be one of {'separate', 'stacked', \"\n \"'hybrid'}\")\n if track_label not in ('name', 'program', 'family', 'off'):\n raise ValueError(\"`track_label` must be one of {'name', 'program', \"\n \"'family'}\")\n\n if cmaps is None:\n if mode == 'separate':\n cmaps = ('Blues', 'Oranges', 'Greens', 'Reds', 'Purples', 'Greys')\n elif mode == 'stacked':\n cmaps = ('rainbow',)\n else:\n cmaps = ('Blues', 'Greens')\n\n num_track = len(multitrack.tracks)\n downbeats = multitrack.get_downbeat_steps()\n\n if mode == 'separate':\n if num_track > 1:\n fig, axs = plt.subplots(num_track, sharex=True)\n else:\n fig, ax = plt.subplots()\n axs = [ax]\n\n for idx, track in enumerate(multitrack.tracks):\n now_xticklabel = xticklabel if idx < num_track else False\n plot_pianoroll(axs[idx], track.pianoroll, False,\n multitrack.beat_resolution, downbeats, preset=preset,\n cmap=cmaps[idx%len(cmaps)], xtick=xtick, ytick=ytick,\n xticklabel=now_xticklabel, yticklabel=yticklabel,\n tick_loc=tick_loc, tick_direction=tick_direction,\n label=label, grid=grid,\n grid_linestyle=grid_linestyle,\n grid_linewidth=grid_linewidth)\n if track_label != 'none':\n add_tracklabel(axs[idx], track_label, track)\n\n if num_track > 1:\n 
fig.subplots_adjust(hspace=0)\n\n if filepath is not None:\n plt.savefig(filepath)\n\n return (fig, axs)\n\n elif mode == 'stacked':\n is_all_drum = True\n for track in multitrack.tracks:\n if not track.is_drum:\n is_all_drum = False\n\n balpha=False\n\n fig, ax = plt.subplots()\n stacked = multitrack.get_stacked_pianorolls()\n indices = tuple(np.reshape(np.concatenate([np.indices(stacked.shape[:-1]),[np.argmax(stacked,-1)]],0),(3,-1,)))\n unique_playin = np.zeros(stacked.shape)\n unique_playin[indices] = 1\n unique_playin = unique_playin*(stacked>0) \n stacked = unique_playin \n unique_volume = unique_playin*stacked\n alpha = np.reshape(np.max(unique_volume,-1),(-1,1))\n colormap = matplotlib.cm.get_cmap(cmaps[0])\n cmatrix = colormap(np.arange(0, 1, 1 / num_track))[:, :3]\n recolored = np.matmul(stacked.reshape(-1, num_track), cmatrix)\n if balpha:\n recolored = np.concatenate([recolored,alpha],-1)\n background = np.tile(np.concatenate([background,[1]],-1),(recolored.shape[0],1))\n n = 4\n else:\n background = np.tile(background,(recolored.shape[0],1))\n n = 3\n mask = np.reshape(np.repeat((np.sum(recolored,1)==0),(n)),(-1,n))\n recolored = np.where(mask,background,recolored)\n \n stacked = recolored.reshape(stacked.shape[:2] + (n, ))\n\n plot_pianoroll(ax, stacked, is_all_drum, multitrack.beat_resolution,\n downbeats, preset=preset, xtick=xtick, ytick=ytick,\n xticklabel=xticklabel, yticklabel=yticklabel,\n tick_loc=tick_loc, tick_direction=tick_direction,\n label=label, grid=grid, grid_linestyle=grid_linestyle,\n grid_linewidth=grid_linewidth)\n\n if track_label != 'none':\n patches = [Patch(color=cmatrix[idx],\n label=get_track_label(track_label, track))\n for idx, track in enumerate(multitrack.tracks)]\n f = lambda x: len(get_track_label(track_label, x))\n l = max(multitrack.tracks, key=f)\n if len(patches) > 3:\n ncol = 2\n anchor = (0.5,1.1)\n else:\n ncol = 1\n anchor = (0.5,1.05)\n\n if len(patches) < 9:\n plt.legend(handles=patches, fancybox=True,loc='upper center',ncol=ncol, bbox_to_anchor=anchor, framealpha=1)\n else:\n plt.legend(handles=patches, fancybox=True,loc='best',ncol=ncol, bbox_to_anchor=(1+.05*f(l)/2,.5), framealpha=1)\n\n if filepath is not None: \n fig.set_size_inches(10, 6)\n plt.savefig(filepath,dpi=400)\n\n return (fig, [ax])\n\n elif mode == 'hybrid':\n drums = [i for i, track in enumerate(multitrack.tracks) if track.is_drum]\n others = [i for i in range(len(multitrack.tracks)) if i not in drums]\n multitrack.merge_tracks(drums,mode='sum',name=\"drums\") if len(drums) > 0 else 0\n multitrack.merge_tracks(others,mode='sum',name=\"others\") if len(others) > 0 else 0\n\n if num_track > 1:\n fig, axs = plt.subplots(num_track, sharex=True)\n else:\n fig, ax = plt.subplots()\n axs = [ax]\n for idx, track in enumerate(multitrack.tracks):\n now_xticklabel = xticklabel if idx < num_track else False\n plot_pianoroll(axs[idx], track.pianoroll, False,\n multitrack.beat_resolution, downbeats, preset=preset,\n cmap=cmaps[idx%len(cmaps)], xtick=xtick, ytick=ytick,\n xticklabel=now_xticklabel, yticklabel=yticklabel,\n tick_loc=tick_loc, tick_direction=tick_direction,\n label=label, grid=grid,\n grid_linestyle=grid_linestyle,\n grid_linewidth=grid_linewidth)\n if track_label != 'none':\n add_tracklabel(axs[idx], track_label, track)\n fig.subplots_adjust(hspace=0)\n\n if track_label != 'none':\n add_tracklabel(axs[0], 'Drums')\n add_tracklabel(axs[1], 'Others')\n\n if filepath is not None:\n plt.savefig(filepath)\n\n return (fig, axs)\n\n\ndef save_animation(filepath, 
pianoroll, window=1000, hop=24, fps=2, is_drum=False,\n beat_resolution=None, downbeats=None, preset='default',\n cmap='Blues', xtick='auto', ytick='octave', xticklabel=True,\n yticklabel='auto', tick_loc=None, tick_direction='in',\n label='both', grid='both', grid_linestyle=':',\n grid_linewidth=.5, **kwargs):\n \"\"\"\n Save a piano-roll to an animation in video or GIF format.\n\n Parameters\n ----------\n filepath : str\n Path to save the video file.\n pianoroll : np.ndarray\n The piano-roll to be plotted. The values should be in [0, 1] when data\n type is float, and in [0, 127] when data type is integer.\n\n - For a 2D array, shape=(num_time_step, num_pitch).\n - For a 3D array, shape=(num_time_step, num_pitch, num_channel),\n where channels can be either RGB or RGBA.\n\n window : int\n Window size to be applied to `pianoroll` for the animation.\n hop : int\n Hop size to be applied to `pianoroll` for the animation.\n fps : int\n Number of frames per second in the resulting video or GIF file.\n is_drum : bool\n Drum indicator. True for drums. False for other instruments. Default\n to False.\n beat_resolution : int\n Resolution of a beat (in time step). Required and only effective\n when `xtick` is 'beat'.\n downbeats : list\n Indices of time steps that contain downbeats., i.e. the first time\n step of a bar.\n\n preset : {'default', 'plain', 'frame'}\n Preset themes for the plot.\n\n - In 'default' preset, the ticks, grid and labels are on.\n - In 'frame' preset, the ticks and grid are both off.\n - In 'plain' preset, the x- and y-axis are both off.\n\n cmap : `matplotlib.colors.Colormap`\n Colormap to use in :func:`matplotlib.pyplot.imshow`. Default to\n 'Blues'. Only effective when `pianoroll` is 2D.\n xtick : {'auto', 'beat', 'step', 'off'}\n Use beat number or step number as ticks along the x-axis, or\n automatically set to 'beat' when `beat_resolution` is given and set\n to 'step', otherwise. Default to 'auto'.\n ytick : {'octave', 'pitch', 'off'}\n Use octave or pitch as ticks along the y-axis. Default to 'octave'.\n xticklabel : bool\n Indicate whether to add tick labels along the x-axis. Only effective\n when `xtick` is not 'off'.\n yticklabel : {'auto', 'name', 'number', 'off'}\n If 'name', use octave name and pitch name (key name when `is_drum`\n is True) as tick labels along the y-axis. If 'number', use pitch\n number. If 'auto', set to 'name' when `ytick` is 'octave' and\n 'number' when `ytick` is 'pitch'. Default to 'auto'. Only effective\n when `ytick` is not 'off'.\n tick_loc : tuple or list\n List of locations to put ticks. Availables elements are 'bottom',\n 'top', 'left' and 'right'. If None, default to ('bottom', 'left').\n tick_direction : {'in', 'out', 'inout'}\n Put ticks inside the axes, outside the axes, or both. Default to\n 'in'. Only effective when `xtick` and `ytick` are not both 'off'.\n label : {'x', 'y', 'both', 'off'}\n Add label to the x-axis, y-axis, both or neither. Default to 'both'.\n grid : {'x', 'y', 'both', 'off'}\n Add grid to the x-axis, y-axis, both or neither. 
Default to 'both'.\n grid_linestyle : str\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'\n argument.\n grid_linewidth : float\n Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'\n argument.\n\n \"\"\"\n if not HAS_MOVIEPY:\n raise ImportError(\"moviepy package is required for animation supports.\")\n\n def make_frame(t):\n \"\"\"Return an image of the frame for time t.\"\"\"\n fig = plt.gcf()\n ax = plt.gca()\n f_idx = int(t * fps)\n start = hop * f_idx\n end = start + window\n to_plot = transposed[:, start:end]\n extent = (start, end - 1, 0, 127)\n ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=vmin, vmax=vmax,\n origin='lower', interpolation='none', extent=extent)\n\n if xtick == 'beat':\n next_major_idx = beat_resolution - start % beat_resolution\n if start % beat_resolution < beat_resolution//2:\n next_minor_idx = beat_resolution//2 - start % beat_resolution\n else:\n next_minor_idx = (beat_resolution//2 - start % beat_resolution\n + beat_resolution)\n xticks_major = np.arange(next_major_idx, window, beat_resolution)\n xticks_minor = np.arange(next_minor_idx, window, beat_resolution)\n if end % beat_resolution < beat_resolution//2:\n last_minor_idx = beat_resolution//2 - end % beat_resolution\n else:\n last_minor_idx = (beat_resolution//2 - end % beat_resolution\n + beat_resolution)\n xtick_labels = np.arange((start + next_minor_idx)//beat_resolution,\n (end + last_minor_idx)//beat_resolution)\n ax.set_xticks(xticks_major)\n ax.set_xticklabels('')\n ax.set_xticks(xticks_minor, minor=True)\n ax.set_xticklabels(xtick_labels, minor=True)\n ax.tick_params(axis='x', which='minor', width=0)\n\n return mplfig_to_npimage(fig)\n\n if xtick == 'auto':\n xtick = 'beat' if beat_resolution is not None else 'step'\n\n fig, ax = plt.subplots()\n plot_pianoroll(ax, pianoroll[:window], is_drum, beat_resolution, downbeats,\n preset=preset, cmap=cmap, xtick=xtick, ytick=ytick,\n xticklabel=xticklabel, yticklabel=yticklabel,\n tick_loc=tick_loc, tick_direction=tick_direction,\n label=label, grid=grid, grid_linestyle=grid_linestyle,\n grid_linewidth=grid_linewidth)\n\n num_frame = int((pianoroll.shape[0] - window) / hop)\n duration = int(num_frame / fps)\n\n if (np.issubdtype(pianoroll.dtype, np.bool_)\n or np.issubdtype(pianoroll.dtype, np.floating)):\n vmax = 1\n elif np.issubdtype(pianoroll.dtype, np.integer):\n vmax = 127\n else:\n raise TypeError(\"Unsupported data type for `pianoroll`\")\n vmin = 0\n\n transposed = pianoroll.T\n animation = VideoClip(make_frame, duration=duration)\n if filepath.endswith('.gif'):\n animation.write_gif(filepath, fps, **kwargs)\n else:\n animation.write_videofile(filepath, fps, **kwargs)\n plt.close()\n\n","repo_name":"LESSSE/pygmidi","sub_path":"pygmidi/utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":30931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"74635730001","text":"# importing modules\nimport json\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# storing the url in the form of string\nurl = \"https://api.covid19india.org/state_district_wise.json\"\n\n# function to get data from api\n\n\ndef casesData():\n # getting the json data by calling api\n data = ((requests.get(url)).json())\n states = []\n\n # getting states\n for key in data.items():\n states.append(key[0])\n\n # getting statewise data\n for state in states:\n f = (data[state]['districtData'])\n tc = []\n dis = []\n act, con, dea, rec = 0, 0, 0, 0\n\n # getting districtwise data\n for key in (data[state]['districtData']).items():\n district = key[0]\n dis.append(district)\n active = data[state]['districtData'][district]['active']\n confirmed = data[state]['districtData'][district]['confirmed']\n if district == 'Unknown':\n active, confirmed\n tc.append([active, confirmed])\n act = act + active\n con = con + confirmed\n tc.append([act, con])\n dis.append('Total')\n parameters = ['Active', 'Confirmed']\n\n # creating a dataframe\n df = pd.DataFrame(tc, dis, parameters)\n print('COVID - 19', state, 'District Wise Data')\n print(df)\n\n # plotting of data\n plt.bar(dis, df['Active'], width=0.5, align='center')\n fig = plt.gcf()\n fig.set_size_inches(18.5, 10.5)\n plt.xticks(rotation=75)\n plt.show()\n print('*' * 100)\n\n\ncasesData()\n","repo_name":"ihgoyarp/covid-indo","sub_path":"covid-india.py","file_name":"covid-india.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9178746018","text":"prime_nums = []\r\n\r\n\r\ndef is_prime(num):\r\n for n in range(0, num + 1):\r\n if n > 0:\r\n for i in range(2, n):\r\n if (n % i) == 0:\r\n break\r\n else:\r\n prime_nums.append(n)\r\n return prime_nums\r\n\r\n\r\ndef cnt_primes(num):\r\n cnt_list = is_prime(num)\r\n print(f\"Prime numbers from 1 to {num}\", cnt_list)\r\n print(len(cnt_list) - 1, f\"prime numbers from 1 to {num}\")\r\n\r\n\r\ncnt_primes(6)\r\n","repo_name":"SoeWunna29/Homework-Assignment-2","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34509709091","text":"import random\n\n# Creates a rating list\nratings = [18, 15, 12, 8, 0]\n\n# Picks a random movie rating\nmovie_rating = random.choice(ratings)\n\n# Gets user input for age\nuser_age = int(input(\"How old are you?\\n=> \"))\n\n# Check whether the user is old enough for the movie or if the movie is rated \"U\" for everyone\nif user_age >= movie_rating or movie_rating == 0:\n print(\"You are old enough to watch the movie.\\nEnjoy the show!\")\n\n# Elif will only be triggered if age is less than rating\n# Therefore if the rating is PG it requires a parent\n# We check whether the user (who is under the age of 8 => PG) if they with a parent\nelif movie_rating == 8:\n\n # Another input to ask if they with a parent\n parents = input(\"Did you come with a parent?\\n=> \")\n\n # Checking the answer of the user\n if parents.lower() == \"yes\":\n print(\"You can watch the movie!\\nEnjoy the show!\")\n else:\n print(\n \"Sorry you are not old enough to watch this movie!\\nYou will need to come back with a parent!\"\n )\n\n# In every other case user not allowed to watch the movie\nelse:\n print(\"Sorry you are not old enough to watch this movie!\")\n\n# Debugging print to see what the rating was choosen at random\nprint(f\"Movie Rating was: {movie_rating}\")","repo_name":"deviljin112/CourseMasterRepo","sub_path":"Python-Control-Flow/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12849829012","text":"import argparse\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom sklearn.manifold import TSNE\n\nfrom src import project_dir\nfrom src.models.model import ImageClassifier\nfrom src.data.mnist import MNISTDataModule\n\ndef parser():\n \"\"\"Parses command line.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Script for visualizing embeddings created by image \" \"classifier\"\n )\n parser.add_argument(\n '--model_path', default=project_dir + '/models/model.pth', type=str)\n parser.add_argument(\n '--fig_path', default=project_dir + 'reports/figures/embeddings.pdf')\n parser.add_argument('--mb_size', default=64, type=int)\n args = parser.parse_args()\n\n return args\n\n\ndef get_embeddings(args, model, data_loader):\n \"\"\"Gets embeddings produced by model.\"\"\"\n with torch.no_grad():\n model.eval()\n\n embeddings = torch.zeros(\n (len(data_loader.dataset.data), model.linear.in_features)\n )\n all_labels = torch.zeros(len(data_loader.dataset.data))\n\n for i, (images, labels) in enumerate(data_loader):\n model(images)\n embeddings[\n i * args.mb_size : i * args.mb_size + images.shape[0], :\n ] = model.embeddings\n all_labels[i * args.mb_size : i * args.mb_size + images.shape[0]] = labels\n\n return embeddings.numpy(), all_labels.numpy()\n\n\ndef plot_embeddings(embeddings, labels):\n \"\"\"Plots embeddings.\"\"\"\n embs_proj = TSNE(\n n_components=2,\n random_state=42,\n verbose=1,\n n_jobs=-1).fit_transform(embeddings)\n\n fig, ax = plt.subplots()\n scatter = ax.scatter(\n embs_proj[:, 0],\n embs_proj[:, 1],\n c=labels,\n cmap=plt.get_cmap(\"tab10\"),\n alpha=0.5,\n s=2,\n )\n ax.set_xlabel(\"t-SNE component 1\")\n ax.set_ylabel(\"t-SNE component 2\")\n ax.set_title(\"Embeddings\")\n\n markers = scatter.legend_elements()[0]\n plt.legend(\n markers,\n np.unique(labels),\n loc=0,\n borderaxespad=0.1,\n title=\"Digit\",\n framealpha=0.6,\n )\n\n return fig\n\n\ndef main():\n args = parser()\n train_loader, test_loader = get_data(args)\n model = ImageClassifier.load_from_checkpoint(\n checkpoint_path=args.model_path)\n embeddings, labels = get_embeddings(args, model, test_loader)\n fig = plot_embeddings(embeddings, labels)\n fig.savefig(args.fig_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jonasvj/MLOps","sub_path":"src/visualization/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"10679951352","text":"from sys import stdin\r\n\r\n\r\ndef main():\r\n input = stdin.readline\r\n n = int(input())\r\n a, b, c = sorted(map(int, input().split()))\r\n res = 10000\r\n for i in range(9999, -1, -1):\r\n tmp_c = i * c\r\n if tmp_c > n:\r\n continue\r\n for j in range(9999 - i, -1, -1):\r\n tmp_b = b * j + tmp_c\r\n if tmp_b > n:\r\n continue\r\n if (n - tmp_b) % a == 0:\r\n k = (n - tmp_b) // a\r\n res = min(res, i + j + k)\r\n print(res)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"yu2799/AtCoder","sub_path":"typical90/typical16.py","file_name":"typical16.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"1836915388","text":"def List(bestand):\n Tekst = open(bestand, 'r')\n Inhoud=Tekst.readlines()\n Tekst.close()\n for term in Inhoud:\n ZEnter=term.strip()\n NaNu=ZEnter.split(',')\n print('{} heeft kaartnummer: {}'.format(NaNu[1],NaNu[0]))\n\nKaart=List('kaartnummers')\nprint(Kaart)\n","repo_name":"MTKornet/Opgaven","sub_path":"Les 5/pe5_2.py","file_name":"pe5_2.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36394339149","text":"\nfor i in range(1,6):\n row = input()\n rowList = row.split()\n rowList = [int(i) for i in rowList]\n if 1 in rowList:\n index = rowList.index(1)\n out = abs(3-(index+1)) + abs(3 - i)\n\nprint(out)","repo_name":"dar4kamal/Proplem_solving","sub_path":"CF263-D2-A.py","file_name":"CF263-D2-A.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35734439200","text":"from django.test import TestCase\nfrom katalog.models import CatalogItem\n\n# Create your tests here.\nclass katalogTest(TestCase):\n def setUp(self):\n CatalogItem.objects.create(item_name=\"Iphone XR\", item_price=4000000, item_stock=100,\n description=\"New Iphone\", rating=5, item_url=\"https://www.tokopedia.com/spiritcellular-1/iphone-xr-64gb-second-e-x-inter-original-no-minus-fullset-kuning?extParam=ivf%3Dfalse&src=topads\")\n\n def test_is_dummy_valid(self):\n iphone_XR = CatalogItem.objects.get(item_name=\"Iphone XR\", item_price=4000000, item_stock=100,\n description=\"New Iphone\", rating=5, item_url=\"https://www.tokopedia.com/spiritcellular-1/iphone-xr-64gb-second-e-x-inter-original-no-minus-fullset-kuning?extParam=ivf%3Dfalse&src=topads\")\n self.assertEqual(iphone_XR.item_name, \"Iphone XR\")\n self.assertEqual(iphone_XR.item_price, 4000000)\n self.assertEqual(iphone_XR.item_stock, 100)\n self.assertEqual(iphone_XR.description, \"New Iphone\")\n self.assertEqual(iphone_XR.rating, 5)\n self.assertEqual(iphone_XR. item_url, \"https://www.tokopedia.com/spiritcellular-1/iphone-xr-64gb-second-e-x-inter-original-no-minus-fullset-kuning?extParam=ivf%3Dfalse&src=topads\")","repo_name":"gabiiing/pbp-tugas-2-gabing","sub_path":"katalog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"43313586918","text":"import os\n\nfrom flask import Flask, jsonify # make_response, request, url_for\n\nfrom redis import Redis\n\nfrom rq import Queue\n\nfrom utils import create_task_id\n\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.config[\"REDIS_URL\"] = os.environ.get('REDIS_URL') or 'redis://'\napp.redis = Redis.from_url(app.config['REDIS_URL'])\napp.task_queue = Queue('parsing-tasks', connection=app.redis)\ntasks = {}\n\n\n@app.route('/api/', methods=['POST'])\ndef create_task(url: str) -> str:\n tid = create_task_id(url)\n job = app.task_queue.enqueue('tasks.parse', tid, url)\n tasks[tid] = job\n return jsonify({\"id\": tid})\n\n\n@app.route('/api/', methods=['GET'])\ndef get_task_status(tid: str) -> str:\n if tid not in tasks:\n return jsonify({\"status\": \"Not started\"})\n job = tasks[tid]\n job.refresh()\n return jsonify(job.meta)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","repo_name":"Antoine-Poincare/timeweb.com","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"34334466351","text":"from django.urls import path\n\nfrom product.api.views import CategoryListCreate, CategoryRetrieveUpdateDestroy, ProductListCreate, \\\n ProductRetrieveUpdateDestroy, search\n\nurlpatterns = [\n path('categories/', CategoryListCreate.as_view()),\n path('categories//', CategoryRetrieveUpdateDestroy.as_view()),\n path('products/', ProductListCreate.as_view()),\n path('products/search/', search),\n path('products//', ProductRetrieveUpdateDestroy.as_view()),\n]\n","repo_name":"mwicwiri-bonface/drf-ecommerce-backend-api","sub_path":"product/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"19398273638","text":"from IPython.display import display\n\nfrom sqlite3 import DatabaseError\nfrom matplotlib import pyplot as plt\nfrom pandas import DataFrame\nfrom sklearn.cluster import KMeans\n\n\ndef plot_kmeans_elbow(\n df: DataFrame, \n max_n_clusters: int = 10, \n max_iter: int = 1000\n ) -> None:\n \"\"\"\n Displays the elbow plot\n \"\"\"\n sse = {\n n_clusters: _get_inertia(df, n_clusters, max_iter)\n for n_clusters in range(1, max_n_clusters + 1)\n }\n plt.figure()\n plt.plot(sse.keys(), sse.values())\n plt.xlabel('Cluster count [k]')\n plt.show()\n\n\ndef _get_inertia(df: DataFrame, n_clusters: int, max_iter: int) -> list:\n \"\"\"\n Returns the inertia value for a given kmeans model\n \"\"\"\n kmeans = KMeans(n_clusters=n_clusters, max_iter=max_iter).fit(df)\n return kmeans.inertia_\n\n\ndef ordered_clustering(\n df: DataFrame,\n n_clusters: int,\n cluster_by_column_name: str, \n ascending: bool\n ) -> DataFrame:\n \"\"\"\n Returns a dataframe with ordered kmeans clusters\n based on the given column name\n \"\"\"\n kmeans = KMeans(n_clusters=n_clusters)\n kmeans.fit(df[[cluster_by_column_name]])\n cluster_name = f'{cluster_by_column_name}_score'\n df[cluster_name] = kmeans.predict(df[[cluster_by_column_name]])\n target_score = (\n df.groupby(cluster_name)\n [cluster_by_column_name].mean()\n .reset_index()\n .sort_values(by=[cluster_by_column_name], ascending=ascending)\n .reset_index(drop=True)\n )\n remap_clusters = {int(row[cluster_name]): idx for idx, row in target_score.iterrows()}\n df[cluster_name] = df[cluster_name].replace(remap_clusters)\n display(df.groupby(cluster_name)[cluster_by_column_name].describe())\n return df\n\n\n","repo_name":"TomaszKaleczyc/customer_order_prediction","sub_path":"src/utilities/clustering_utils.py","file_name":"clustering_utils.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"27720366904","text":"from odoo import models\nimport xlsxwriter\n\nclass PatientCardXLS(models.AbstractModel):\n _name = 'report.om_hospital.report_patient_xls'\n _inherit = 'report.report_xlsx.abstract'\n\n def generate_xlsx_report(self, workbook, data, lines):\n print(\"lines\", lines, data)\n format1= workbook.add_format({'font_size':14, 'align':'vcenter', 'bold':True})\n format2 = workbook.add_format({'font_size': 10, 'align': 'vcenter'})\n sheet = workbook.add_worksheet('Patient Card')\n #for right to left format\n #sheet.right_to_left()\n sheet.set_column(3,3, 10)\n sheet.set_column(2, 2, 20)\n sheet.write(2,2,'Name', format1)\n sheet.write(2, 3, lines.patient_name, format2)\n sheet.write(3, 2, 'Age', format1)\n sheet.write(3, 3, lines.patient_age, format2)","repo_name":"digitalfarmer/om_hospital","sub_path":"reports/patient_card_xls.py","file_name":"patient_card_xls.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"11311730139","text":"from unittest.util import _MAX_LENGTH\nfrom django.db import models\nfrom django.urls import reverse\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom datetime import datetime\nfrom django import forms\n\n\nclass Post(models.Model):\n manufacture_date = models.DateField(default =None)\n stencil_number = models.CharField (max_length=3, default=None)\n revision = models.CharField (max_length=2, default=None)\n ZLNumber = models.CharField (max_length=10, default=None)\n material = models.CharField (max_length=15, default=None)\n manufacture_number = models.CharField (max_length=10, default=None)\n thickness = models.CharField (max_length=5, default=None)\n author = models.CharField (max_length=7, default =None)\n\n\n def __str__(self):\n return self.manufacture_date, self.stencil_number, self.revision, self.ZLNumber, self.material, self.manufacture_number, self.thickness, self.author\n \n\n def get_absolute_url(self):\n return reverse(\"post_detail\", args=[str(self.id)])\n\n def save(self, *args, **kwargs):\n self.extra_field = \"extra field\"\n #print(self.manufacture_date, self.stencil_number, self.revision, self.ZLNumber, self.material, self.manufacture_number, self.thickness, self.author)\n super().save(*args, **kwargs)\n mystring = f'{str(self.manufacture_date)},{self.stencil_number},{self.revision},{self.ZLNumber},{self.material},{self.manufacture_number},{self.thickness},{self.author}'\n\n print(mystring)\n\n\n try:\n stringSplit = mystring.split(\",\")\n\n\n dateofmanufacture = stringSplit[0]\n stencilNumber = stringSplit[1]\n revision = stringSplit[2]\n ZLNum = stringSplit[3]\n material = stringSplit[4]\n manuSN = stringSplit[5]\n thickness = stringSplit[6]\n\n\n mydict = {'DateofManufacturer': dateofmanufacture, 'StencilNumber': stencilNumber, 'Revision': revision, 'ZLNumber': ZLNum, 'Material': material, 'ManufacturerNumber': manuSN, 'Thickness': thickness}\n \n\n df = pd.DataFrame.from_dict(mydict, orient='index')\n df = df.transpose()\n print('Test_1')\n\n #SQL Connection Windows Authentication#\n\n Server = 'UKC-VM-SQL01'\n Database = 'ToolBank'\n Driver = 'ODBC Driver 17 for SQL Server'\n Database_con = f'mssql://@{Server}/{Database}?driver={Driver}'\n print('Test_2')\n\n engine = create_engine(Database_con)\n print('Test_2.5')\n con = engine.connect()\n print('Test_3')\n\n\n df.to_sql('Stencil_Bank', con, if_exists='append', index = False)\n print(f'STENCIL LOGGED TO SQL at {datetime.now()}')\n\n\n\n except Exception as exc:\n print(f'ERROR CONNECTING TO SQL:{exc}')\n","repo_name":"JamesB-lab/SCUBALOG_2","sub_path":"blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"41563747298","text":"import os\nimport pytest\nimport sqlite3\nfrom pathlib import Path\n\nfrom youtube_transcriber.datapipeline import DataPipeline\nfrom youtube_transcriber.datapipeline import create_hardcoded_data_pipeline\nfrom youtube_transcriber.preprocessing.youtubevideopreprocessor import YoutubeVideoPreprocessor\nfrom youtube_transcriber.loading.loaderiterator import LoaderIterator\nfrom youtube_transcriber.loading.serialization import JsonSerializer\nfrom youtube_transcriber.transforming.addtitletransform import AddTitleTransform\nfrom youtube_transcriber.transforming.adddescriptiontransform import AddDescriptionTransform\nfrom youtube_transcriber.transforming.whispertransform import WhisperTransform\nfrom youtube_transcriber.transforming.batchtransformer import BatchTransformer\nfrom youtube_transcriber.storing.sqlitebatchvideostorer import SQLiteBatchVideoStorer\nfrom youtube_transcriber.storing.sqlitecontextmanager import SQLiteContextManager\nfrom youtube_transcriber.storing.createdb import create_db\n\n@pytest.fixture\ndef expected_db_output():\n return [\n (\"Tquotes\",\n \"https://www.youtube.com/watch?v=NSkoGZ8J1Ag\",\n \"Steve Jobs quotes Bob Dylan\", \n \" Good morning. Good morning and welcome to Apple's 1984 annual shareholders meeting. I'd like to open the meeting with a part of an old poem about a 20-year-old poem by Dylan. That's Bob Dylan. Come writers and critics who prophesize with your pens and keep your eyes wide, the chance won't come again. And don't speak too soon for the wheels still in spin. And there's no telling who that it's naming. For the loser now will be later to win for the times they are a change in. Now.\"),\n (\"changminjen\",\n \"https://www.youtube.com/watch?v=Ak516vtDTEA\",\n \"My allegiance is to the Republic, to democracy!\", \n \" I have brought peace, freedom, justice and security to my new empire. Your new empire don't make me kill you. Anakin, my allegiance is to the Republic, to democracy! If you're not with me, then you're my enemy. 
Only a Sith deals an absolute.\")\n ]\n\n@pytest.fixture\ndef data_pipeline():\n loader_iterator = LoaderIterator(JsonSerializer(), 2)\n batch_transformer = BatchTransformer([AddTitleTransform(),\n AddDescriptionTransform(),\n WhisperTransform()])\n video_storer = SQLiteBatchVideoStorer()\n sqlite_context_manager = SQLiteContextManager(\"dummy.db\")\n return DataPipeline(loader_iterator,\n batch_transformer,\n video_storer,\n sqlite_context_manager)\n\ndef test_datapipeline_init():\n data_pipeline = DataPipeline(\"loader_iterator\",\n \"transformer\",\n \"storer\",\n \"context\")\n assert type(data_pipeline) == DataPipeline\n assert data_pipeline.loader_iterator == \"loader_iterator\"\n assert data_pipeline.batch_transformer == \"transformer\"\n assert data_pipeline.storer == \"storer\"\n assert data_pipeline.sqlite_context_manager == \"context\"\n \ndef test_process_files(data_pipeline, expected_db_output):\n test_folder = Path.home()/\"whisper_gpt_pipeline/youtube_transcriber/test\"\n files = [Path(test_folder/\"files/6.json\"), Path(test_folder/\"files/7.json\")]\n try:\n create_db(\"dummy.db\")\n connection = sqlite3.connect(\"dummy.db\")\n cursor = connection.cursor()\n \n data_pipeline.process(files)\n \n cursor.execute(\"SELECT CHANNEL_NAME, URL, TITLE, TRANSCRIPTION FROM VIDEO\")\n videos = cursor.fetchall()\n \n for i in range(len(videos)):\n assert videos[i][0] == expected_db_output[i][0]\n assert videos[i][1] == expected_db_output[i][1]\n assert videos[i][2] == expected_db_output[i][2]\n assert videos[i][3] == expected_db_output[i][3]\n finally:\n os.remove(\"dummy.db\")\n\ndef test_process_video_batch(data_pipeline, expected_db_output):\n video_data = [\n {\n \"channel_name\": \"Tquotes\",\n \"url\": \"https://www.youtube.com/watch?v=NSkoGZ8J1Ag\",\n },\n {\n \"channel_name\": \"changminjen\",\n \"url\": \"https://www.youtube.com/watch?v=Ak516vtDTEA\",\n }\n ]\n try:\n create_db(\"dummy.db\")\n connection = sqlite3.connect(\"dummy.db\")\n cursor = connection.cursor()\n\n data_pipeline._process_video_batch(cursor, video_data)\n\n cursor.execute(\"SELECT CHANNEL_NAME, URL, TITLE, TRANSCRIPTION FROM VIDEO\")\n videos = cursor.fetchall()\n\n for i in range(len(videos)):\n assert videos[i][0] == expected_db_output[i][0]\n assert videos[i][1] == expected_db_output[i][1]\n assert videos[i][2] == expected_db_output[i][2]\n assert videos[i][3] == expected_db_output[i][3]\n finally:\n os.remove(\"dummy.db\")\n \ndef test_hardcoded_data_pipeline_is_instantiated():\n data_pipeline = create_hardcoded_data_pipeline()\n assert type(data_pipeline) == DataPipeline ","repo_name":"juancopi81/youtube-transcriber","sub_path":"youtube_transcriber/test/test_datapipeline.py","file_name":"test_datapipeline.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"6848683798","text":"#=================================================================Imports==================================================\r\nfrom tkinter import *\r\nimport tkinter as tttk\r\nfrom ttkthemes import themed_tk as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport mysql.connector\r\n\r\n#===================================================================Functions=======================================================================\r\n\r\ndef update(rows):\r\n trv.delete(*trv.get_children())\r\n for i in rows:\r\n trv.insert('', 'end', values=i)\r\n\r\ndef search():\r\n import mysql.connector as c\r\n chk=0\r\n con=c.connect(host=\"localhost\",user=\"root\",passwd=\"sandy\",database=\"ebms\")\r\n cursor=con.cursor() \r\n srch2=srch.get()\r\n query = \"select customer.Cust_ID,customer.Customer_First_Name, customer.Customer_Last_Name, customer.Address_Line_1, customer.Address_Line_2, customer.Pincode, customer.Contact_Number, account.Account_ID, account.Account_Type, account.Meter_No, account.Cur_Meter_Reading, account.Prev_Meter_Reading from customer join account on customer.Cust_ID=account.Cust_ID WHERE customer.Cust_ID LIKE '%\"+srch2+\"%';\"\r\n cursor.execute(query)\r\n rows = cursor.fetchall()\r\n con.commit()\r\n update(rows)\r\n\r\ndef display():\r\n query= \"select customer.Cust_ID,customer.Customer_First_Name, customer.Customer_Last_Name, customer.Address_Line_1, customer.Address_Line_2, customer.Pincode, customer.Contact_Number, account.Account_ID, account.Account_Type, account.Meter_No, account.Cur_Meter_Reading, account.Prev_Meter_Reading from customer join account on customer.Cust_ID=account.Cust_ID;\"\r\n cursor.execute(query)\r\n rows=cursor.fetchall()\r\n update(rows)\r\n\r\ndef getrow(event):\r\n rowid = trv.identify_row(event.y)\r\n item = trv.item(trv.focus())\r\n t1.set(item['values'][0])\r\n t2.set(item['values'][1])\r\n t3.set(item['values'][2])\r\n t4.set(item['values'][3])\r\n t5.set(item['values'][4])\r\n t6.set(item['values'][5])\r\n t7.set(item['values'][6])\r\n t8.set(item['values'][7])\r\n t9.set(item['values'][8])\r\n t10.set(item['values'][9])\r\n t11.set(item['values'][10])\r\n t12.set(item['values'][11])\r\n\r\ndef update_customer():\r\n cusid = t1.get()\r\n fname = t2.get()\r\n lname = t3.get()\r\n adln1 = t4.get()\r\n adln2 = t5.get()\r\n pcode = t6.get()\r\n cnnum = t7.get()\r\n accid = t8.get()\r\n acctp = t9.get()\r\n mtrno = t10.get()\r\n cmtrd = t11.get()\r\n pmtrd = t12.get()\r\n\r\n if messagebox.askyesno(\"Confirm Updation\", \"Do you want to update Customer Details?\"):\r\n query = \"Update account set Account_ID = %s, Account_Type = %s, Meter_No = %s, Cur_Meter_Reading = %s, Prev_Meter_Reading = %s where Cust_ID = %s\"\r\n query2 = \"update customer set Customer_First_Name = %s, Customer_Last_Name = %s, Address_Line_1 = %s, Address_Line_2 = %s, Pincode = %s, Contact_Number = %s where Cust_ID = %s\"\r\n cursor.execute (query, (accid, acctp, mtrno, cmtrd, pmtrd, cusid))\r\n cursor.execute (query2, (fname, lname, adln1, adln2, pcode, cnnum, cusid))\r\n mydb.commit()\r\n display()\r\n else:\r\n return True\r\n \r\n\r\n\r\ndef add_new():\r\n cusid = t1.get()\r\n fname = t2.get()\r\n lname = t3.get()\r\n adln1 = t4.get()\r\n adln2 = t5.get()\r\n pcode = t6.get()\r\n cnnum = t7.get()\r\n accid = t8.get()\r\n acctp = t9.get()\r\n mtrno = t10.get()\r\n cmtrd = t11.get()\r\n pmtrd = t12.get()\r\n\r\n query = \"insert into customer (Cust_Id, Customer_First_Name, Customer_Last_Name, Address_Line_1, 
Address_Line_2, Pincode, Contact_Number) values (%s, %s, %s, %s, %s, %s, %s)\"\r\n query2 = \"insert into account (Account_ID, Cust_ID, Account_Type, Meter_No, Cur_Meter_Reading, Prev_Meter_Reading) values (%s, %s, %s, %s, %s, %s)\"\r\n cursor.execute(query, (cusid, fname, lname, adln1, adln2, pcode, cnnum))\r\n cursor.execute(query2, (accid, cusid, acctp, mtrno, cmtrd, pmtrd))\r\n mydb.commit()\r\n display()\r\n \r\n \r\n\r\ndef delete_customer():\r\n customer_id = t1.get()\r\n if messagebox.askyesno(\"Confirm Deletion?\",\"Do you want to delete Customer Detail?\"):\r\n query = \"delete from account where Cust_ID=\"+customer_id\r\n query2= \"delete from customer where Cust_ID=\"+customer_id\r\n cursor.execute(query)\r\n cursor.execute(query2)\r\n mydb.commit()\r\n display()\r\n else:\r\n return True\r\n\r\ndef clear():\r\n t1.set('')\r\n t2.set('')\r\n t3.set('')\r\n t4.set('')\r\n t5.set('')\r\n t6.set('')\r\n t7.set('')\r\n t8.set('')\r\n t9.set('')\r\n t10.set('')\r\n t11.set('')\r\n t12.set('')\r\n \r\n\r\ndef exitb():\r\n cexit=tttk.messagebox.askyesno('Exit Admin Editor?', 'CONFIRM IF YOU WANT TO EXIT')\r\n if cexit>0:\r\n root.destroy()\r\n return\r\n else:\r\n srch.focus()\r\n\r\n\r\ndef clearlst():\r\n trv.delete(*trv.get_children())\r\n srch.set('')\r\n \r\n \r\n \r\n \r\n \r\n#====================================================================Database Conn.========================================================= \r\n\r\nmydb = mysql.connector.connect (host=\"localhost\", user=\"root\", passwd=\"*********\", database=\"ebms\")\r\ncursor = mydb.cursor()\r\n#====================================================================Tkinter Frame============================================\r\nroot= tk.ThemedTk()\r\nroot.get_themes() \r\nroot.set_theme(\"radiance\")\r\nroot.state(\"zoomed\")\r\nroot.iconbitmap(r'E:\\Electricity Billing System\\icon.ico')\r\nwrapper1 = ttk.LabelFrame (root, text=\"Customer List\")\r\nwrapper2 = ttk.LabelFrame (root, text=\"Search\")\r\nwrapper3 = ttk.LabelFrame (root, text=\"Customer Data\")\r\n\r\n#===================================================StringVars===========================================\r\nsrch=StringVar()\r\nt1=StringVar()\r\nt2=StringVar()\r\nt3=StringVar()\r\nt4=StringVar()\r\nt5=StringVar()\r\nt6=StringVar()\r\nt7=StringVar()\r\nt8=StringVar()\r\nt9=StringVar()\r\nt10=StringVar()\r\nt11=StringVar()\r\nt12=StringVar()\r\n\r\n\r\nwrapper1.pack(fill=\"both\",expand=\"yes\", padx=20, pady=10)\r\nwrapper2.pack(fill=\"both\",expand=\"yes\", padx=20, pady=10)\r\nwrapper3.pack(fill=\"both\",expand=\"yes\", padx=20, pady=10)\r\n\r\ntrv = ttk.Treeview(wrapper1, columns=(1,2,3,4,5,6,7,8,9,10,11,12), show=\"headings\", height=\"5\")\r\ntrv.pack(side=LEFT)\r\ntrv.place(x=0, y=0)\r\n\r\ntrv.heading('#1', text=\"Customer ID\")\r\ntrv.heading('#2', text=\"Cust First Name\")\r\ntrv.heading('#3', text=\"Cust Last Name\")\r\ntrv.heading('#4', text=\"Address Line 1\")\r\ntrv.heading('#5', text=\"Address Line 2\")\r\ntrv.heading('#6', text=\"Pincode\")\r\ntrv.heading('#7', text=\"Contact Number\")\r\ntrv.heading('#8', text=\"Account ID\")\r\ntrv.heading('#9', text=\"Account Type\")\r\ntrv.heading('#10', text=\"Meter No\")\r\ntrv.heading('#11', text=\"Cur Meter Reading\")\r\ntrv.heading('#12', text=\"Prev Meter Reading\")\r\ntrv.column('#1', width=123, minwidth=150)\r\ntrv.column('#2', width=123, minwidth=150)\r\ntrv.column('#3', width=123, minwidth=150)\r\ntrv.column('#4', width=123, minwidth=150)\r\ntrv.column('#5', width=123, 
minwidth=150)\r\ntrv.column('#6', width=123, minwidth=150)\r\ntrv.column('#7', width=123, minwidth=150)\r\ntrv.column('#8', width=123, minwidth=150)\r\ntrv.column('#9', width=123, minwidth=150)\r\ntrv.column('#10', width=123, minwidth=150)\r\ntrv.column('#11', width=123, minwidth=150)\r\ntrv.column('#12', width=123, minwidth=150)\r\n\r\ntrv.bind('', getrow)\r\n\r\n#=======================================Vertical Scrollbar=========================================\r\nyscrollbar = ttk.Scrollbar(wrapper1, orient='vertical', command=trv.yview)\r\nyscrollbar.pack(side=RIGHT , fill=\"y\")\r\n\r\n\r\n\r\n#========================================Horizontal Scrollbar======================================\r\nxscrollbar = ttk.Scrollbar(wrapper1, orient='horizontal', command=trv.xview)\r\nxscrollbar.pack(side=BOTTOM , fill= BOTH)\r\n\r\ntrv.configure(yscrollcommand=yscrollbar.set, xscrollcommand=xscrollbar.set)\r\n\r\nquery= \"select customer.Cust_ID,customer.Customer_First_Name, customer.Customer_Last_Name, customer.Address_Line_1, customer.Address_Line_2, customer.Pincode, customer.Contact_Number, account.Account_ID, account.Account_Type, account.Meter_No, account.Cur_Meter_Reading, account.Prev_Meter_Reading from customer join account on customer.Cust_ID=account.Cust_ID;\"\r\ncursor.execute(query)\r\nrows = cursor.fetchall()\r\nupdate(rows)\r\n\r\n\r\n\r\n#==========================================Search Section============================================\r\n\r\nlbl = ttk.Label(wrapper2, text =\"Search Using Customer ID:\")\r\nlbl.pack(side=tttk.LEFT, padx=10)\r\nent = ttk.Entry(wrapper2, textvariable = srch)\r\nent.pack(side=tttk.LEFT, padx=6)\r\nbtn = ttk.Button(wrapper2, text=\"Search\", command = search)\r\nbtn.pack(side=tttk.LEFT, padx=6)\r\ndbtn = ttk.Button(wrapper2, text=\"Display All\", command = display)\r\ndbtn.pack(side=tttk.LEFT, padx=6)\r\nccbtn= ttk.Button(wrapper2, text=\"Clear List\", command = clearlst)\r\nccbtn.pack(side=tttk.LEFT, padx=6)\r\n\r\n\r\n#================================================User Data==================================================\r\nlbl1 =ttk.Label(wrapper3, text=\"Customer ID\")\r\nlbl1.grid(row=0, column=1, padx=5, pady=3)\r\nent1 = ttk.Entry(wrapper3, textvariable= t1)\r\nent1.grid(row=0, column=2, padx=5, pady=3)\r\n\r\nlbl2 =ttk.Label(wrapper3, text=\"Customer First Name\")\r\nlbl2.grid(row=1, column=1, padx=5, pady=3)\r\nent2 = ttk.Entry(wrapper3, textvariable= t2)\r\nent2.grid(row=1, column=2, padx=5, pady=3)\r\n\r\nlbl3 =ttk.Label(wrapper3, text=\"Customer Last Name\")\r\nlbl3.grid(row=2, column=1, padx=5, pady=3)\r\nent3 = ttk.Entry(wrapper3, textvariable= t3)\r\nent3.grid(row=2, column=2, padx=5, pady=3)\r\n\r\nlbl4 =ttk.Label(wrapper3, text=\"Address Line 1\")\r\nlbl4.grid(row=3, column=1, padx=5, pady=3)\r\nent4 = ttk.Entry(wrapper3, textvariable= t4)\r\nent4.grid(row=3, column=2, padx=5, pady=3)\r\n\r\nlbl5 =ttk.Label(wrapper3, text=\"Address Line 2\")\r\nlbl5.grid(row=4, column=1, padx=5, pady=3)\r\nent5 = ttk.Entry(wrapper3, textvariable= t5)\r\nent5.grid(row=4, column=2, padx=5, pady=3)\r\n\r\nlbl6 =ttk.Label(wrapper3, text=\"Pincode\")\r\nlbl6.grid(row=5, column=1, padx=5, pady=3)\r\nent6 = ttk.Entry(wrapper3, textvariable= t6)\r\nent6.grid(row=5, column=2, padx=5, pady=3)\r\n\r\nlbl7 =ttk.Label(wrapper3, text=\"Contact Number\")\r\nlbl7.grid(row=6, column=1, padx=5, pady=3)\r\nent7 = ttk.Entry(wrapper3, textvariable= t7)\r\nent7.grid(row=6, column=2, padx=5, pady=3)\r\n\r\nlbl8 =ttk.Label(wrapper3, text=\"Account 
ID\")\r\nlbl8.grid(row=7, column=1, padx=5, pady=3)\r\nent8 = ttk.Entry(wrapper3, textvariable= t8)\r\nent8.grid(row=7, column=2, padx=5, pady=3)\r\n\r\nlbl9 =ttk.Label(wrapper3, text=\"Account Type\")\r\nlbl9.grid(row=8, column=1, padx=5, pady=3)\r\nent9 = ttk.Entry(wrapper3, textvariable= t9)\r\nent9.grid(row=8, column=2, padx=5, pady=3)\r\n\r\nlbl10 =ttk.Label(wrapper3, text=\"Meter Number\")\r\nlbl10.grid(row=9, column=1, padx=5, pady=3)\r\nent10 = ttk.Entry(wrapper3, textvariable= t10)\r\nent10.grid(row=9, column=2, padx=5, pady=3)\r\n\r\nlbl11 =ttk.Label(wrapper3, text=\"Current Meter Reading\")\r\nlbl11.grid(row=10, column=1, padx=5, pady=3)\r\nent11 = ttk.Entry(wrapper3, textvariable= t11)\r\nent11.grid(row=10, column=2, padx=5, pady=3)\r\n\r\nlbl12 =ttk.Label(wrapper3, text=\"Previous Meter Reading\")\r\nlbl12.grid(row=11, column=1, padx=5, pady=3)\r\nent12 = ttk.Entry(wrapper3, textvariable= t12)\r\nent12.grid(row=11, column=2, padx=5, pady=3)\r\n\r\nup_btn = ttk.Button(wrapper3, text=\"Update\", command = update_customer)\r\nadd_btn = ttk.Button(wrapper3, text=\"Add New\", command= add_new)\r\ndel_btn = ttk.Button(wrapper3, text=\"Delete\", command= delete_customer)\r\ncbtn = ttk.Button(wrapper3, text=\"Clear\", command = clear)\r\nexbtn = ttk.Button(wrapper3, text =\"Exit\", command = exitb)\r\n\r\nadd_btn.grid(row=13, column=0, padx=5, pady=3)\r\nup_btn.grid(row=13, column=1, padx=5, pady=3)\r\ndel_btn.grid(row=13, column=2, padx=5, pady=3)\r\ncbtn.grid(row=13, column=3, padx=5, pady=3)\r\nexbtn.grid(row=13, column=4, padx=5, pady=3)\r\n\r\n\r\n\r\nroot.title(\"Admin Page\")\r\nroot.geometry(\"1500x1000\")\r\nroot.mainloop()\r\n \r\n","repo_name":"volstice/Electricity-Bill-Management-Sys","sub_path":"Adminpage.py","file_name":"Adminpage.py","file_ext":"py","file_size_in_byte":11900,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"44266199306","text":"# Ex_1987 알파벳 [골4]\n\ndef stoi(s):\n return ord(s) - ord('A')\n\n\ndef isPromising(x, y):\n # 좌표의 범위\n if 0 <= x < R and 0 <= y < C:\n # 방문했으면 False\n return not visited[stoi(board[x][y])]\n return False\n\n\ndef dfs(x, y, depth):\n ans[0] = max(ans[0], depth)\n # 종료 조건\n if ans[0] == stoi('Z') + 1:\n return\n\n for dx, dy in (0, 1), (0, -1), (-1, 0), (1, 0):\n nx, ny = x + dx, y + dy\n if isPromising(nx, ny):\n visited[stoi(board[nx][ny])] = True\n dfs(nx, ny, depth + 1)\n visited[stoi(board[nx][ny])] = False\n\n\nR, C = map(int, input().split())\nboard = list(input() for _ in range(R))\nvisited = [False] * (stoi('Z') + 1)\n\nans = [0]\nvisited[stoi(board[0][0])] = True\ndfs(0, 0, 1)\n\nprint(*ans)\n","repo_name":"Cha-Ji/Algorithm","sub_path":"Backjoon/old/21_04_str_backT/21_04_02_backtrack/1987.py","file_name":"1987.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13822221057","text":"# chart/urls.py\nfrom django.contrib import admin\nfrom django.urls import path\nfrom chart import views # !!!\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('ticket-class/',\n views.ticket_class_view, name='ticket_class_view'),\n path('world-population/',\n views.world_population, name='world_population'), # !!!\n path('covid_cases/',\n views.covid_cases, name='covid_cases'),\n path('covid_cases_per_capita/',\n views.covid_cases_per_capita, name='covid_cases_per_capita'),\n path('admin/', admin.site.urls),\n]\n","repo_name":"serin0911/h_chart","sub_path":"chart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71570874926","text":"import asyncio\nimport aiohttp\nfrom datetime import datetime, timedelta\nfrom pynws import SimpleNWS, NwsError\nfrom textwrap import TextWrapper\nfrom tkinter import *\nfrom tkinter.font import Font\nfrom tkinter import ttk\n\nLOCATION = (48.1829, -117.0607)\nSTATION = 'ITDA8' # Old Town\nUSERID = \"kmhall@gmail.com\"\nxMax = 80\nREFRESH_RATE = 5 * 60 * 1000 # 5 minutes\n\nnws = None\nmyWrap = None\n\nroot = None\nstatusContents = None\nforecastText = None\n\n\ndef getWindDirection(windDirection):\n if (windDirection > 315.0+1.0):\n return \"NNW\"\n if (windDirection > 292.5+1.0):\n return \"NW\"\n if (windDirection > 270.0+1.0):\n return \"WNW\"\n if (windDirection > 247.5+1.0):\n return \"W\"\n if (windDirection > 225.0+1.0):\n return \"WSW\"\n if (windDirection > 202.5+1.0):\n return \"SW\"\n if (windDirection > 180.0+1.0):\n return \"SSW\"\n if (windDirection > 157.5+1.0):\n return \"S\"\n if (windDirection > 135.0+1.0):\n return \"SSE\"\n if (windDirection > 112.5+1.0):\n return \"SE\"\n if (windDirection > 90.0+1.0):\n return \"ESE\"\n if (windDirection > 67.5+1.0):\n return \"E\"\n if (windDirection > 45.0+1.0):\n return \"ENE\"\n if (windDirection > 22.5+1.0):\n return \"NE\"\n if (windDirection > 0.0+1.0):\n return \"NNE\"\n return \"N\"\n\n\ndef decorateForecast():\n # set up tags\n for tag in forecastText.tag_names():\n forecastText.tag_delete(tag)\n\n myfont = Font(font=forecastText['font']).copy()\n myfont.configure(weight='bold')\n forecastText.tag_configure(\n 'boldline', font=myfont, foreground='white', background='blue')\n forecastText.tag_configure(\n 'alertline', font=myfont, foreground='white', background='red')\n\n # bold tag for Observation: and Forecast:\n loc = '1.0'\n idxStart = forecastText.search('Observation:', loc)\n if idxStart != '':\n idxEnd = idxStart + \" wordend\"\n forecastText.tag_add('boldline', idxStart, idxEnd)\n\n # bold tag for Forecast:\n loc = '1.0'\n idxStart = forecastText.search('Forecast:', loc)\n if idxStart != '':\n idxEnd = idxStart + \" wordend\"\n forecastText.tag_add('boldline', idxStart, idxEnd)\n\n # alert tag for Alert:\n loc = '1.0'\n idxStart = forecastText.search('Alert:', loc)\n if idxStart != '':\n idxEnd = idxStart + \" wordend\"\n forecastText.tag_add('alertline', idxStart, idxEnd)\n\n\ndef observationToText(obs):\n obs_time = datetime.fromisoformat(obs['timestamp']).astimezone()\n obs_temp = 9 * obs['temperature'] / 5.0 + 32.0\n obs_speed = obs['windSpeed'] * 0.6213712\n obs_direction = getWindDirection(obs['windDirection'])\n\n obs_gust = None\n if obs['windGust']:\n obs_gust = obs['windGust'] * 0.6213712\n\n obs_humidity = obs['relativeHumidity']\n lastFetch = datetime.now()\n\n line = \"Observation: \"\n line += f\"{obs_time.strftime('%A %I:%M%p')} \"\n line += f\"Fetched {lastFetch.strftime('%I:%M%p')}\"\n line += '\\n'\n\n line += f\" Temp: {obs_temp:.1f} F \"\n line += f\"Humidity: {obs_humidity:.0f} %\"\n line += f\" Wind: {obs_direction} {obs_speed:.1f} mph\"\n if obs_gust:\n line += \", gusts to {obs_gust:.1f} mph\"\n line += '\\n'\n\n return line\n\n\ndef forecastToText(fc):\n fc_start = datetime.fromisoformat(fc['startTime']).astimezone()\n fc_end = datetime.fromisoformat(fc['endTime']).astimezone()\n fc_name = fc['name']\n fc_shorttext = fc['shortForecast']\n fc_detailed = fc['detailedForecast']\n\n line = \"\"\n if fc_start.day == fc_end.day:\n line += f\" From {fc_start.strftime('%A %I:%M%p')} to {fc_end.strftime('%I:%M%p')}\"\n else:\n line += f\" From {fc_start.strftime('%A 
%I:%M%p')} to {fc_end.strftime('%A %I:%M%p')}\"\n line += '\\n'\n\n line += f\" {fc_name}: {fc_shorttext} \"\n line += '\\n'\n\n for ll in myWrap.wrap(fc_detailed):\n if ll:\n line += ' ' + f\"{ll.strip()}\"\n line += '\\n'\n return line\n\n\ndef alertToText(alert):\n line = \"\"\n if alert:\n line += \"Alerts:\\n\"\n for stmt in alert:\n msg = stmt['messageType']\n event = stmt['event']\n start = datetime.fromisoformat(stmt['effective']).astimezone()\n stop = datetime.fromisoformat(stmt['expires']).astimezone()\n line += f\" {msg}: {event}\" + \" \"*20\n if start.day == stop.day:\n line += f\"({start.strftime('%A %I:%M%p')} to {stop.strftime('%I:%M%p')})\"\n else:\n line += f\"({start.strftime('%A %I:%M%p')} to {stop.strftime('%A %I:%M%p')})\"\n line += '\\n'\n headline = stmt['parameters']['NWSheadline']\n for hl in myWrap.wrap(\"\".join(headline)):\n line += f\" {hl}\\n\"\n line += '\\n'\n else:\n line += \"No Alerts\\n\"\n\n return line\n\n\ndef getNwsText():\n global nws\n tt = \"No forecast available...\"\n\n if nws is not None:\n tt = \"\"\n tt += observationToText(nws.observation) + \"\\n\"\n\n tt += \"Forecast: \\n\"\n for ff in nws.forecast[0:2]:\n tt += forecastToText(ff) + \"\\n\"\n\n tt += alertToText(nws.alerts_forecast_zone) + \"\\n\"\n\n return tt\n\n\nasync def fetchNws():\n global nws, forecastText, statusContents, root\n statusContents.set('Fetching forecast...')\n try:\n async with aiohttp.ClientSession() as session:\n nws = SimpleNWS(*LOCATION, USERID, session)\n await nws.set_station(STATION)\n await nws.update_observation()\n await nws.update_forecast()\n await nws.update_alerts_forecast_zone()\n forecastText.delete('1.0', 'end')\n forecastText.insert('end', getNwsText())\n decorateForecast()\n statusContents.set('Complete!')\n except Exception as e:\n statusContents.set('Error')\n forecastText.delete('1.0', 'end')\n forecastText.insert('1.0', e)\n\n\ndef getNwsForecast(*args):\n global root\n try:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(fetchNws())\n except Exception as e:\n print(f\"Exception: {e}\")\n root.after(REFRESH_RATE, getNwsForecast)\n\n\ndef main():\n global statusContents, forecastText, nws, myWrap, root\n\n myWrap = TextWrapper(width=xMax - 10)\n\n root = Tk()\n root.title(\"Demo NWS\")\n frm = ttk.Frame(root, padding='10 10 10 10')\n frm.grid()\n\n statusContents = StringVar()\n\n statusLabel = ttk.Label(frm, textvariable=statusContents)\n statusLabel.grid(column=0, row=0)\n statusContents.set('Hello, World!')\n\n quitButton1 = ttk.Button(frm, text=\"Quit\", command=root.destroy)\n quitButton1.grid(column=2, row=0)\n\n nwsButton = ttk.Button(frm, text=\"NWS\", command=getNwsForecast)\n nwsButton.grid(column=1, row=0)\n\n forecastText = Text(frm, width=85, height=20,\n wrap='none', fg='white', bg='black', font='LucidaConsole')\n ys = ttk.Scrollbar(frm, orient='vertical', command=forecastText.yview)\n forecastText.grid(column=0, row=1, columnspan=3, sticky='nwse')\n forecastText['state'] = 'normal'\n forecastText['yscrollcommand'] = ys.set\n ys.grid(column=3, row=1, sticky='nse')\n\n # after 5 seconds for the window to be established, fetch the NWS info\n root.after(5000, getNwsForecast)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"grumble1965/advent2015","sub_path":"tkdemo.py","file_name":"tkdemo.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"18228696701","text":"from struct import pack\nfrom tkinter import *\nroot=Tk()\nroot.geometry(\"700x450\")\nf=Frame(root, bg=\"grey\", borderwidth=3, relief=SUNKEN, padx=9, pady=3)\nf.pack(side=LEFT, fill=\"y\")\nf1=Frame(root, bg=\"grey\", borderwidth=3, relief=SUNKEN, padx=9, pady=3)\nf1.pack(side=TOP, fill=\"x\")\nLabel(f,text=\"Editor\",bg=\"grey\",fg=\"white\",padx=9).pack()\nLabel(f1,text=\"Welcome To VS Code 2.0\", bg=\"grey\", fg=\"white\",padx=9).pack()\nroot.mainloop()\n","repo_name":"ashishalf/Python-Tkinter","sub_path":"frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4343247567","text":"import sys\nN = 15\nn = int(input())\nvs_col, vs_deg, vs_un_deg = [False] * N, [False] * 2 * N, [False] * 2 * N\npath, cnt = [0] * n, 0\nsys.setrecursionlimit(10000000)\n\n\ndef notOk(x, y):\n return vs_col[y] or vs_deg[y - x + n] or vs_un_deg[y + x]\n\n\ndef dfs(x):\n global cnt\n if x >= n:\n cnt += 1\n if cnt <= 3: print(*map(lambda x: x + 1, path))\n return\n for y in range(n):\n if notOk(x, y): continue\n # try put queue in every col -> y\n path[x] = y\n vs_col[y] = vs_deg[y - x + n] = vs_un_deg[y + x] = True\n dfs(x + 1)\n vs_col[y] = vs_deg[y - x + n] = vs_un_deg[y + x] = False\n\n\ndfs(0)\nprint(cnt)\n","repo_name":"su-Pro/basic_coding_practice","sub_path":"843. n-皇后问题.py","file_name":"843. n-皇后问题.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"12662446297","text":"#!/usr/bin/python3\n#coding=utf-8\n#author: cody\n\nfrom ManifestEditor.BaseChunk import BaseChunk\nfrom ManifestEditor.TypedValue import TypedValue\nfrom ManifestEditor.Util import *\n\nclass XmlContentChunk(BaseChunk):\n START_NAMESPACE = \"0x00100100\"\n END_NAMESPACE = \"0x00100101\"\n START_TAG = \"0x00100102\"\n END_TAG = \"0x00100103\"\n TEXT = \"0x00100104\"\n def __init__(self, all_bytes, offset):\n cursor = offset\n all_chunk_size = len(all_bytes)\n self.chunk_arr = []\n self._tag_chunk_arr = {}\n self.chunk_size = 0\n while (cursor < all_chunk_size):\n chunk_type = bytes_to_hex(all_bytes[cursor: cursor + 4])\n chunk = None\n if chunk_type == self.START_NAMESPACE: \n chunk = NamespaceChunk(all_bytes, cursor)\n elif chunk_type == self.END_NAMESPACE:\n chunk = NamespaceChunk(all_bytes, cursor)\n elif chunk_type == self.START_TAG:\n chunk = TagChunk(all_bytes, cursor)\n if not self._tag_chunk_arr.get(chunk.name):\n self._tag_chunk_arr[chunk.name] = []\n self._tag_chunk_arr[chunk.name].append(chunk)\n chunk.parse_attribute()\n elif chunk_type == self.END_TAG:\n chunk = TagChunk(all_bytes, cursor)\n elif chunk_type == self.TEXT:\n chunk = TextChunk(all_bytes, cursor)\n else:\n break\n self.chunk_size = self.chunk_size + chunk.chunk_size\n self.chunk_arr.append(chunk)\n cursor = chunk.end_offset\n\n def find_tag_chunk(self, tag_name, attr_key = None, attr_value = None):\n if tag_name and self._tag_chunk_arr.get(tag_name):\n if attr_key:\n for chunk in self._tag_chunk_arr[tag_name]:\n if chunk.query_attribute(attr_key, attr_value):\n return chunk\n else:\n return self._tag_chunk_arr[tag_name][0]\n \nclass NamespaceChunk(BaseChunk):\n def __init__(self, all_bytes, offset):\n BaseChunk.__init__(self, all_bytes, offset)\n \n reader = create_reader(self.byte_arr, 8)\n\n self.line_number = bytes_to_int(reader(4))\n\n # unknow 4bytes\n reader(4)\n\n self.prefix = bytes_to_int(reader(4))\n self.uri = bytes_to_int(reader(4))\n\n\nclass TagChunk(BaseChunk):\n def __init__(self, all_bytes, offset):\n BaseChunk.__init__(self, all_bytes, offset)\n\n reader = create_reader(self.byte_arr, 8)\n\n self.line_number = bytes_to_int(reader(4))\n \n # unknow 4bytes\n reader(4)\n\n self.namespace_uri = bytes_to_int(reader(4))\n self.name = TypedValue.convertToString(bytes_to_int(reader(4)))\n\n def parse_attribute(self):\n # 前6个字节已经在构造函数中解析过了\n reader = create_reader(self.byte_arr, 4*6)\n self.flags = reader(4)\n self.attribute_count = bytes_to_int(reader(4))\n self.class_attribute = reader(4)\n\n # print(\"attr count:\", self.attribute_count)\n count = 0\n \"\"\"\n 每个属性固定 20 个字节,包含 5 个字段,每个字段都是 4 字节无符号 int,各个字段含义如下:\n namespaceUri : 属性的命名空间 uri 在字符串池中的索引。此处很少会等于 -1\n name : 属性名称在字符串池中的索引\n valueStr : 属性值\n type : 属性类型\n data : 属性数据\n \"\"\"\n key_arr = [\"namespaceUri\", \"name\", \"valueStr\", \"type\", \"data\"]\n self.attr_arr = []\n while(count < self.attribute_count):\n attr_dict = {}\n for i in range(5):\n b = reader(4)\n value = bytes_to_int(b)\n if i == 0: # namespaceUri\n attr_dict[key_arr[i]] = TypedValue.convertToString(value)\n elif i == 1: # name\n attr_dict[key_arr[i]] = TypedValue.convertToString(value)\n elif i == 2: # valueStr\n attr_dict[key_arr[i]] = value\n elif i == 3: # type 这里为什么要右移24?\n attr_dict[key_arr[i]] = value >> 24\n elif i == 4: # data\n attr_dict[key_arr[i]] = TypedValue.coerceToString(attr_dict[\"type\"], value)\n attr_dict[\"data_orig\"] = value\n else:\n pass\n self.attr_arr.append(attr_dict)\n attr_dict[\"order\"] = count\n count = count 
+ 1\n\n def query_attribute(self, attr_key, attr_value):\n for attr in self.attr_arr:\n if attr[\"name\"] == attr_key:\n if attr_value == None or attr_value == attr[\"data\"]:\n return attr\n else:\n return None\n return None\n\n def get_attr_value(self, attr_key):\n attr_obj = self.query_attribute(attr_key, None)\n return attr_obj[\"data\"]\n\n def modify(self, attr_key, attr_value):\n attr_dict = self.query_attribute(attr_key, None)\n # 如果是字符串类型只需要用索引去修改对应的StringChunk\n if attr_dict[\"type\"] == TypedValue.TYPE_STRING:\n attr_dict[\"data\"] = attr_value\n return True, attr_dict[\"data_orig\"]\n else:\n # 偏移了 4 * 9 个字节\n # # index 第几个属性\n base_offset = 4*9 + 4*4\n t = type(attr_value)\n b = None\n # 判断 attr_value 的类型\n # 先把值转换成对应的bytes\n # 找到对应的位置替换\n if t == int:\n b = int_to_bytes(attr_value)\n elif t == float:\n b = float_to_bytes(attr_value)\n self.replace_bytes(b, base_offset + attr_dict[\"order\"] * 4 * 5)\n return False, None\n\n def print_info(self):\n for attr in self.attr_arr:\n for k in attr:\n print(\" \", k, attr[k])\n print(\"====\")\n\n\n\n\nclass TextChunk(BaseChunk):\n def __init__(self, all_bytes, offset):\n BaseChunk.__init__(self, all_bytes, offset)\n\n\n reader = create_reader(self.byte_arr, 8)\n\n self.line_number = bytes_to_int(reader(4))\n \n # unknow 4bytes\n reader(4)\n\n self.name = bytes_to_int(reader(4))","repo_name":"CodyGit/ManifestEditor","sub_path":"ManifestEditor/XmlContentChunk.py","file_name":"XmlContentChunk.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"2"}
+{"seq_id":"18235510471","text":"file = open(\"./add.c\", 'r')\nlines = file.readlines()\n\nkeywords = [\"void\", \"main\", \"int\", \"float\", \"bool\", \"if\", \"for\", \"else\", \"while\", \"char\", \"return\"]\noperators = [\"=\", \"==\", \"+\", \"-\", \"*\", \"/\", \"++\", \"--\", \"+=\", \"-=\", \"!=\", \"||\", \"&&\"]\npunctuations= [\";\", \"(\", \")\", \"{\", \"}\", \"[\", \"]\"]\n\ndef is_int(x):\n try:\n int(x)\n return True\n except:\n return False\n\nfor line in lines:\n for i in line.strip().split(\" \"):\n if i in keywords:\n print (i, \" is a keyword\")\n elif i in operators:\n print (i, \" is an operator\")\n elif i in punctuations:\n print (i, \" is a punctuation\")\n elif is_int(i):\n print (i, \" is a number\")\n else:\n print (i, \" is an identifier\")","repo_name":"shushrutsharma/18CSC304J-CD","sub_path":"1-Lexical Analyser/lexicalAnalyser.py","file_name":"lexicalAnalyser.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"2"}
+{"seq_id":"568542732","text":"import threading\nimport os\nfrom collections import deque\n\nclass SegmentTree:\n\n def __init__(self, data, combine_fn=lambda x,y: x+y, default_leaf_fn=lambda x: x, default_node_val=0):\n \"\"\"Create the segment tree for array data. Complexity: O(n).\"\"\"\n self._combine_fn = combine_fn\n self._default_leaf_fn = default_leaf_fn\n self._default_node_val = default_node_val\n\n self._len = len(data)\n self._size = _size = 1 << (self._len - 1).bit_length()\n\n self.data = [self._default_node_val] * (2 * self._size)\n self.data[self._size:self._size + self._len] = list(map(default_leaf_fn, data))\n for idx in range(self._size-1, 0, -1):\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n\n # def _parent(self, idx):\n # return idx >> 1\n #\n # def _left(self, idx):\n # return 2 * idx\n #\n # def _right(self, idx):\n # return 2 * idx + 1\n\n def __getitem__(self, idx):\n return self.data[idx + self._size]\n\n def __setitem__(self, idx, value):\n idx += self._size\n self.data[idx] = self._default_leaf_fn(value)\n # self._fix_up_to_root(self._parent(idx))\n self._fix_up_to_root(idx >> 1)\n\n def __len__(self):\n return self._len\n\n def _fix_up_to_root(self, idx):\n \"\"\"Computes the value of each internal node, from the given one up to the root.\"\"\"\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1\n\n def get_data(self):\n \"\"\"Returns a list of the values stored in the array\"\"\"\n return self.data[self._size:self._size + self._len]\n\n def update(self, idx, x):\n \"\"\"Update the element at index i to += x. Complexity: O(log n)\"\"\"\n idx += self._size\n self.data[idx] += x\n # self._fix_up_to_root(self._parent(idx))\n self._fix_up_to_root(idx >> 1)\n\n def _query(self, left, right, idx, lx, rx):\n \"\"\"Query subroutine: given node idx covering segment [lx, rx], query for segment [left, right]\"\"\"\n if right < lx or left > rx:\n return self._default_node_val # node's interval completely out of query interval\n elif left <= lx and rx <= right:\n return self.data[idx] # node's interval completely contained in query interval\n else:\n mid = (lx + rx) // 2\n # res_left = self._query(left, right, self._left(idx), lx, mid)\n res_left = self._query(left, right, 2 * idx, lx, mid)\n # res_right = self._query(left, right, self._right(idx), mid+1, rx)\n res_right = self._query(left, right, 2 * idx + 1, mid + 1, rx)\n return self._combine_fn(res_left, res_right)\n\n def query(self, left, right):\n \"\"\"Returns the sum of all the elements from index left to index right (inclusive). 
Complexity: O(log n)\"\"\"\n return self._query(left, right, 1, 0, self._size-1)\n\n def __repr__(self):\n return \"SegmentTree({0})\".format(self.data)\n\n\n\n# x and y - (sum, pref, suf, max_sum)\ndef combine(x, y):\n s = x[0] + y[0]\n pref = max(x[1], x[0]+y[1])\n suf = max(y[2], y[0]+x[2])\n max_sum = max(x[3], y[3], x[2]+y[1])\n return s, pref, suf, max_sum\n\n\nclass MaxSumSegmentTree(SegmentTree):\n\n def __init__(self, data):\n SegmentTree.__init__(self, data,\n combine_fn=combine,\n default_leaf_fn=lambda x: (x, max(x, 0), max(x, 0), max(x, 0)),\n default_node_val=(0, 0, 0, 0)\n )\n\nii = 0\n_inp = b''\n\ndef fast_num_reader():\n def read_char():\n global ii, _inp\n if ii >= len(_inp):\n _inp = os.read(0, 100000)\n # gc.collect()\n ii = 0\n if not _inp:\n return b''\n ii += 1\n return _inp[ii - 1]\n\n def read_int():\n c = read_char()\n if c == b'':\n return None\n if c == b'-'[0]:\n x = 0\n sign = 1\n else:\n x = c - b'0'[0]\n sign = 0\n c = read_char()\n while c >= b'0'[0]:\n x = 10 * x + c - b'0'[0]\n c = read_char()\n if c == b'\\r'[0]:\n read_char()\n return -x if sign else x\n\n while True:\n n = read_int()\n yield n\n if n is None:\n break\n\n\nimport io\nq = deque()\nout = io.StringIO()\n\nclass ProducerThread(threading.Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs=None, verbose=None):\n super(ProducerThread,self).__init__()\n self.target = target\n self.name = name\n\n def run(self):\n reader = fast_num_reader()\n for item in iter(reader):\n # print(item)\n q.append(item)\n\n\nclass ConsumerThread(threading.Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs=None, verbose=None):\n super(ConsumerThread,self).__init__()\n self.target = target\n self.name = name\n return\n\n def queue_reader(self):\n while True:\n if len(q) > 0:\n n = q.popleft()\n if n is None:\n break\n yield n\n else:\n out.write('x')\n\n def run(self):\n reader = self.queue_reader()\n\n n = next(reader)\n m = next(reader)\n\n a = [0] * n\n st = MaxSumSegmentTree(a)\n\n for i in range(n):\n st[i] = next(reader)\n\n out.write(str(st.query(0, n - 1)[3])+\"\\n\")\n\n for _ in range(m):\n i = next(reader)\n v = next(reader)\n st[i] = v\n out.write(str(st.query(0, n - 1)[3])+\"\\n\")\n\n\nif __name__ == '__main__':\n p = ProducerThread(name='producer')\n c = ConsumerThread(name='consumer')\n\n p.start()\n c.start()\n\n\n p.join()\n c.join()\n\n print(out.getvalue())","repo_name":"x3mka/code-contests-python","sub_path":"codeforces/edu/c273278a/tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21349564582","text":"\"\"\"\nThis script contains the main code to run the companion demonstrator\n\"\"\"\nfrom concurrent.futures import ThreadPoolExecutor, wait\nimport time\nimport random\n\n# common\nclass SharedInfo():\n\n def __init__(self):\n\n self.currentTime = 0\n self.a = [None, None, None, None]\n self.finished = [False, False]\n return\n \n def run(self):\n print(\"starting SharedInfo.run()\")\n while sum(self.finished) < 2:\n print(\"sharedInfo run step\")\n print(self.a)\n time.sleep(2)\n\n print(\"finishing SharedInfo.run()\")\n return\n\n# video feeds\nclass GlassesFeed():\n\n def __init__(self, sharedInfo):\n\n self.sharedInfo = sharedInfo\n return\n \n def run(self):\n print(\"starting GlassesFeed.run()\")\n\n for i in range(10):\n\n # generate random number\n r = random.randint(10,20)\n idx = random.randint(0,3)\n #print(\"glassesFeed step: %d\" %r)\n\n self.sharedInfo.a[idx] = r\n time.sleep(1)\n\n self.sharedInfo.finished[0] = True\n print(\"finishing GlassesFeed.run()\")\n return\n\nclass RobotFeed():\n\n def __init__(self, sharedInfo):\n\n self.sharedInfo = sharedInfo\n return\n \n def run(self):\n print(\"starting RobotFeed.run()\")\n for i in range(10):\n\n # generate random number\n r = random.randint(21,30)\n idx = random.randint(0,3)\n #print(\"robotFeed step: %d\" %r)\n self.sharedInfo.a[idx] = r\n time.sleep(1)\n\n self.sharedInfo.finished[1] = True\n print(\"finishing RobotFeed.run()\")\n return\n \n# deep models\nclass RDM():\n\n def __init__(self):\n\n return \nclass ASR():\n\n def __init__(self):\n\n return \nclass IntentionsDetector():\n\n def __init__(self):\n\n return\n \n\n\nif __name__ == '__main__':\n\n sharedInfo = SharedInfo()\n glassesFeed = GlassesFeed(sharedInfo)\n robotFeed = RobotFeed(sharedInfo)\n\n # Create a ThreadPoolExecutor\n with ThreadPoolExecutor(max_workers=3) as executor:\n \n future1 = executor.submit(glassesFeed.run)\n future2 = executor.submit(robotFeed.run)\n future3 = executor.submit(sharedInfo.run)\n\n # Wait for all tasks to complete\n wait([future1, future2, future3])\n","repo_name":"SilviaAbal/N_STEP_DQN_COMPANION","sub_path":"liveDemo.py","file_name":"liveDemo.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"27122963716","text":"import time\nimport json\nfrom slackclient import SlackClient\nfrom peewee import *\nimport threading as th\nimport requests\nimport pprint\nimport stellarnetwork\nimport db_explore as db\npp = pprint.PrettyPrinter(indent=4)\nfrom collections import defaultdict\nimport configparser\n\n\nimport stellarnetwork\n\n\nclass Bot1(object):\n\n def __init__(self,token):\n self.client = SlackClient(token)\n #self.user = 'U1KFBF9SN'\n self.user = 'U1N703XRP'\n #self.user = \n self.s = stellarnetwork.StellarNetwork()\n\n def _send_to_slack(self,msg):\n #self.client.api_call(\"chat.postMessage\",text=msg,channel=\"G1KF0R3PH\",type=\"message\",id=1)\n json_att = json.dumps([msg])\n self.client.api_call(\"chat.postMessage\",channel=\"C04FCJXG9\",parse=\"full\",attachments=json_att)\n\n def process_message(self,msg):\n #self.client.rtm_send_message('testing',json_msg)\n #self.client.rtm_send_message('testing',fut.result())\n for mm in msg:\n print(mm)\n if 'text' in mm and 'user' in mm:\n if 'user' != self.user:\n self.parse_message(mm['text'])\n #if 'type' in mm and mm['type'] == 'message' and 'user' in mm and mm['user'] != self.user:\n # print(mm)\n #self.send_to_agents(self.rd_requester,mm['text'])\n # self.send_to_agents(self.wne_requester,mm['text'])\n \n\n\n def nodes(self):\n self.s.update_nodes()\n node_ids = self.s.node_ids\n node_dict = {}\n for nn in node_ids:\n if len(nn) > 40:\n name = \"@\" + nn[0:6]\n else:\n name = nn\n ans = self.s.get_quorum(nn)\n if 'exception' in ans:\n node_dict[name] = 'missing'\n else:\n ledger = self.s.get_most_recent_ledger(ans)\n if ledger is None:\n node_dict[name] = 'missing'\n else:\n node_dict[name] = 'agree'\n\n num_nodes = len(self.s.node_names)\n items = list(node_dict.items())\n sorted_items = sorted(items, key=lambda v: v[1])\n node_status = \"\\n\".join([\"*{}*: _{}_\".format(ii,jj) for ii,jj in sorted_items])\n text = \"\\n*Ledger:* {}\".format(self.s.last_ledger) + \"\\n\\n(*Node name*: _status_)\\n\" + node_status\n\n\n att = {}\n att['title'] = \"Nodes: {}\".format(num_nodes)\n att['title_link'] = \"http://stellar.network\"\n att['text'] = text\n att['color'] = \"#000\"\n att['fallback'] = pp.pformat(node_dict)\n att['footer'] = \"http://stellar.network\"\n att['ts'] = int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n return None\n\n\n\n\n\n\n def quorum(self,node_name):\n ans = self.s.get_quorum(node_name)\n if 'exception' in ans:\n att = {}\n att['title'] = \"Error\"\n att['title_link'] = \"http://stellar.network\"\n att['text'] = \"_I_ _thought_ _that_ _you_ _asked_ _me_ _about_ _a_ _node_ _named_ `{}`, _but_ _stellar-core_ _returned_ `{}`\".format(node_name, repr(ans))\n att['color'] = \"#ff0000\"\n att['fallback'] = pp.pformat(ans)\n att['footer'] = \"http://stellar.network\"\n att['ts'] = int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n return None\n\n ledger = self.s.get_most_recent_ledger(ans)\n if node_name in self.s.node_names:\n pk = self.s.node_names[node_name]\n else:\n pk = node_name\n\n if ledger is None:\n\n att = {}\n att['title'] = \"Node: {}\".format(ans['node'])\n att['title_link'] = \"http://stellar.network\"\n att['text'] = \"*Public Key:* {}\\n*Status:* _missing_\".format(pk)\n att['color'] = \"#000\"\n att['fallback'] = pp.pformat(ans)\n att['footer'] = \"http://stellar.network\"\n att['ts'] = 
int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n return None\n\n att = {}\n if ledger['missing']:\n miss = \", \".join(ledger['missing'])\n else:\n miss = \"\"\n qset = \", \".join(ledger['value']['v'])\n fw = \", \".join(ledger['fail_with'])\n att['title'] = \"Node: {}\".format(ans['node'])\n att['title_link'] = \"http://stellar.network\"\n att['color'] = \"#000\"\n att['text'] = \"*Ledger:* {}\\n\" \\\n \"*Public Key:* {}\\n\" \\\n \"*Quorum Set:* {}\\n\" \\\n \"*Missing:* {}\\n\" \\\n \"*Fail with:* {}\".format(self.s.last_ledger,\n pk,\n qset,\n miss,\n fw)\n att['fallback'] = pp.pformat(ans)\n att['footer'] = \"http://stellar.network\"\n att['ts'] = int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n\n\n def send_error(self,error_text):\n att = {}\n att['title'] = \"_Error_\"\n att['title_link'] = \"http://stellar.network\"\n att['color'] = \"#000\"\n att['text'] = \"_{}_\".format(error_text)\n att['fallback'] = error_text\n #att['footer'] = \"http://stellar.network\"\n att['ts'] = int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n return None\n\n def parse_message(self,text):\n tokens = text.split()\n if \"?nodes\" in [ii.lower() for ii in tokens]:\n self.nodes()\n\n\n if \"?node\" in tokens:\n idx = tokens.index('?node')\n try: \n node_name = tokens[idx + 1]\n self.quorum(node_name)\n except IndexError:\n self.send_error(\"Please enter a node name (e.g. ?node sdf_watcher1)\")\n\n if \"?offers\" in tokens:\n idx = tokens.index('?offers')\n try:\n buying_asset_code = tokens[idx + 1].upper()\n selling_asset_code = tokens[idx + 2].upper()\n except IndexError:\n self.send_error(\"Please enter two asset names (e.g. ?offers XLM JPY)\")\n return None\n\n self.offers(selling_asset_code, buying_asset_code)\n\n\n if \"?book\" in tokens:\n idx = tokens.index('?book')\n try: \n buying_asset_code = tokens[idx + 1].upper()\n selling_asset_code = tokens[idx + 2].upper()\n except:\n self.send_error(\"Please enter two asset names (e.g. 
?book XLM XRP)\")\n \n self.offers(selling_asset_code, buying_asset_code)\n self.offers(buying_asset_code, selling_asset_code)\n\n\n def build_orderbook(self,offers):\n ob = defaultdict(int)\n buyingasset = offers[0]['buyingassetcode']\n sellingasset = offers[0]['sellingassetcode']\n for oo in offers:\n ob[oo['price']] += oo['amount']\n return ob\n\n def offers(self, selling_asset_code=None, buying_asset_code=None):\n if buying_asset_code == 'XLM':\n offers = db.get_offers(selling_asset_code, None)\n elif selling_asset_code == 'XLM':\n offers = db.get_offers(None, buying_asset_code)\n else:\n offers = db.get_offers(selling_asset_code, buying_asset_code)\n if offers:\n ob = self.build_orderbook(offers)\n textout = \"\\n\".join([\"*{}:* {}\".format(oo,ob[oo]) for oo in sorted(list(ob.keys())) ])\n att = {}\n att['title'] = \"Offers: {} (buy) <-- {} (sell)\".format(buying_asset_code, selling_asset_code)\n att['title_link'] = \"http://stellar.network\"\n att['color'] = \"#000\"\n att['text'] = textout\n att['fallback'] = pp.pformat(offers)\n #att['footer'] = \"http://stellar.network\"\n att['ts'] = int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n else:\n att = {}\n att['title'] = \"Offers: {} (buy) <-- {} (sell)\".format(buying_asset_code, selling_asset_code)\n att['title_link'] = \"http://stellar.network\"\n att['color'] = \"#000\"\n att['text'] = \"_No_ _offers_ _found_\"\n att['fallback'] = \"No offers found\"\n #att['footer'] = \"http://stellar.network\"\n att['ts'] = int(time.time())\n #att['author_name'] = \"stellar-core\"\n att['mrkdwn_in'] = ['text', 'title', 'footer','author']\n self._send_to_slack(att)\n\n\n def _listen(self):\n if self.client.rtm_connect():\n #self.channels = self.client.server.channels.find('testing')\n #print(self.channels)\n while True:\n msg = self.client.rtm_read()\n self.process_message(msg)\n #time.sleep(1)\n\n\n\n\n\n def run(self):\n self.listen_thread = th.Thread(target=self._listen)\n self.listen_thread.start()\n\n\nif __name__ == '__main__':\n config = configparser.ConfigParser()\n config.read('bot.cfg')\n token = config['slack']['token']\n b = Bot1(token)\n b.run()\n","repo_name":"sparrow-ai/stellar.network","sub_path":"market_status_bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":9767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14412326272","text":"#CATch is a simple game made with pygame \n#\nimport sys\nimport pygame\nimport math\nimport random\nfrom Food import Food \n\npygame.init()\n\n#Colors \nwhite = (255, 255, 255)\nblack = (0, 0, 0)\nred = (255, 0, 0)\ngreen = (0, 255, 0) \n\n#images\nbackground = pygame.image.load('background.jpg')\n\ncat_left = pygame.image.load('CAT_LEFT.png')\ncat_right = pygame.image.load('CAT_RIGHT.png')\n\nfood_burger = pygame.image.load('food_burger.png')\nfood_noodles = pygame.image.load('food_noodles.png')\nfood_pie = pygame.image.load('food_pie.png')\nfood_rice = pygame.image.load('food_rice.png')\nfood_shortcake = pygame.image.load('food_shortcake.png')\nfood_donut = pygame.image.load('food_donut.png')\n\nfoodLst = [food_burger, food_noodles, food_pie, food_rice, food_shortcake, food_donut]\n\n# game parameters\nSCREEN_WIDTH, SCREEN_HEIGHT = 600, 600\nFPS = 10\nclock = pygame.time.Clock()\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nbackgroundRect = background.get_rect()\n\npygame.display.set_caption('CATch')\n\nsmallFont = pygame.font.SysFont(\"comicsansms\", 25)\nmedFont = pygame.font.SysFont(\"comicsansms\", 50)\nlargeFont = pygame.font.SysFont(\"comicsansms\", 80)\n\n\ndef text_objects(text, color, size):\n if size == \"small\": \n textSurface = smallFont.render(text, True, color)\n elif size == \"medium\": \n textSurface = medFont.render(text, True, color)\n elif size == \"large\": \n textSurface = largeFont.render(text, True, color)\n return textSurface, textSurface.get_rect()\n\ndef message_to_screen(msg, color, y_displace = 0, size = \"small\"):\n textSurf, textRect = text_objects(msg, color, size)\n textRect.center = (SCREEN_WIDTH/2), (SCREEN_HEIGHT/2)+y_displace\n screen.blit(textSurf, textRect)\n \ndef gameIntro():\n\n intro = True\n \n while intro:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n intro = False\n \n screen.fill(white)\n message_to_screen(\"welcome to\", green, -175, \"medium\")\n message_to_screen(\"CATch\", green, -90, \"large\")\n message_to_screen(\"The objective of this game is to\",\n black)\n message_to_screen(\"eat all the food that you possibly can\", black, 30)\n message_to_screen(\"by catching the food as it falls down the screen\", black, 60)\n message_to_screen(\"Press any key to play!\", black, 150)\n\n pygame.display.update()\n clock.tick(5)\n\n\n# main game loop\ndef gameLoop():\n \n gameExit = False\n gameOver = False\n\n points = 0\n lives = 3\n #for now the cat is represented by a square block \n cat_size = SCREEN_WIDTH/6 \n #coordinates of the cat \n cat_x = SCREEN_WIDTH/2 - cat_size/2 \n cat_y = SCREEN_HEIGHT - cat_size\n cat_x_change = 0\n CAT = cat_right\n #characteristics of food for now\n food_size = cat_size/2\n FOOD_LST = []\n\n \n #characteristics of trash for now\n trash_size = cat_size/2\n trash_x = random.randrange(1, int(SCREEN_WIDTH - trash_size))\n trash_y = 0\n trash_y_change = 0\n\n while not gameExit:\n\n\n \n if lives == 0:\n gameOver = True \n \n for event in pygame.event.get():\n #for debugging only \n #print(event)\n \n if event.type == pygame.QUIT:\n gameOver = True\n\n #moving the cat \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n if cat_x >= 0: \n cat_x_change = -cat_size/4\n CAT = cat_left\n elif event.key == pygame.K_RIGHT:\n if cat_x <= SCREEN_WIDTH - cat_size: \n cat_x_change = cat_size/4\n CAT = cat_right\n if event.type == pygame.KEYUP:\n if event.key == 
pygame.K_LEFT and cat_x_change < 0: \n cat_x_change = 0\n elif event.key == pygame.K_RIGHT and cat_x_change > 0:\n cat_x_change = 0 \n if cat_x_change < 0 and cat_x == 0:\n cat_x = 0\n elif cat_x_change > 0 and cat_x == SCREEN_WIDTH - cat_size:\n cat_x = SCREEN_WIDTH - cat_size\n else:\n cat_x += cat_x_change\n\n #moving the food/trash\n if len(FOOD_LST) == 0 or FOOD_LST[0].get_y() > SCREEN_HEIGHT*random.randrange(4,9)/9:\n food_x = random.randrange(0, int(SCREEN_WIDTH - food_size))\n food_y = 0\n food_type = foodLst[random.randrange(0, len(foodLst))]\n food = Food(food_x, food_y, food_size, food_type) \n FOOD_LST.insert(0, food)\n if trash_y < SCREEN_HEIGHT:\n trash_y += food_size/2\n\n for food in FOOD_LST:\n if food.get_x() in range(int(cat_x - food.get_size()), int(cat_x + cat_size))and \\\n food.get_y() + food.get_size() in range(int(cat_y), SCREEN_HEIGHT):\n FOOD_LST.remove(food)\n points += food.get_points()\n if food.get_y() >= SCREEN_HEIGHT:\n FOOD_LST.remove(food)\n lives -= 1 \n\n #drawing everything \n screen.blit(background, backgroundRect)\n for food in FOOD_LST:\n food.update_y(food.get_size()/2)\n screen.blit(food.get_type(), (food.get_x(), food.get_y()))\n\n screen.blit(CAT, (cat_x, cat_y)) \n HP = 'Lives: ' + str(lives)\n pts = 'Points: ' + str(points)\n HP_text = smallFont.render(HP, True, green)\n pts_text = smallFont.render(pts, True, green)\n screen.blit(HP_text, [SCREEN_WIDTH*.75, 33])\n screen.blit(pts_text, [SCREEN_WIDTH*.75, 66])\n \n pygame.display.update()\n\n clock.tick(FPS)\n\n while gameOver == True:\n screen.fill(white)\n message_to_screen(\"Game Over\", red, -80, \"large\")\n message_to_screen(\"Your score: \" + str(points), black, 40)\n message_to_screen(\"Press C to play again or Q to quit\", black, 75)\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameOver = False\n gameExit = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n gameOver = False \n gameExit = True\n elif event.key == pygame.K_c:\n gameLoop() \n\n pygame.quit()\n quit()\n\ngameIntro() \ngameLoop()\n","repo_name":"julezjw/CATch","sub_path":"CATch.py","file_name":"CATch.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13071417940","text":"import datetime\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils import timezone\nfrom django.db.models import Sum\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.db.models import Q\nfrom bootstrap_modal_forms.generic import (\n BSModalCreateView, BSModalUpdateView, BSModalDeleteView\n)\nfrom .models import (\n Expense, Income, DefaultExpenseMonth, DefaultIncomeMonth, Account,\n Method, TemplateExpense, Loan\n)\nfrom .forms import LoginForm, IncomeForm, ExpenseForm, BalanceForm, LoanForm\nfrom .const import const_data\n\ndef can_add_default_inex(year, month):\n \"\"\"デフォルトの収支を追加可能か判定する。\n\n Parameters\n ----------\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n bool\n デフォルト収支を追加可能かどうか\n \"\"\"\n\n # 今月の初日を取得\n current_time = timezone.now()\n current_month_first_date = datetime.date(\n current_time.year, current_time.month, 1\n )\n\n # 過去には追加不可\n if datetime.date(year, month, 1) < current_month_first_date:\n return False\n\n return True\n\ndef can_update_or_delete_inex(year, month):\n \"\"\"収支を更新・削除可能か判定する。\n\n Parameters\n ----------\n year : int\n 現在の支払年\n month : int\n 現在の支払月\n\n Returns\n -------\n bool\n 収支を更新・削除可能かどうか\n \"\"\"\n\n # 前月の初日を取得\n current_time = timezone.now()\n current_month_first_date = datetime.date(\n current_time.year, current_time.month, 1\n )\n last_month_first_date = (current_month_first_date\n - relativedelta(months=1))\n\n # 現在の支払月の初日を取得\n old_pay_date = datetime.date(year, month, 1)\n\n # 現在の支払月が先月より前であった場合、更新を許可しない\n if old_pay_date < last_month_first_date:\n return False\n\n return True\n\ndef add_incs_from_default(year, month):\n \"\"\"デフォルトの収支から収入を追加する。\n\n Parameters\n ----------\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n int\n 追加した収入の数\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n add_num = 0\n\n # デフォルトの収入から収入を追加\n def_inc_months = DefaultIncomeMonth.objects.filter(month=month)\n this_month_incs = Income.objects.filter(\n pay_date__gte=first_date, pay_date__lte=last_date\n )\n for def_inc_month in def_inc_months:\n can_add = True\n def_inc = def_inc_month.def_inc # 追加対象の収入\n # 既に登録されているかのチェック\n for this_month_inc in this_month_incs:\n if def_inc.name == this_month_inc.name:\n # 既に登録されている場合\n can_add = False\n break\n # 追加\n if can_add:\n # まだ登録されていない場合\n Income(\n name=def_inc.name,\n pay_date=datetime.date(year, month, def_inc.pay_day),\n method=def_inc.method, amount=def_inc.amount,\n undecided=def_inc.undecided,\n ).save()\n add_num += 1\n\n return add_num\n\ndef add_exps_from_default_and_loan(year, month):\n \"\"\"デフォルトの支出とローンから支出を追加する。\n\n Parameters\n ----------\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n int\n 追加した支出の数\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n add_num = 0\n\n # デフォルトの支出から支出を追加\n def_exp_months = DefaultExpenseMonth.objects.filter(month=month)\n this_month_exps = Expense.objects.filter(\n pay_date__gte=first_date, pay_date__lte=last_date\n )\n for def_exp_month in def_exp_months:\n can_add = True\n def_exp = 
def_exp_month.def_exp # 追加対象の支出\n # 既に登録されているかのチェック\n for this_month_exp in this_month_exps:\n if def_exp.name == this_month_exp.name:\n # 既に登録されている場合\n can_add = False\n break\n # 追加\n if can_add:\n # まだ登録されていない場合\n Expense(\n name=def_exp.name,\n pay_date=datetime.date(year, month, def_exp.pay_day),\n method=def_exp.method,\n amount=def_exp.amount, undecided=def_exp.undecided,\n ).save()\n add_num += 1\n\n # ローンから支出を追加\n loans = Loan.objects.filter(\n (Q(first_year__lt=year) | Q(first_year=year, first_month__lte=month)),\n (Q(last_year__gt=year) | Q(last_year=year, last_month__gte=month))\n )\n for loan in loans:\n can_add = True\n # 既に登録されているかのチェック\n for this_month_exp in this_month_exps:\n if loan.name == this_month_exp.name:\n # 既に登録されている場合\n can_add = False\n break\n # 追加\n if can_add:\n # まだ登録されていない場合\n if year == loan.first_year and month == loan.first_month:\n amount = loan.amount_first\n else:\n amount = loan.amount_from_second\n\n Expense(\n name=loan.name,\n pay_date=datetime.date(year, month, loan.pay_day),\n method=loan.method, amount=amount, undecided=loan.undecided,\n ).save()\n add_num += 1\n\n return add_num\n\ndef get_balance_done(year, month):\n \"\"\"該当月までの残高(完了分)を取得\n\n Parameters\n ----------\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n int\n 該当月までの残高(完了分)\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 今月までの収支リストを取得\n incs_to_this_month = Income.objects.filter(\n pay_date__lte=last_date\n )\n exps_to_this_month = Expense.objects.filter(\n pay_date__lte=last_date\n )\n\n # 残高を計算\n # 今月までの収入(完了分)の合計\n done_incs = incs_to_this_month.filter(done=True)\n done_inc_sums = done_incs.aggregate(Sum('amount'))\n done_inc_sum = done_inc_sums['amount__sum']\n if done_inc_sum is None:\n done_inc_sum = 0\n # 今月の支出(完了分)を減算\n done_exps = exps_to_this_month.filter(done=True)\n done_exp_sums = done_exps.aggregate(Sum('amount'))\n done_exp_sum = done_exp_sums['amount__sum']\n if done_exp_sum is None:\n done_exp_sum = 0\n\n return done_inc_sum - done_exp_sum\n\ndef get_balance(year, month):\n \"\"\"該当月までの残高を取得\n\n Parameters\n ----------\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n int\n 該当月までの残高\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 今月までの収支リストを取得\n incs_to_this_month = Income.objects.filter(\n pay_date__lte=last_date\n )\n exps_to_this_month = Expense.objects.filter(\n pay_date__lte=last_date\n )\n\n # 残高を計算\n # 今月までの収入(完了分)の合計\n inc_sums = incs_to_this_month.aggregate(Sum('amount'))\n inc_sum = inc_sums['amount__sum']\n if inc_sum is None:\n inc_sum = 0\n # 今月の支出(完了分)を減算\n exp_sums = exps_to_this_month.aggregate(Sum('amount'))\n exp_sum = exp_sums['amount__sum']\n if exp_sum is None:\n exp_sum = 0\n\n return inc_sum - exp_sum\n\n\n# Create your views here.\n\nclass login(LoginView):\n form_class = LoginForm\n template_name = \"income_and_expense/login.html\"\n\n\nclass logout(LogoutView):\n pass\n\n\nclass IncomeCreateView(BSModalCreateView):\n template_name = 'income_and_expense/create_inc.html'\n form_class = IncomeForm\n success_message = '成功: %(name)sが追加されました。'\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:income',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass IncomeUpdateView(BSModalUpdateView):\n model = Income\n template_name = 
'income_and_expense/update_inc.html'\n form_class = IncomeForm\n success_message = '成功: %(name)sが更新されました。'\n\n def post(self, request, *args, **kwargs):\n if not can_update_or_delete_inex(kwargs['year'], kwargs['month']):\n messages.error(\n self.request,\n \"失敗: 古い収入は更新できません。\"\n )\n # incomeビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:income',\n args=(kwargs['year'], kwargs['month'])\n )\n )\n\n return super().post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:income',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass IncomeDeleteView(BSModalDeleteView):\n model = Income\n template_name = 'income_and_expense/delete_inc.html'\n form_class = IncomeForm\n\n def post(self, request, *args, **kwargs):\n pay_date = Income.objects.get(pk=kwargs['pk']).pay_date\n\n if not can_update_or_delete_inex(pay_date.year, pay_date.month):\n messages.error(\n self.request,\n \"失敗: 古い収入は削除できません。\"\n )\n # incomeビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:income',\n args=(kwargs['year'], kwargs['month'])\n )\n )\n\n messages.success(\n self.request,\n \"成功: %sが削除されました。\" % Income.objects.get(id=kwargs['pk']).name\n )\n return super().delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:income',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass ExpenseCreateView(BSModalCreateView):\n template_name = 'income_and_expense/create_exp.html'\n form_class = ExpenseForm\n success_message = '成功: %(name)sが追加されました。'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n today = datetime.date.today()\n template_exps = TemplateExpense.objects.all()\n context_template_exps = []\n\n for template_exp in template_exps:\n context_template_exp = {}\n\n # 名前(テンプレート)\n context_template_exp[\"template_name\"] = str(template_exp.template_name)\n # 名前\n context_template_exp[\"name\"] = str(template_exp.name)\n # 支払方法\n context_template_exp[\"method\"] = str(template_exp.method)\n # 未定\n context_template_exp[\"undecided\"] = str(template_exp.undecided)\n # 完了\n context_template_exp[\"done\"] = str(template_exp.done)\n\n # 支払日\n if template_exp.date_type == 'today':\n pay_date = today\n else:\n if today.day <= template_exp.limit_day_of_this_month:\n pay_date = datetime.date(\n today.year, today.month, template_exp.pay_day\n )\n else:\n pay_date = datetime.date(\n today.year, today.month, template_exp.pay_day\n ) + relativedelta(months=1)\n context_template_exp[\"pay_date\"] = \"{0}-{1}-{2}\".format(\n pay_date.year,\n str(pay_date.month).zfill(2),\n str(pay_date.day).zfill(2)\n )\n\n context_template_exps.append(context_template_exp)\n\n context['template_exps'] = context_template_exps\n return context\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:expense',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass ExpenseUpdateView(BSModalUpdateView):\n model = Expense\n template_name = 'income_and_expense/update_exp.html'\n form_class = ExpenseForm\n success_message = '成功: %(name)sが更新されました。'\n\n def post(self, request, *args, **kwargs):\n if not can_update_or_delete_inex(kwargs['year'], kwargs['month']):\n messages.error(\n self.request,\n \"失敗: 古い支出は更新できません。\"\n )\n # incomeビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:expense',\n args=(kwargs['year'], kwargs['month'])\n )\n )\n\n return super().post(request, *args, **kwargs)\n\n 
def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:expense',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass ExpenseDeleteView(BSModalDeleteView):\n model = Expense\n template_name = 'income_and_expense/delete_exp.html'\n form_class = ExpenseForm\n\n def post(self, request, *args, **kwargs):\n pay_date = Expense.objects.get(pk=kwargs['pk']).pay_date\n\n if not can_update_or_delete_inex(pay_date.year, pay_date.month):\n messages.error(\n self.request,\n \"失敗: 古い支出は削除できません。\"\n )\n # expenseビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:expense',\n args=(kwargs['year'], kwargs['month'])\n )\n )\n\n messages.success(\n self.request,\n \"成功: %sが削除されました。\" % Expense.objects.get(id=kwargs['pk']).name\n )\n return super().delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:expense',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass BalanceUpdateView(BSModalUpdateView):\n model = Account\n template_name = 'income_and_expense/update_balance.html'\n form_class = BalanceForm\n success_message = '成功: %(user)s%(bank)sが更新されました。'\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:balance',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass LoanCreateView(BSModalCreateView):\n template_name = 'income_and_expense/create_loan.html'\n form_class = LoanForm\n success_message = '成功: %(name)sが追加されました。'\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:loan',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass LoanUpdateView(BSModalUpdateView):\n model = Loan\n template_name = 'income_and_expense/update_loan.html'\n form_class = LoanForm\n success_message = '成功: %(name)sが更新されました。'\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:loan',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\nclass LoanDeleteView(BSModalDeleteView):\n model = Loan\n template_name = 'income_and_expense/delete_loan.html'\n form_class = LoanForm\n\n def post(self, request, *args, **kwargs):\n messages.success(\n self.request,\n \"成功: %sが削除されました。\" % Loan.objects.get(id=kwargs['pk']).name\n )\n return super().delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy(\n 'income_and_expense:loan',\n args=[self.kwargs['year'], self.kwargs['month']]\n )\n\n\n@login_required\ndef index(request):\n \"\"\"トップページ用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n\n Returns\n -------\n HttpResponseRedirect\n HttpResponseRedirectオブジェクト\n \"\"\"\n\n current_time = timezone.now()\n\n # incomeビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:income',\n args=(current_time.year, current_time.month)\n )\n )\n\n@login_required\ndef move_another_page(request):\n \"\"\"別画面移動用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n\n Returns\n -------\n HttpResponseRedirect\n HttpResponseRedirectオブジェクト\n \"\"\"\n\n # 適切なビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n request.GET.get(\"path_name\"),\n args=(request.GET.get(\"year\"), request.GET.get(\"month\"))\n )\n )\n\n@login_required\ndef income(request, year, month):\n \"\"\"income用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponse\n HttpResponseオブジェクト\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = 
(\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 先月の代表日\n last_month_date = first_date - relativedelta(months=1)\n\n # 先月までの口座残高を取得\n last_mon_balance = get_balance(\n last_month_date.year, last_month_date.month\n )\n\n # 今月の収入リストを取得\n this_month_incs = Income.objects.order_by(\n 'method__account__user', 'method'\n ).filter(pay_date__gte=first_date, pay_date__lte=last_date)\n\n # 今月の収入の合計を取得\n inc_sum = (last_mon_balance\n + (this_month_incs.aggregate(Sum('amount'))['amount__sum'] or 0))\n\n return render(request, 'income_and_expense/income.html', {\n 'path_name': const_data.const.PATH_NAME_INCOME,\n 'this_year': year,\n 'this_mon': month,\n 'incs': this_month_incs,\n 'last_mon_balance': last_mon_balance,\n 'inc_sum': inc_sum,\n })\n\n@login_required\ndef add_default_incs(request, year, month):\n \"\"\"add_default_incs用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponseRedirect\n HttpResponseRedirectオブジェクト\n \"\"\"\n\n # デフォルトの収入から収入を追加\n if can_add_default_inex(year, month):\n if add_incs_from_default(year, month) > 0:\n messages.success(request, \"成功: デフォルト収入が追加されました。\")\n else:\n messages.error(request, \"失敗: 追加できるデフォルト収入が存在しませんでした。\")\n else:\n messages.error(request, \"失敗: 過去にはデフォルト収入を追加できません。\")\n\n # incomeビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:income',\n args=(year, month)\n )\n )\n\n@login_required\ndef expense(request, year, month):\n \"\"\"expense用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponse\n HttpResponseオブジェクト\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 先月の代表日\n last_month_date = first_date - relativedelta(months=1)\n\n # 先月の口座残高を取得\n last_mon_balance = get_balance(\n last_month_date.year, last_month_date.month\n )\n\n # 今月の支出リストを取得\n this_month_exps = Expense.objects.order_by(\n 'method__account__user', 'method'\n ).filter(pay_date__gte=first_date, pay_date__lte=last_date)\n\n # 今月の支出の合計を取得\n exp_sum = this_month_exps.aggregate(Sum('amount'))['amount__sum'] or 0\n\n # 今月の残高を取得\n balance = get_balance(year, month)\n\n return render(request, 'income_and_expense/expense.html', {\n 'path_name': const_data.const.PATH_NAME_EXPENSE,\n 'this_year': year,\n 'this_mon': month,\n 'exps': this_month_exps,\n 'exp_sum': exp_sum,\n 'balance': balance,\n })\n\n@login_required\ndef add_default_exps(request, year, month):\n \"\"\"add_default_exps用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponseRedirect\n HttpResponseRedirectオブジェクト\n \"\"\"\n\n # デフォルトの支出とローンから支出を追加\n if can_add_default_inex(year, month):\n if add_exps_from_default_and_loan(year, month) > 0:\n messages.success(request, \"成功: デフォルト支出が追加されました。\")\n else:\n messages.error(request, \"失敗: 追加できるデフォルト支出が存在しませんでした。\")\n else:\n messages.error(request, \"失敗: 過去にはデフォルト支出を追加できません。\")\n\n # expsenseビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:expense',\n args=(year, month)\n )\n )\n\n@login_required\ndef balance(request, year, month):\n \"\"\"balanceページ用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponse\n 
HttpResponseオブジェクト\n \"\"\"\n\n # 各口座の実残高を取得\n accounts = Account.objects.all().order_by('user') # 全口座\n balances = [] # 各口座の実残高\n balance_sum = 0 # 口座の実残高の合計\n for account in accounts:\n balances.append({\n 'account': account, 'balance': \"¥{:,}\".format(account.balance)\n })\n balance_sum += account.balance\n\n # DB上の残高(完了分)を取得\n balance_on_db = get_balance_done(year, month)\n\n # 口座の実残高とDB上残高(完了分)の誤差を取得\n balance_diff = balance_sum - balance_on_db\n\n return render(request, 'income_and_expense/balance.html', {\n 'path_name': const_data.const.PATH_NAME_BALANCE,\n 'this_year': year,\n 'this_mon': month,\n 'accounts': accounts,\n 'balance_sum': balance_sum,\n 'balance_on_db': balance_on_db,\n 'balance_diff': balance_diff,\n })\n\n@login_required\ndef account_require(request, year, month):\n \"\"\"account_requireページ用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponse\n HttpResponseオブジェクト\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 今月の支出リストを取得\n this_month_exps = Expense.objects.filter(\n pay_date__gte=first_date, pay_date__lte=last_date\n )\n\n # 各口座の必要金額を取得\n accounts = Account.objects.all().order_by('user') # 全口座\n account_requires = [] # 各口座の必要金額\n require_sum = 0 # 必要金額の合計値\n insufficient_sum = 0 # 不足額の合計値\n is_insufficient = False # 口座残高が不足しているかどうか\n insufficient_amount = 0 # 各口座の不足額\n for account in accounts:\n require = this_month_exps.filter(\n method__account=account, done=False\n ).aggregate(Sum('amount'))['amount__sum']\n if require is None:\n require = 0\n\n require_sum += require\n\n if account.balance < require:\n is_insufficient = True\n insufficient_amount = require - account.balance\n else:\n is_insufficient = False\n insufficient_amount = 0\n\n insufficient_sum += insufficient_amount\n\n account_require = {\n 'account': account, 'require': \"¥{:,}\".format(require),\n 'is_insufficient': is_insufficient,\n 'insufficient_amount': \"¥{:,}\".format(insufficient_amount)\n }\n account_requires.append(account_require)\n\n return render(request, 'income_and_expense/account_require.html', {\n 'path_name': const_data.const.PATH_NAME_ACCOUNT_REQUIRE,\n 'this_year': year,\n 'this_mon': month,\n 'account_requires': account_requires,\n 'require_sum': \"¥{:,}\".format(require_sum),\n 'insufficient_sum': \"¥{:,}\".format(insufficient_sum),\n })\n\n@login_required\ndef method_require(request, year, month):\n \"\"\"method_requireページ用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponse\n HttpResponseオブジェクト\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 今月の支出リストを取得\n this_month_exps = Expense.objects.filter(\n pay_date__gte=first_date, pay_date__lte=last_date\n )\n\n # 支払方法別の必要金額を取得\n # 全支払方法\n methods = Method.objects.all().order_by(\n 'account__user', 'account__bank'\n )\n method_requires = [] # 支払方法別の必要金額\n require_sum = 0 # 必要金額の合計値\n for method in methods:\n require = this_month_exps.filter(\n method=method, done=False\n ).aggregate(Sum('amount'))['amount__sum']\n if require is None:\n require = 0\n\n require_sum += require\n\n method_require = {\n 'method': method, 'require': \"¥{:,}\".format(require),\n }\n 
method_requires.append(method_require)\n\n return render(request, 'income_and_expense/method_require.html', {\n 'path_name': const_data.const.PATH_NAME_METHOD_REQUIRE,\n 'this_year': year,\n 'this_mon': month,\n 'method_requires': method_requires,\n 'require_sum': \"¥{:,}\".format(require_sum),\n })\n\n\n@login_required\ndef method_done(request, year, month, pk):\n \"\"\"method_doneページ用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n pk : int\n 支払方法のpk\n\n Returns\n -------\n HttpResponse\n HttpResponseオブジェクト\n \"\"\"\n\n # 会計開始日と終了日を取得\n first_date = datetime.date(year, month, 1)\n last_date = (\n first_date + relativedelta(months=1) - datetime.timedelta(days=1)\n )\n\n # 該当の支払方法の支出をすべて支払済に変更\n target_exps = Expense.objects.filter(method__pk=pk,\n pay_date__gte=first_date, pay_date__lte=last_date\n )\n for target_exp in target_exps:\n target_exp.done = True\n target_exp.undecided = False\n target_exp.save()\n\n messages.success(request, \"成功: 支払済一括登録されました。\")\n\n # method_requireビューへリダイレクト\n return HttpResponseRedirect(\n reverse(\n 'income_and_expense:method_require',\n args=(year, month)\n )\n )\n\n@login_required\ndef loan(request, year, month):\n \"\"\"loanページ用のビュー関数。\n\n Parameters\n ----------\n request : HttpRequest\n HttpRequestオブジェクト\n year : int\n 会計年\n month : int\n 会計月\n\n Returns\n -------\n HttpResponse\n HttpResponseオブジェクト\n \"\"\"\n\n loans_and_completes = [] # 各ローンと終了しているかどうか\n\n # ローン一覧を取得\n loans = Loan.objects.all().order_by('method') # 全ローン\n\n for loan in loans:\n loan_and_complete = {}\n loan_and_complete['loan'] = loan\n\n is_over_year = year > loan.last_year\n is_same_year_and_over_month = (\n (year == loan.last_year) and (month > loan.last_month)\n )\n loan_and_complete['complete'] = (\n is_over_year or is_same_year_and_over_month\n )\n\n loans_and_completes.append(loan_and_complete)\n\n return render(request, 'income_and_expense/loan.html', {\n 'path_name': const_data.const.PATH_NAME_LOAN,\n 'this_year': year,\n 'this_mon': month,\n 'loans_and_completes': loans_and_completes,\n })","repo_name":"anndddooh/income_and_expense","sub_path":"income_and_expense/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30639,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"39279469234","text":"import re\n\nfrom yom.errors import *\nfrom yom.constants import scopes, fields\n\n\nclass YomValidator(object):\n is_valid = False\n\n def validate(self):\n is_valid = self._validate()\n self.is_valid = is_valid\n return is_valid\n\n def cleaned_data(self):\n if not self.is_valid:\n raise YomValidationError('Data is not vaild or not validated yet')\n return self._cleaned_data()\n\n\nclass Hash(YomValidator):\n \"\"\"\n Application hash validator (128-bit hex number as string)\n \"\"\"\n def __init__(self, app_hash):\n self.data = app_hash\n\n def _validate(self):\n if not self.data:\n raise YomValidationError('Hash argument is required')\n if not isinstance(self.data, basestring):\n raise YomValidationError('Hash argument must be a string')\n if re.match(r'[0-9a-f]{32}', self.data, re.I) is None:\n raise YomValidationError('Invalid hash %s' % self.data)\n return True\n\n def _cleaned_data(self):\n return self.data.lower()\n\n\nclass AppForm(YomValidator):\n \"\"\"\n Validator of application's creation/edition form.\n \"\"\"\n def __init__(self, new_item=True, data=None, args=None):\n if not data and not args:\n raise YomError('At least one of data or args should be specified')\n self.data = dict.fromkeys(fields)\n self.data.update(data or self._data_from_args(args))\n self.new_item = new_item\n\n def _data_from_args(self, args):\n return {name: getattr(args, 'input_%s' % name)\n for name in fields}\n\n def _validate(self):\n if self.new_item:\n if not self.data['title']:\n raise YomValidationError('Title field is required')\n if not self.data['scopes']:\n raise YomValidationError('Select at least one scope')\n else:\n if not any(self.data[key] for key in fields):\n raise YomValidationError('Supply data to edit')\n\n if self.data['scopes']:\n undefined_scopes = set(self.data['scopes']) - set(scopes)\n if undefined_scopes:\n raise YomValidationError(\n 'Undefined scopes: %s' % ', '.join(undefined_scopes))\n return True\n\n def _cleaned_data(self):\n return {key: value for key, value in self.data.items()\n if value is not None}\n","repo_name":"hackprime/yom","sub_path":"yom/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36603002539","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport random\nimport numpy as np\nimport pandas as pd\nimport math\nimport copy\nimport time\n\n\n# In[2]:\n\n\nfpath=\"input.txt\"\n# fpath=\"./test4.txt\"\nf=open(fpath,\"r\")\nf=f.readlines()\ncity_size=int(f[0].replace(\"\\n\",\"\"))\ncities={}\n\nfor i in range(len(f[1:])):\n xyz_list=(f[1:][i].replace(\"\\n\",\"\").split(\" \")) \n xyz=tuple(int(i) for i in xyz_list)\n cities[i]=xyz\n\n# nodes=list(cities.keys())\n\n\n# In[3]:\n\n\n# cities\n\n\n# ## initialization\n\n# In[8]:\n\n\ndef initializePopulation(pop_size,city_size,dist_matrix):\n initial_pop = []\n for i in range(pop_size):\n path = generate_path(city_size,dist_matrix)\n initial_pop.append(path)\n return initial_pop\n\ndef generate_path(city_size,dist_matrix):\n path = []\n person = random.randint(0,city_size-1)\n path.append(person)\n pop_dist = 0\n for _ in range(0,city_size-1):\n k =0\n dist_tmp = float(\"inf\")\n while k < city_size:\n if k not in path and dist_matrix[k][person] < dist_tmp:\n j = k\n dist_tmp = dist_matrix[k][person]\n k += 1\n path.append(j)\n person = j\n pop_dist += dist_tmp\n return path \n\ndef cityInPath(path,city):\n for i in path:\n if i == city:\n return True\n return False\n\n# def pathInPop(pop,path):\n# for i in pop:\n# if i==path:\n# return True\n# return False\n\ndef factorial(n):\n res=1\n for i in range(2,n+1):\n res*=i\n return res\n\ndef generate_tours(initial_pop):\n tours=copy.deepcopy(initial_pop)\n for tour in tours:\n start=tour[0]\n tour.append(start)\n return tours\n\n\n# ## distance & fitness value\n\n# In[5]:\n\n\ndef cal_dis_matrix(cities):\n cities_df=pd.DataFrame(cities)\n cities_v=np.array(cities_df).astype(int)\n cities_d=cities_v\n dist = np.zeros((cities_v.shape[1],cities_d.shape[1])) #distance matrix\n for i in range(cities_v.shape[1]):\n for j in range(cities_d.shape[1]):\n distance=math.sqrt(np.sum((cities_v[:,i]-cities_d[:,j])**2))\n if distance==0:\n distance=None\n dist[i,j]=distance\n return dist\n\ndef cal_tour_dis(tour,dist_matrix):\n distance=0\n for i in range(len(tour)-1):\n distance += dist_matrix[tour[i],tour[i+1]]\n return distance\n\ndef fitness(pop_size,ini_tours,dist_matrix):\n values=[]\n for i in range(pop_size):\n tour=ini_tours[i]\n value=cal_tour_dis(tour,dist_matrix)\n values.append(1000.0/value) #the lower the distance, the higher value for that tour\n if values[i]<0:\n values[i]=0\n return values\n\n\n# ## roulette slection & crossover & mutation\n\n# In[6]:\n\n\ndef roulette_slection(pop,pop_size,values):\n prob=[]\n for i in range(len(values)):\n prob.append(values[i]/sum(values)) \n\n pop_new=[] #select new population\n lower_bound=1/pop_size/2\n higher_bound=1/pop_size\n for i in range(len(values)):\n rand=np.random.uniform(0, higher_bound)\n for j in range(len(values)): \n if rand<=prob[j] and pop[j] not in pop_new: #if its possibility > the random number,take it\n pop_new.append(pop[j])\n \n return pop_new\n\ndef crossover(pop,crossover_rate):\n offspring = []\n cut_point = int(len(pop)/random.randint(2,4))\n father = pop[:cut_point]\n mother = pop[cut_point:]\n np.random.shuffle(father)\n np.random.shuffle(mother)\n son = []\n daughter = []\n\n for i in range(cut_point):\n if np.random.uniform(0,1) <= crossover_rate:\n cross_pos = np.random.randint(1,math.ceil(city_size/2))\n father_cros1 = father[i][:cross_pos]\n mother_cros1 = mother[i][cross_pos:]\n if len(set(father_cros1)&set(mother_cros1)) == 0:\n son = father_cros1+mother_cros1\n daughter = 
mother[i][:cross_pos]+father[i][cross_pos:]\n else:\n son = father[i]\n daughter = mother[i]\n \n if len(son)&len(daughter) != 0:\n offspring.append(son)\n offspring.append(daughter)\n \n return offspring\n\ndef mutation(offspring,mutation_rate):\n for i in range(len(offspring)):\n if np.random.uniform(0, 1) <= mutation_rate:\n mutation_pos1 = np.random.randint(0,len(offspring[i]))\n mutation_pos2 = np.random.randint(0,len(offspring[i]))\n offspring[i][mutation_pos1],offspring[i][mutation_pos2] = offspring[i][mutation_pos2],offspring[i][mutation_pos1]\n \n return offspring\n\n\n# In[9]:\n\n\npop_size = 20\nshortest_tours = []\nt_dist=1000000000\n#calculate distance matrix\ndist=cal_dis_matrix(cities)\nfor i in range(100): #number of initial populations\n #initialize population\n initial_pop=initializePopulation(pop_size,city_size,dist)\n ini_tours=generate_tours(initial_pop)\n #\n candidates_pop = []\n crossover_rate = 0.65\n mutation_rate = 0.5\n #GA\n #select\n values = fitness(pop_size,ini_tours,dist)\n# pop_new = roulette_slection(initial_pop,pop_size,values)\n for j in range(30):\n #crossover\n c_offspring = crossover(initial_pop,crossover_rate)\n #mutation\n m_offspring = mutation(c_offspring,mutation_rate)\n #select shortest tour for this initial population\n all_pop = initial_pop+c_offspring+m_offspring\n candidates_pop += all_pop\n \n candidates_pop = [list(t) for t in set(tuple(_) for _ in candidates_pop)]\n# print(len(candidates_pop)) \n candidates_tour = generate_tours(candidates_pop)\n for t in candidates_tour:\n if cal_tour_dis(t,dist) str:\n nums = lines[0].raw_line.split(\",\")\n nums = [int(num) for num in nums]\n\n lines = lines[2:]\n i = 0\n bingos = []\n while i < len(lines):\n bingos.append(parse_bingo(lines))\n lines = lines[6:]\n\n game = BingoGame(bingos)\n\n i = 0\n last_num = nums[0]\n while not game.check_bingos():\n game.mark(nums[i])\n last_num = nums[i]\n i += 1\n\n winning_bingo = game.get_winning_bingo()\n sum_unmarked = sum(winning_bingo.get_unmarked_numbers())\n\n solution = last_num * sum_unmarked\n return str(solution)\n\n\nprint(get_solution(input_lines))","repo_name":"MarekChleb/advent-of-code","sub_path":"2021/ex4/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12725454349","text":"import pandas as pd\nfrom copy import deepcopy\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport plotly\n\nimport plotly.figure_factory as ff\n\n__all__ = [\n \"plot\",\n \"plot_low_high_prices\",\n \"plot_moving_average\",\n \"plot_scatter_matrix\",\n \"plot_dist_returns\",\n \"plot_returns_scatter_matrix\",\n \"plot_cum_return\",\n \"plot_bollinger_bands\",\n \"plot_rsi\",\n \"plot_cum_profits\"\n]\n\n\n\n\n\ndef plot_cum_profits(stock_data:pd.DataFrame, strategy_profit_name:str, params:dict,\n title:str) -> plotly.graph_objects.Figure:\n profits = [stock_data.query(f'stock_name==\"{stock}\"')[[strategy_profit_name, 'stock_name']] for stock in\n params.get(\n 'STOCK_CODES')]\n total_cum_profits = deepcopy(profits[0])\n total_cum_profits = total_cum_profits[~total_cum_profits.index.duplicated()]\n indexes = total_cum_profits.index\n for i in range(1,len(profits)):\n profits_stock = profits[i]\n profits_stock = profits_stock[~profits_stock.index.duplicated()]\n total_cum_profits[strategy_profit_name] += profits_stock[strategy_profit_name]\n\n total_cum_profits_df = pd.DataFrame.from_dict({strategy_profit_name: total_cum_profits[strategy_profit_name],\n 'stock_name': ['Total Strategy Cumulative Profits' for _ in\n range(len(total_cum_profits))]})\n total_cum_profits_df.index = indexes\n cum_profits_data = pd.concat([total_cum_profits_df, *profits])\n cum_profits_data.reset_index(drop=False, inplace=True)\n return plot(cum_profits_data, x='Date', y=strategy_profit_name, title=f\"Cumulative Profits generated by {title} \" \\\n \"strategy\")\n\n\n\n\ndef plot_rsi(stock_data: pd.DataFrame) -> plotly.graph_objects.Figure:\n return plot(stock_data, y=\"RSI\", title=\"Relative Strength Index (RSI)\")\n\n\ndef add_trace_bollinger_bands(\n fig: plotly.graph_objects.Figure, df: pd.DataFrame\n) -> plotly.graph_objects.Figure:\n\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.upper_bound,\n name=\"upper bound\",\n line=dict(color=\"firebrick\", width=1, dash=\"dash\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.lower_bound,\n name=\"lower bound\",\n line=dict(color=\"royalblue\", width=1, dash=\"dash\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=df.index, y=df.Close, name=\"Closed\", line=dict(color=\"firebrick\", width=1)\n )\n )\n return fig\n\ndef plot_bollinger_bands(df: pd.DataFrame, name: str) -> plotly.graph_objects.Figure:\n df = df.query(f'stock_name==\"{name}\"')\n fig = go.Figure()\n # Create and style traces\n fig = add_trace_bollinger_bands(fig, df)\n # Edit the layout\n fig.update_layout(\n title=f\"Bollinger Bands and Close {name} stock\",\n xaxis_title=\"Date\",\n yaxis_title=\"Prices\",\n )\n return fig\n\ndef plot_cum_return(stock_data_returns: pd.DataFrame) -> plotly.graph_objects.Figure:\n try:\n return plot(stock_data_returns, y=\"cum_returns\", title=\"Cumulative Returns\")\n except ValueError:\n stock_data_returns = stock_data_returns[~stock_data_returns.index.duplicated()]\n\n return plot(stock_data_returns, y=\"cum_returns\", title=\"Cumulative Returns\")\n\ndef plot_returns_scatter_matrix(stock_data: pd.DataFrame, params:dict, title:str=\"Scatter Matrix for \"\n \"returns\")-> \\\n plotly.graph_objects.Figure:\n\n df_dic = {}\n for stock_name in list(stock_data.stock_name.unique()):\n df = stock_data.query(f'stock_name==\"{stock_name}\"')\n return_stock_name =f'{stock_name.capitalize()} returns'\n df.rename(columns={'returns':return_stock_name}, inplace=True)\n df = df[~df.index.duplicated()]\n 
df_dic[stock_name] = df\n\n comp = pd.concat(\n [df[f'{stock_name.capitalize()} returns'] for stock_name, df in df_dic.items()], axis=1\n )\n\n\n return px.scatter_matrix(comp, title=title)\n\n\ndef plot_dist_returns(\n stock_data_returns: pd.DataFrame,\n params: dict\n) -> plotly.graph_objects.Figure:\n hist_data = [\n stock_data_returns.query(f'stock_name==\"{stock}\"')[\"returns\"]\n for stock in params.get(\"STOCK_CODES\")\n ]\n group_labels = [stock for stock in params.get(\"STOCK_CODES\")]\n try:\n fig = ff.create_distplot(hist_data, group_labels, bin_size=0.01)\n except ValueError:\n for data in hist_data:\n data.dropna(inplace=True)\n fig = ff.create_distplot(hist_data, group_labels, bin_size=0.01)\n return fig\n\n\ndef plot_scatter_matrix(\n data: dict, params: dict, title=\"Scatter Matrix for Open Prices\"\n) -> plotly.graph_objects.Figure:\n # crypto_comp = pd.concat(\n # [data[stock][\"Open\"] for stock in params.get(\"STOCK_CODES\")], axis=1\n # )\n open_data_dic = {}\n for stock_name, df in data.items():\n open_price_name =f'{stock_name.capitalize()} Open'\n df.rename(columns={'Open':open_price_name}, inplace=True)\n df = df[~df.index.duplicated()]\n open_data_dic[open_price_name] = df\n\n comp = pd.concat(\n [df[open_price_name] for open_price_name, df in open_data_dic.items()], axis=1\n )\n # comp.columns = [\n # f\"{stock.capitalize()} Open\" for stock in params.get(\"STOCK_CODES\")\n # ]\n return px.scatter_matrix(comp, title=title)\n\n\ndef plot(\n data: pd.DataFrame,\n y,\n title=None,\n x=\"date\",\n label=\"stock_name\",\n line_shape=\"spline\",\n render_mode=\"svg\",\n) -> plotly.graph_objects.Figure:\n return px.line(\n data,\n x=x,\n y=y,\n title=title,\n color=label,\n line_group=label,\n hover_name=label,\n line_shape=line_shape,\n render_mode=render_mode,\n )\n\n\ndef add_trace_high_low(\n fig: plotly.graph_objects.Figure, df: pd.DataFrame\n) -> plotly.graph_objects.Figure:\n\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.High,\n name=\"High\",\n line=dict(color=\"firebrick\", width=1, dash=\"dash\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.Low,\n name=\"Low\",\n line=dict(color=\"royalblue\", width=1, dash=\"dash\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=df.index, y=df.Open, name=\"Open\", line=dict(color=\"firebrick\", width=1)\n )\n )\n return fig\n\n\ndef add_trace_moving_average(\n fig: plotly.graph_objects.Figure, df: pd.DataFrame\n) -> plotly.graph_objects.Figure:\n\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.MA50,\n name=\"MA50\",\n line=dict(color=\"firebrick\", width=1, dash=\"dash\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.MA200,\n name=\"MA200\",\n line=dict(color=\"royalblue\", width=1, dash=\"dash\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=df.index, y=df.Open, name=\"Open\", line=dict(color=\"firebrick\", width=1)\n )\n )\n return fig\n\n\ndef plot_low_high_prices(df: pd.DataFrame, name: str) -> plotly.graph_objects.Figure:\n\n fig = go.Figure()\n # Create and style traces\n fig = add_trace_high_low(fig, df)\n # Edit the layout\n fig.update_layout(\n title=f\"Average High, Low and Open Prices for {name} stock\",\n xaxis_title=\"Date\",\n yaxis_title=\"Prices\",\n )\n return fig\n\n\ndef plot_moving_average(df: pd.DataFrame, name: str) -> plotly.graph_objects.Figure:\n\n df = df.query(f'stock_name==\"{name}\"')\n fig = go.Figure()\n # Create and style traces\n fig = add_trace_moving_average(fig, df)\n # Edit the layout\n fig.update_layout(\n title=f\"Moving Average and Open for {name} 
stock\",\n xaxis_title=\"Date\",\n yaxis_title=\"Prices\",\n )\n return fig\n","repo_name":"MarinoSanLorenzo/trading_algorithmic","sub_path":"src/frontend/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":8115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"27142552208","text":"from django.urls import path\n\nfrom main.views import MineView, NewTransationView, TrainView, NodeResolveView, NodeRegisterView\n\nurlpatterns = [\n path('mine/', MineView.as_view()),\n path('transaction/new/', NewTransationView.as_view()),\n path('train/', TrainView.as_view()),\n path('nodes/resolve/', NodeResolveView.as_view()),\n path('nodes/register/', NodeRegisterView.as_view()),\n]\n","repo_name":"sainipray/blockchain-django","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"26999255908","text":"import datetime\nimport pathlib\nimport re\nimport shutil\nfrom pathlib import Path\nfrom typing import Optional, Sequence\n\nimport magic\nfrom loguru import logger\n\nfrom .extractors import (\n ExifImageExtractor,\n ExifToolExtractor,\n Extractor,\n MTimeExtractor,\n RegexExtractor,\n)\nfrom .utils import sha256_file\n\nDEFAULT_IMAGE_EXTRACTORS = [\n ExifImageExtractor(),\n # e.g. '20200101_120101.jpg'\n RegexExtractor(re.compile(r\"(\\d{8}_\\d{6})\"), \"YYYYMMDD_HHmmss\"),\n # e.g. 'Screenshot_20200101-120101_Maps.jpg'\n RegexExtractor(re.compile(r\"Screenshot_(\\d{8}-\\d{6})_\\w.+\"), \"YYYYMMDD-HHmmss\"),\n # e.g. 'IMG-20200101-WA0001.jpg'\n RegexExtractor(re.compile(r\"IMG-(\\d{8})-WA\\d+\"), \"YYYYMMDD\"),\n MTimeExtractor(),\n]\n\nDEFAULT_VIDEO_EXTRACTORS = [\n ExifToolExtractor(),\n # e.g. 'VID-20200101-WA0001.mp4'\n RegexExtractor(re.compile(r\"VID-(\\d{8})-WA\\d+\"), \"YYYYMMDD\"),\n # E.g. # e.g. '20200101_120101.mp4'\n RegexExtractor(re.compile(r\"(\\d{8}_\\d{6})\"), \"YYYYMMDD_HHmmss\"),\n MTimeExtractor(),\n]\n\n\nclass Organizer:\n \"\"\"\n The organizer takes care of actually organizing a source directory to a\n destination.\n\n It uses a sequence of extractors for images and videos to accomplish this task.\n \"\"\"\n\n def __init__(\n self,\n image_extractors: Sequence[Extractor] = (),\n video_extractors: Sequence[Extractor] = (),\n dry_run: bool = False,\n remove_source: bool = False,\n ) -> None:\n \"\"\"Initialize organizer\n\n :param image_extractors: Ordered sequence of date extractors to use for\n images. Dates are extracted in order. I.e, if the 1st extractor manages\n to extract the date for a particular file, we won't even attempt any of\n the next extractors.\n :param video_extractors: Ordered sequence of date extractors to use for\n videos. Dates are extracted in order. I.e, if the 1st extractor manages\n to extract the date for a particular file, we won't even attempt any\n of the next extractors.\n :param dry_run: Whether to perform a dry run. In a dry run some log messages\n are printed, but we won't actually copy over any files.\n :param remove_source: Whether to remove the source path(s) after copying is\n complete. 
This parameter is ignored when `dry_run` is set to True.\n \"\"\"\n self.image_extractors = image_extractors\n self.video_extractors = video_extractors\n self.dry_run = dry_run\n\n self.remove_source = remove_source if not self.dry_run else False\n\n def extract_date(self, path: pathlib.Path) -> Optional[datetime.date]:\n \"\"\"Extract the date of a single file\n\n :param path: path of file for which we should extract the date.\n :return: Date if file is an image or video and it can be extracted,\n None otherwise.\n \"\"\"\n if is_image(path):\n return self._extract_image_date(path)\n if is_mp4(path):\n return self._extract_video_date(path)\n logger.warning(f\"{path} is not an image or video, skipping...\")\n return None\n\n def _extract_file_date(\n self, path: pathlib.Path, extractors: Sequence[Extractor]\n ) -> Optional[datetime.date]:\n for extractor in extractors:\n logger.debug(f\"Attempting extractor {extractor.__class__.__name__}\")\n extraction = extractor.extract(path)\n if extraction is not None:\n return extraction\n\n logger.info(\"Could not determine date for any extractor.\")\n return None\n\n def _extract_image_date(self, path: pathlib.Path) -> Optional[datetime.date]:\n return self._extract_file_date(path, self.image_extractors)\n\n def _extract_video_date(self, path: pathlib.Path) -> Optional[datetime.date]:\n return self._extract_file_date(path, self.video_extractors)\n\n def organize(self, source: Path, destination: Path) -> None:\n \"\"\"Main organizer\"\"\"\n if source.is_file():\n logger.debug(f\"{source} is a file\")\n self.organize_file(source, destination)\n elif source.is_dir():\n logger.debug(f\"{source} is a directory\")\n self.organize_dir(source, destination)\n else:\n raise NotImplementedError\n\n def organize_file(\n self,\n source: Path,\n destination: Path,\n ) -> None:\n \"\"\"Organize a single file.\"\"\"\n logger.debug(f\"Organizing {source}\")\n date = self.extract_date(source)\n\n if date is None:\n return\n\n dest_dir = create_date_path(destination, date)\n\n dest_path = dest_dir / source.name\n logger.debug(f\"Determined destination path as {dest_path}\")\n dest_dir.mkdir(parents=True, exist_ok=True) # ensure dir exists\n if dest_path.exists():\n logger.warning(f\"{source.name} already exists on destination, skipping\")\n return # skipping, since it already exists.\n logger.info(f\"Copying {source} to {dest_path}\")\n\n # Return early if we are doing a dry run.\n if self.dry_run:\n return\n\n try:\n verify_copy(source, dest_path)\n except ValueError:\n logger.exception(f\"Failed to copy {source} to {dest_path}\")\n raise\n\n if self.remove_source and source.is_file():\n logger.info(f\"Removing {source}\")\n source.unlink() # removing source.\n\n def organize_dir(self, source: Path, destination: Path) -> None:\n \"\"\"\n Recursively organize a directory.\n\n :raises: RuntimeError when trying to process extremely deep directory tree.\n \"\"\"\n logger.debug(f\"Organizing {source}\")\n for item in source.iterdir():\n if item.is_file():\n self.organize_file(item, destination)\n elif item.is_dir():\n # a little recursion\n self.organize_dir(item, destination)\n else:\n # skipping due to don't know how to handle\n continue\n\n if self.remove_source and not self.dry_run:\n try:\n source.rmdir()\n except OSError as error:\n if str(error).startswith(\"[Errno 39]\"):\n # means directory is not empty.\n # should warn that source is unremovable.\n pass\n else:\n raise\n\n\ndef is_image(path: Path) -> bool:\n mimetype = magic.from_file(str(path), mime=True)\n 
return mimetype.split(\"/\")[0] == \"image\"\n\n\ndef is_mp4(path: Path) -> bool:\n mimetype = magic.from_file(str(path), mime=True)\n return mimetype == \"video/mp4\"\n\n\ndef verify_copy(source: Path, destination: Path) -> None:\n \"\"\"\n Copy a file, verifying that the copied file's contents are identical\n to the source contents.\n\n Will attempt to copy metadata as well, with the caveats listed in:\n https://docs.python.org/3.7/library/shutil.html#shutil.copy2\n\n In case the contents do not match, we will attempt to remove the\n destination if it is a file, after which a ValueError is thrown.\n\n :raises: ValueError in case contents do not match\n :raises: ValueError in case source is not a file\n :raises: ValueError in case destination is a directory.\n :raises: OSError in case file's can't be written.\n \"\"\"\n if not source.is_file():\n raise ValueError(\"Source must be a file\")\n if destination.is_dir():\n raise ValueError(\"Destination may not be a directory\")\n source_sha256 = sha256_file(source)\n copied = Path(shutil.copy2(source, destination))\n dest_sha256 = sha256_file(copied)\n if source_sha256 != dest_sha256:\n if copied.is_file():\n copied.unlink()\n raise ValueError(\"Source' and destination's contents did not match!\")\n\n\ndef create_date_path(root: Path, date: datetime.date) -> Path:\n \"\"\"Create path form a root path and a date.\"\"\"\n return root / Path(str(date.year)) / Path(str(date.month)) / Path(str(date.day))\n","repo_name":"sndrtj/image-date-organizer","sub_path":"src/image_date_organizer/organize.py","file_name":"organize.py","file_ext":"py","file_size_in_byte":8070,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"11786305313","text":"import bisect\nimport gc\nimport glob\nimport random\nimport torch\nfrom others.logging import logger\n\n\nclass Batch(object):\n def _pad(self, data, pad_id, width=-1):\n if (width == -1):\n width = max(len(d) for d in data)\n rtn_data = [d + [pad_id] * (width - len(d)) for d in data]\n return rtn_data\n\n def __init__(self, data=None, device=None, is_test=False, autogressive=False):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.autogressive = autogressive\n pre_src = [x[0] for x in data]\n pre_src_mask = [x[1] for x in data]\n pre_state = [x[2] for x in data]\n pre_tgt = [x[3] for x in data]\n pre_auto = [x[4] for x in data]\n pre_len = [x[5] for x in data]\n relations = [x[6] for x in data]\n example_id = [x[7] for x in data]\n\n if not is_test:\n ex_idx, tgt_idx, src, pmt_msk, states, tgt, mask_src, mask_tgt = \\\n self._process(pre_src, pre_src_mask, pre_state, pre_tgt, pre_auto)\n\n setattr(self, 'src', src.to(device))\n setattr(self, 'tgt', tgt.to(device))\n setattr(self, 'pmt_msk', pmt_msk.to(device))\n\n setattr(self, 'states', states)\n setattr(self, 'ex_idx', ex_idx)\n setattr(self, 'tgt_idx', tgt_idx)\n setattr(self, 'tgt_len', sum(pre_len))\n\n setattr(self, 'mask_src', mask_src.to(device))\n setattr(self, 'mask_tgt', mask_tgt.to(device))\n\n setattr(self, 'example_id', example_id)\n else:\n ex_idx, src, pmt_msk, states, mask_src = \\\n self._process_test(pre_src, pre_src_mask, pre_state)\n\n setattr(self, 'src', src.to(device))\n setattr(self, 'pmt_msk', pmt_msk.to(device))\n\n setattr(self, 'states', states)\n setattr(self, 'ex_idx', ex_idx)\n setattr(self, 'relations', relations)\n\n setattr(self, 'mask_src', mask_src.to(device))\n\n src_str = [x[-2] for x in data]\n setattr(self, 'src_str', src_str)\n tgt_str = [x[-1] for x in data]\n setattr(self, 'tgt_str', tgt_str)\n\n setattr(self, 'example_id', example_id)\n\n def _process_test(self, pre_src, pre_mask, pre_state):\n ex_idx = []; tgt_idx = []\n src = []; pmt_msk = []; states = []\n for i in range(len(pre_src)):\n src_ex = pre_src[i]\n mask_ex = pre_mask[i]\n state_ex = pre_state[i]\n step_info = []; s_idx = len(pmt_msk)\n for step in range(len(mask_ex)):\n step_info.append((s_idx, s_idx+len(mask_ex[step])))\n s_idx += len(mask_ex[step])\n pmt_msk.extend(mask_ex[step])\n states.extend(state_ex[step])\n ex_idx.append(step_info)\n src.append(src_ex)\n src = torch.tensor(self._pad(src, 0))\n pmt_msk = torch.tensor(self._pad(pmt_msk, True))\n mask_src = ~(src == 0)\n return ex_idx, src, pmt_msk, states, mask_src\n\n\n def _process(self, pre_src, pre_mask, pre_state, pre_tgt, pre_auto):\n ex_idx = []; tgt_idx = []\n src = []; pmt_msk = []; states = []; tgt = []\n b_tok = pre_auto[0][0][0]\n for i in range(len(pre_src)):\n src_ex = pre_src[i]\n mask_ex = pre_mask[i]\n state_ex = pre_state[i]\n tgt_ex = pre_tgt[i]\n auto_ex = pre_auto[i]\n step_info = []; s_idx = len(pmt_msk)\n for step in range(len(mask_ex)):\n step_info.append((s_idx, s_idx+len(mask_ex[step])))\n s_idx += len(mask_ex[step])\n pmt_msk.extend(mask_ex[step])\n states.extend(state_ex[step])\n if self.autogressive:\n t = auto_ex[step] + tgt_ex[step]\n t_idx = (len(auto_ex[step])-1, len(t)-1)\n else:\n t = [b_tok] + tgt_ex[step]\n t_idx = (1-1, len(t)-1)\n tgt.extend([t for i in range(len(mask_ex[step]))])\n tgt_idx.extend([t_idx for i in range(len(mask_ex[step]))])\n ex_idx.append(step_info)\n src.append(src_ex)\n #print ([len(item) for item in src_ex])\n\n src = 
torch.tensor(self._pad(src, 0))\n tgt = torch.tensor(self._pad(tgt, 0))\n pmt_msk = torch.tensor(self._pad(pmt_msk, True))\n\n mask_src = ~(src == 0)\n mask_tgt = ~(tgt == 0)\n\n return ex_idx, tgt_idx, src, pmt_msk, states, tgt, mask_src, mask_tgt\n\n\n def __len__(self):\n return self.batch_size\n\n\n\ndef load_dataset(args, corpus_type, shuffle):\n assert corpus_type in [\"train\", \"dev\", \"test\", 'ann']\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(args.data_path + '.' + corpus_type + '.[0-9]*.pt'))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = args.data_path + '.' + corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)\n\n\ndef abs_batch_size_fn(new, count):\n src, tgt = new[0], new[1]\n global max_n_sents, max_n_tokens, max_size\n if count == 1:\n max_size = 0\n max_n_sents=0\n max_n_tokens=0\n max_n_sents = max(max_n_sents, len(tgt))\n max_size = max(max_size, max_n_sents)\n src_elements = count * max_size\n if (count > 6):\n return src_elements + 1e3\n return src_elements\n\n\nclass Dataloader(object):\n def __init__(self, args, datasets, batch_size,\n device, shuffle, is_test):\n self.args = args\n self.datasets = datasets\n self.batch_size = batch_size\n self.device = device\n self.shuffle = shuffle\n self.is_test = is_test\n self.cur_iter = self._next_dataset_iterator(datasets)\n assert self.cur_iter is not None\n\n def __iter__(self):\n dataset_iter = (d for d in self.datasets)\n while self.cur_iter is not None:\n for batch in self.cur_iter:\n yield batch\n self.cur_iter = self._next_dataset_iterator(dataset_iter)\n\n def _next_dataset_iterator(self, dataset_iter):\n try:\n # Drop the current dataset for decreasing memory\n if hasattr(self, \"cur_dataset\"):\n self.cur_dataset = None\n gc.collect()\n del self.cur_dataset\n gc.collect()\n\n self.cur_dataset = next(dataset_iter)\n except StopIteration:\n return None\n\n return DataIterator(args = self.args,\n dataset=self.cur_dataset, batch_size=self.batch_size,\n device=self.device, shuffle=self.shuffle, is_test=self.is_test)\n\n\nclass DataIterator(object):\n def __init__(self, args, dataset, batch_size, device=None, is_test=False,\n shuffle=True):\n self.args = args\n self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset\n self.iterations = 0\n self.device = device\n self.shuffle = shuffle\n\n self.sort_key = lambda x: len(x[1])\n\n self._iterations_this_epoch = 0\n self.batch_size_fn = abs_batch_size_fn\n\n def data(self):\n if self.shuffle:\n random.shuffle(self.dataset)\n xs = self.dataset\n return xs\n\n def preprocess(self, ex, is_test):\n src = ex['src']\n src_mask = ex['src_mask']\n relations = ex['relations']\n comb_rels = ex[\"comb_rels\"]\n tgt = ex['tgt']\n tgt_atg = ex['tgt_atg']\n tgt_len = ex['tgt_len']\n example_id = ex['example_id']\n\n src_txt = ex['src_txt']\n tgt_txt = ex['tgt_txt']\n\n if(is_test):\n return src, src_mask, comb_rels, tgt, tgt_atg, tgt_len, relations, example_id, src_txt, tgt_txt\n else:\n return src, src_mask, comb_rels, tgt, tgt_atg, tgt_len, relations, example_id\n\n def batch_buffer(self, data, batch_size):\n minibatch, size_so_far = [], 0\n for ex in data:\n if(len(ex['src'])==0):\n 
continue\n ex = self.preprocess(ex, self.is_test)\n if(ex is None):\n continue\n minibatch.append(ex)\n size_so_far = self.batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def batch(self, data, batch_size):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = self.batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def create_batches(self):\n \"\"\" Create batches \"\"\"\n data = self.data()\n for buffer in self.batch_buffer(data, self.batch_size * 300):\n\n p_batch = sorted(buffer, key=lambda x: len(x[2]))\n p_batch = sorted(p_batch, key=lambda x: len(x[1]))\n p_batch = self.batch(p_batch, self.batch_size)\n\n p_batch = list(p_batch)\n if (self.shuffle):\n random.shuffle(p_batch)\n for b in p_batch:\n if(len(b)==0):\n continue\n yield b\n\n def __iter__(self):\n while True:\n self.batches = self.create_batches()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n batch = Batch(minibatch, self.device, self.is_test, self.args.autogressive)\n\n yield batch\n return\n\n\n","repo_name":"XinnuoXu/AggGen","sub_path":"src/models/data_hmm.py","file_name":"data_hmm.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"10682953487","text":"from libqtile import bar, qtile, widget\nfrom libqtile.config import Screen\n\nfrom config_variables._helpers import Helpers\nfrom config_variables.conf import Config, WidgetsConfig\n\n# WIDGETS\nwidget_defaults = WidgetsConfig.DEFAULT\nSEP = widget.Sep(**WidgetsConfig.SEP)\nSPACER = widget.Spacer(length=bar.STRETCH)\nCLOCK = widget.Clock(**WidgetsConfig.CLOCK)\n\n# BARS AND SCREENS\nprimary_bar = bar.Bar(\n [\n widget.GroupBox(**WidgetsConfig.GROUP_BOX),\n widget.CurrentLayout(**WidgetsConfig.LAYOUT),\n SPACER,\n widget.MemoryGraph(**WidgetsConfig.MEMORY_GRAPH),\n SEP,\n widget.CPUGraph(**WidgetsConfig.CPU_GRAPH),\n SEP,\n widget.ThermalSensor(**WidgetsConfig.CPU_TEMP),\n SEP,\n widget.CheckUpdates(**WidgetsConfig.UPDATES),\n SEP,\n widget.Wlan(**WidgetsConfig.WLAN),\n SEP,\n widget.Battery(**WidgetsConfig.BATTERY),\n SEP,\n widget.Systray(),\n CLOCK,\n ],\n Config.BAR_SIZE,\n **Config.BAR,\n)\n\nsecondary_bar = bar.Bar(\n [\n widget.GroupBox(**WidgetsConfig.GROUP_BOX),\n CLOCK,\n ],\n Config.BAR_SIZE,\n **Config.BAR,\n)\n\nconnected_monitors = len(qtile.core.outputs)\nscreens = [\n Screen(\n bottom=primary_bar if i == 0 else secondary_bar,\n wallpaper=Helpers.get_random_wallpaper(),\n wallpaper_mode=\"fill\",\n )\n for i in range(connected_monitors)\n]\n","repo_name":"gontzalm/dotfiles","sub_path":".config/qtile/config_variables/screens.py","file_name":"screens.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24659660775","text":"\n\n'''\n\n\n와 이거 풀이 신박,,\n\nvisetd를 중복하게 기록해서 결국의 경로의 수를 알아낸다!\n\n\n\n'''\n\n\nimport sys\nlimit_number = 30000\nsys.setrecursionlimit(limit_number)# 재귀 횟수 제한 해제\n\ndef dfs(row, col) : # row, col <- 현재 점의 위치(now)\n # 반환값 : dp값(현재 좌표에서 도착지까지의 값(경로의 수) )\n # now -> next\n if row == h-1 and col == w-1:\n return 1# 원하는 목적지까지 왔으면 더 갈 필요 없이 끝!\n if dp[row][col] != -1:\n # 계산한 결과가 있다!\n return dp[row][col]\n # 상하좌우\n dr = [-1,1,0,0]\n dc = [0,0,-1,1]\n ret = 0 # row, col에서 도착지까지 경로의 수\n for i in range(4):\n next_row = row + dr[i]\n next_col = col + dc[i]\n if next_row < 0 or next_col < 0 or next_row >= h or next_col >= w:\n continue # 맵을 벗어나는 좌표이면 무시\n if MAP[row][col] <= MAP[next_row][next_col]:\n continue # 내리막길로만 가야 하니, 더 높거나 같으면 갈 수 없다.\n visited[next_row][next_col] = visited[next_row][next_col] + 1\n ret += dfs(next_row, next_col)\n # next방향으로 갈때의 경로 개수를 받아서 누적\n dp[row][col] = ret # 결과 기록\n return ret\n\nh, w = map(int, input().split())\nMAP = [ list(map(int, input().split())) for _ in range(h)]\nvisited = [[0] * w for _ in range(h)] # 갔던 점을 기록\ndp = [[-1] * w for _ in range(h)]\n# dp[row][col] : row,col에서부터 목적지까지의 경로 개수\n# 0도 계산된 값일 수 있으니 -1로 초기화(-1 : 아직 계산해보지 않은 값이라는 의미)\n\nvisited[0][0] = 1\nprint(dfs(0,0))\n\nde = 1","repo_name":"joojeehwan/algo_jjh","sub_path":"0820/1520 내리막길.py","file_name":"1520 내리막길.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"28684072938","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function # (at top of module)\nimport sys\nimport re\nimport os\nfrom jsmin import jsmin\nfrom bs4 import BeautifulSoup\nfrom wiki_login import login\n\nWIKI_API_MAIN = \"http://wiki.tuniu.org\"\nWIKI_SHOW_CHILDREN_SUFFIX = \"&showChildren=true\"\nWIKI_API_ROOT_PAGE = WIKI_API_MAIN + \"/pages/viewpage.action?pageId=71367772\" + WIKI_SHOW_CHILDREN_SUFFIX\nHREF_REGEX = r'href=\"(.*)\"'\nURI_OUTPUT_DIR = '../data/'\n\nuri_set = set()\n\nsuffix_len = len(WIKI_SHOW_CHILDREN_SUFFIX)\n\n\n# 爬到所有有接口数据的html页面, 保存到文件里\ndef dfs_html(browser, uri):\n response = browser.open(uri)\n html_content = response.read()\n response.close()\n soup = BeautifulSoup(html_content, 'html.parser')\n spans = soup.findAll('span', attrs={'class': 'child-display'})\n if not spans:\n real_data_link = uri[0:-suffix_len]\n print(real_data_link)\n uri_set.add(real_data_link)\n return\n for span in spans:\n hrefs = span.findAll('a')\n if not hrefs:\n break\n for href in hrefs:\n dir_link = WIKI_API_MAIN + re.findall(HREF_REGEX, str(href))[0] + WIKI_SHOW_CHILDREN_SUFFIX\n dfs_html(browser, dir_link)\n\n\nif __name__ == '__main__':\n br = login()\n dfs_html(br, WIKI_API_ROOT_PAGE)\n output_dir = URI_OUTPUT_DIR\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n uri_file = output_dir + 'api_uri.txt'\n file_uri = open(uri_file, 'w')\n for uri in uri_set:\n file_uri.write(uri + '\\n')\n","repo_name":"LionelWei/scrap_api_to_model","sub_path":"src/html_scrap.py","file_name":"html_scrap.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"26808136965","text":"import json\nimport logging\nfrom io import BytesIO\nfrom random import choices, shuffle\n\nfrom compass import Attribute, CardData, Rarity\nfrom discord import File, app_commands\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot, Context\n\nfrom .base import Cog\nfrom .path import path\nfrom .translator import locale_str as _\n\n\nlogger = logging.getLogger(__name__)\n\n\nasync def setup(bot: Bot) -> None:\n await bot.add_cog(Gacha(bot))\n\n\nasync def teardown(bot: Bot) -> None:\n await bot.remove_cog(\"Gacha\")\n\n\n# prepares choices for name argument\nwith open(path.gacha_json, \"r\") as f:\n gacha_data = json.load(f)\n\ngacha_list = [app_commands.Choice(name=_(data[\"name\"]), value=idx)\n for idx, data in enumerate(gacha_data)]\n\n\nclass Gacha(Cog):\n def __init__(self, bot: Bot) -> None:\n super().__init__(bot, logger)\n self.data = CardData()\n\n @commands.hybrid_command(\n description = _(\"ガチャシミュレーター\"),\n )\n @app_commands.describe(\n name = _(\"シミュレートするガチャの名前を指定してね!\"),\n )\n @app_commands.choices(name=gacha_list)\n async def gacha(self, ctx: Context, name: int) -> None:\n await ctx.defer()\n\n data = gacha_data[name]\n cards = CardData([])\n\n rarities = choices([*data[\"weight\"]], [*data[\"weight\"].values()], k=data[\"k\"])\n\n for rarity in [*data[\"weight\"]]:\n population = CardData([])\n weights = []\n k = sum(el==rarity for el in rarities)\n for condition in data[rarity]:\n args = list(map(lambda el: Attribute(el), condition[\"attributes\"])) \\\n + list(map(lambda el: Rarity(el), condition[\"rarities\"]))\n tmp = self.data.get_cards(*args, **condition[\"kwargs\"], themes=condition[\"themes\"])\n population.extend(tmp)\n weights.extend([condition[\"weight\"]]*len(tmp))\n cards.extend(choices(population, weights, k=k))\n\n shuffle(cards)\n cards = CardData(sorted(cards, key=lambda card: card.rarity))\n\n img = cards.generate_large_image()\n\n image_bytes = BytesIO()\n img.save(image_bytes, \"PNG\", quality=100, optimize=True)\n image_bytes.seek(0)\n\n await ctx.send(file=File(fp=image_bytes, filename=f\"{ctx.author.id}.png\"))\n return\n","repo_name":"ster-phys/bot_cps","sub_path":"bot_cps/gacha.py","file_name":"gacha.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"10279270571","text":"import os\nimport sys\nimport glob\nimport argparse\nimport tarfile\nimport tempfile\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom astropy.io import fits\n\nfrom paths import ARCHIVE_PATH, OUTPUT_PATH\n\nlog = logging.getLogger(__name__)\npd.options.mode.chained_assignment = None\n\nqd_map = {\n 0: 2009131105131,\n 1: 2009166043257,\n 2: 2009259160929,\n 3: 2009350155506,\n 4: 2010078095331,\n 5: 2010174085026,\n 6: 2010265121752,\n 7: 2010355172524,\n 8: 2011073133259,\n 9: 2011177032512,\n 10: 2011271113734,\n 11: 2012004120508,\n 12: 2012088054726,\n 13: 2012179063303,\n 14: 2012277125453,\n 15: 2013011073258,\n 16: 2013098041711,\n 17: 2013131215648,\n}\n\n\ndef do_lookup_table(\n folder=\"0007\",\n quarter=5,\n fits_path=f\"{ARCHIVE_PATH}/data/kepler/tpf\",\n tar_archive=True,\n quiet=False,\n):\n\n if not tar_archive:\n print(\n \"%s/%s/*/kplr*-%s_lpd-targ.fits.gz\"\n % (fits_path, folder, str(qd_map[quarter]))\n )\n tpfs_ = np.sort(\n glob.glob(\n \"%s/%s/*/kplr*-%s_lpd-targ.fits.gz\"\n % (fits_path, folder, str(qd_map[quarter]))\n )\n )\n log.info(f\"Total number of TPFs in {folder}: {tpfs_.shape[0]}\")\n if len(tpfs_) == 0:\n raise ValueError(\"No TPFs for selected quarter %i\" % quarter)\n\n tpfs, channels, quarters, ras, decs, cols, rows = np.array(\n [\n [\n f.split(\"tpf/\")[-1],\n fits.getheader(f, ext=0)[\"CHANNEL\"],\n fits.getheader(f, ext=0)[\"QUARTER\"],\n fits.getheader(f, ext=0)[\"RA_OBJ\"],\n fits.getheader(f, ext=0)[\"DEC_OBJ\"],\n fits.getheader(f, ext=1)[\"1CRV5P\"],\n fits.getheader(f, ext=1)[\"2CRV5P\"],\n ]\n for f in tpfs_\n ]\n ).T\n else:\n tarlist = np.sort(glob.glob(\"%s/%s/%s_*.tar\" % (fits_path, folder, folder)))\n log.info(f\"Total number of tarballs in {folder}/: {tarlist.shape[0]}\")\n if len(tarlist) == 0:\n raise ValueError(f\"No TPFs for selected folder {folder}\")\n tpfs, channels, quarters, ras, decs, cols, rows = [], [], [], [], [], [], []\n with tempfile.TemporaryDirectory(prefix=\"temp_fits\") as tmpdir:\n for tarf in tqdm(tarlist, desc=\"Reading headers\", disable=quiet):\n kic = tarf.split(\".\")[0].split(\"_\")[-1]\n fname = f\"{kic[:4]}/{kic}/kplr{kic}-{qd_map[quarter]}_lpd-targ.fits.gz\"\n try:\n tarfile.open(tarf, mode=\"r\").extract(fname, tmpdir)\n except KeyError:\n continue\n except tarfile.ReadError:\n log.info(f\"tar file fail {tarf}\")\n continue\n tpfs.append(fname)\n header = fits.getheader(f\"{tmpdir}/{fname}\", ext=0)\n channels.append(header[\"CHANNEL\"])\n quarters.append(header[\"QUARTER\"])\n ras.append(header[\"RA_OBJ\"])\n decs.append(header[\"DEC_OBJ\"])\n header = fits.getheader(f\"{tmpdir}/{fname}\", ext=1)\n cols.append(header[\"1CRV5P\"])\n rows.append(header[\"2CRV5P\"])\n\n df = pd.DataFrame(\n [tpfs, quarters, channels, ras, decs, cols, rows],\n index=[\"file_name\", \"quarter\", \"channel\", \"ra\", \"dec\", \"col\", \"row\"],\n ).T\n df.channel = df.channel.astype(np.int8)\n df.quarter = df.quarter.astype(np.int8)\n\n dir_name = f\"{OUTPUT_PATH}/support/\"\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n file_name = \"%s/kepler_tpf_map_%s_q%02i%s.csv\" % (\n dir_name,\n folder,\n quarter,\n \"_tar\" if tar_archive else \"\",\n )\n df.to_csv(file_name)\n\n\ndef concatenate(quarter, tar_archive=True):\n\n log.info(\"Concatenating all lookup tables...\")\n f_list = np.sort(\n glob.glob(\n \"%s/support/kepler_tpf_map_*_q%02i%s.csv\"\n % (OUTPUT_PATH, quarter, \"_tar\" if tar_archive else \"\")\n )\n )\n if len(f_list) == 0:\n raise 
FileExistsError(\"No files to concatenate\")\n dfs = pd.concat([pd.read_csv(f, index_col=0) for f in f_list], axis=0)\n\n file_name = \"%s/support/kepler_tpf_map_q%02i%s.csv\" % (\n OUTPUT_PATH,\n quarter,\n \"_tar\" if tar_archive else \"\",\n )\n log.info(f\"Output file: {file_name}\")\n dfs.reset_index(drop=True).to_csv(file_name)\n for f in f_list:\n os.remove(f)\n\n\ndef sort_tpfs_in_all_channel(quarter, tar_archive=True, ncols_start=4):\n\n file_name = \"%s/support/kepler_tpf_map_q%02i%s.csv\" % (\n OUTPUT_PATH,\n quarter,\n \"_tar\" if tar_archive else \"\",\n )\n lkp_tbl = pd.read_csv(file_name, index_col=0)\n\n bins = [5, 4, 3, 2, 1]\n sorted_lkp_tbl = []\n log.info(f\"Working with Quarter {quarter}\")\n for ch in tqdm(range(1, 85), total=84, disable=False):\n files_in = lkp_tbl.query(\"channel == %i and quarter == %i\" % (ch, quarter))\n if len(files_in) == 0:\n continue\n log.info(f\"Channel {ch} total TPFS {len(files_in)}\")\n if len(files_in) < 1500:\n ncols = ncols_start - 1\n if len(files_in) < 550:\n ncols = 2\n else:\n ncols = ncols_start\n log.info(f\"Ncols {ncols}\")\n bn = ncols\n sorted_ch = []\n col_size = 1112 // bn\n row_size = 1044 // bn\n bn_row_org = np.arange(bn)\n bn_col = np.arange(bn)\n for i, x in enumerate(range(bn)):\n if i % 2 == 1:\n bn_row = bn_row_org[::-1]\n else:\n bn_row = bn_row_org\n for y in range(bn):\n\n in_cell = files_in.query(\n f\"col >= {bn_col[x]*col_size} and col <= {(bn_col[x]+1)*col_size} and \"\n f\"row >= {bn_row[y]*row_size} and row <= {(bn_row[y]+1)*row_size}\"\n )\n sorted_ch.append(in_cell.sort_values([\"row\"], ascending=i % 2 == 0))\n\n sorted_ch = pd.concat(sorted_ch).reset_index(drop=True).drop_duplicates()\n\n df_with_batch = sort_tpfs_in_channel(sorted_ch, ncols=ncols, batch_size=200)\n sorted_lkp_tbl.append(df_with_batch)\n log.info(\"####\" * 10)\n\n sort_tpfs_in_all_channel = (\n pd.concat(sorted_lkp_tbl).reset_index(drop=True).drop_duplicates()\n )\n if sort_tpfs_in_all_channel.shape[0] != lkp_tbl.shape[0]:\n raise RuntimeError(\"Missing TPFs\")\n sort_tpfs_in_all_channel.to_csv(file_name.replace(\".csv\", \"_new.csv\"))\n\n return\n\n\ndef do_batches_in_col(df, batch_size=200, tolerance=0.5):\n\n if len(df) >= 170:\n left = len(df) % batch_size\n\n if left / batch_size < 0.1:\n pass\n elif left / batch_size < tolerance:\n while (len(df) % batch_size) / batch_size > 0.1:\n batch_size += 1\n elif left / batch_size > tolerance:\n while (len(df) % batch_size) / batch_size > 0.1 and batch_size > 170:\n batch_size -= 1\n tot_b = len(df) // batch_size\n else:\n batch_size = len(df)\n tot_b = 1\n\n log.info(f\"Batch size and total in column {batch_size} {tot_b}\")\n aux = np.zeros(len(df))\n batch_index = np.hstack([np.ones(batch_size) * (k + 1) for k in range(tot_b)])\n aux[: len(batch_index)] = batch_index\n aux[aux == 0] = np.max(batch_index)\n df.loc[:, \"batch\"] = aux\n\n return df\n\n\ndef sort_tpfs_in_channel(df, ncols=4, batch_size=200):\n if len(df) >= 400:\n col_lims = np.linspace(0, 1112, ncols + 1)\n else:\n col_lims = np.array([0, np.median(df.col), 1113])\n sort_new = []\n prev_batch = 0\n for x in range(len(col_lims) - 1):\n in_col = df.query(f\"col >= {col_lims[x]} and col < {col_lims[x + 1]}\")\n log.info(f\"TPFs in column {x+1} {len(in_col)}\")\n in_col_sorted = do_batches_in_col(in_col, batch_size=batch_size)\n aux = in_col_sorted[\"batch\"].max()\n in_col_sorted.loc[:, \"batch\"] += prev_batch\n sort_new.append(in_col_sorted)\n\n prev_batch += aux\n\n return pd.concat(sort_new, 
axis=0).reset_index(drop=True)\n\n\ndef how_many_batches(quarter, batch_size):\n file_name = \"%s/support/kepler_tpf_map_all_q%02i.csv\" % (OUTPUT_PATH, quarter)\n df = pd.read_csv(file_name, index_col=0)\n\n channels = np.arange(1, 85)\n number_batch, nsources = [], []\n for ch in channels:\n in_channel = df.query(\"channel == %i\" % ch)\n nsources.append(in_channel.shape[0])\n number_batch.append(int(np.ceil(in_channel.shape[0] / batch_size)))\n df_nb = pd.DataFrame(\n np.vstack([channels, nsources, number_batch]).T,\n columns=[\"channel\", \"n_sources\", \"n_batch\"],\n )\n\n file_name = \"%s/support/kepler_tpf_nbatches_bs%03i_q%02i.csv\" % (\n OUTPUT_PATH,\n batch_size,\n quarter,\n )\n df_nb.set_index(\"channel\").to_csv(file_name)\n\n\ndef how_many_tpfs(tar_archive=True):\n df = pd.DataFrame(\n np.zeros((18, 84), dtype=int), index=np.arange(0, 18), columns=np.arange(1, 85)\n )\n for q in df.index:\n file_name = \"%s/support/kepler_tpf_map_q%02i%s.csv\" % (\n OUTPUT_PATH,\n q,\n \"_tar\" if tar_archive else \"\",\n )\n if not os.path.isfile(file_name):\n log.info(f\"Warning: no file map for quarter {q}\")\n continue\n map = pd.read_csv(file_name, index_col=0)\n for ch in df.columns:\n df.loc[q, ch] = map.query(f\"channel == {ch}\").shape[0]\n\n file_name = \"%s/support/kepler_ntpf_qch.csv\" % (OUTPUT_PATH)\n df.to_csv(file_name)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"Create lookup tables with FITS file path and creates batches\"\n )\n parser.add_argument(\n \"--folder\",\n dest=\"folder\",\n type=str,\n default=\"0007\",\n help=\"First level folder name of Kepler archive directory.\",\n )\n parser.add_argument(\n \"--quarter\",\n dest=\"quarter\",\n type=int,\n default=5,\n help=\"First level folder name of Kepler archive directory.\",\n )\n parser.add_argument(\n \"--path\",\n dest=\"path\",\n type=str,\n default=\"/Volumes/jorge-marpa-personal/work/data/kepler/tpf\",\n help=\"Kepler archive path.\",\n )\n parser.add_argument(\n \"--batch-size\",\n dest=\"batch_size\",\n type=int,\n default=200,\n help=\"Batch size\",\n )\n parser.add_argument(\n \"--concat\",\n dest=\"concat\",\n action=\"store_true\",\n default=False,\n help=\"Concatenate all lookup tables in a quarter.\",\n )\n parser.add_argument(\n \"--sort\",\n dest=\"sort\",\n action=\"store_true\",\n default=False,\n help=\"Sort TPFs.\",\n )\n parser.add_argument(\n \"--sum-tpfs\",\n dest=\"sum_tpfs\",\n action=\"store_true\",\n default=False,\n help=\"Computen number of batches per channel/quarter.\",\n )\n parser.add_argument(\n \"--tar-tpfs\",\n dest=\"tar_archive\",\n action=\"store_true\",\n default=False,\n help=\"Is archive in tarball files.\",\n )\n parser.add_argument(\"--log\", dest=\"log\", default=0, help=\"Logging level\")\n args = parser.parse_args()\n # set verbose level for logger\n try:\n args.log = int(args.log)\n except:\n args.log = str(args.log.upper())\n FORMAT = \"%(filename)s:%(lineno)s : %(message)s\"\n h2 = logging.StreamHandler(sys.stderr)\n h2.setFormatter(logging.Formatter(FORMAT))\n log.addHandler(h2)\n log.setLevel(args.log)\n log.info(vars(args))\n\n if args.concat:\n concatenate(args.quarter, tar_archive=args.tar_archive)\n sort_tpfs_in_all_channel(\n args.quarter, tar_archive=args.tar_archive, ncols_start=4\n )\n elif args.sort:\n sort_tpfs_in_all_channel(\n args.quarter, tar_archive=args.tar_archive, ncols_start=4\n )\n elif args.sum_tpfs:\n how_many_tpfs(tar_archive=args.tar_archive)\n else:\n do_lookup_table(\n folder=args.folder,\n 
quarter=args.quarter,\n fits_path=args.path,\n tar_archive=args.tar_archive,\n quiet=True if args.log in [0, \"0\", \"NOTSET\"] else False,\n )\n log.info(\"Done!\")\n","repo_name":"jorgemarpa/kepler-workflow","sub_path":"kepler_workflow/make_archive_lookup_table.py","file_name":"make_archive_lookup_table.py","file_ext":"py","file_size_in_byte":12418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7969926997","text":"import urllib.request\nimport sys\nfrom datetime import date\ndefault_date=date(int(\"2019\"),int(\"01\"),int(\"01\")) #Python3 can't take 0 as the first char here due to octal interpretation\ndefault_equivalence=1546300800 \nprint(\"Welcome! Keep Starting-Ending dates and Stock Tickers handy as we proceed :) \\n\")\nfor i in (0,2):\n if i==0:\n input_taken='Starting'\n else:\n input_taken='Ending'\n \n rawInput=input(\"Enter the {} date in the YYYY-MM-DD format with no spaces between the hiphens: \".format(input_taken))\n try: \n YYYY,MM,DD=map(int,rawInput.split('-'))\n if i==False:\n starting_date=date(YYYY,MM,DD)\n delta=starting_date-default_date\n starting_equivalence=default_equivalence+(86400*delta.days) #This can be changed by yahoo to avoid scrapers\n else: #Just let me know if that happens we can compute it again ;)\n ending_date=date(YYYY, MM, DD)\n delta=ending_date-default_date\n ending_equivalence=default_equivalence+(86400*delta.days+86400)\n except:\n print(\"The format of the entered date was incorrect, the program terminates here. \\n\")\n sys.exit()\n\nif starting_equivalence>ending_equivalence: #You actually deserve to be confused by a HTTPS bad request error here. But I'm a good guy :)\n print(\"Starting Date cannot be after ending date \\n\")\n sys.exit()\n\nticker=input(\"Nice, now type in the ticker for the stock (All caps): \")\n\nurl=\"\"\"https://query1.finance.yahoo.com/v7/finance/download/\"\"\"+ticker+\"\"\"?period1=\"\"\"+str(starting_equivalence)+\"\"\"&period2=\"\"\"+str(ending_equivalence)+\"\"\"&interval=1d&events=history\"\"\"\n\nfile_name=input(\"What should we name the the downloaded csv file? (File name should have a .csv extension & Enter exact path if this isn't the desired download directory): \")\n\ntry: \n urllib.request.urlretrieve(url,file_name) \n print(\"Downloaded Successfully! \\n\")\nexcept:\n print(\"Something Went wrong, I'd request you to try again and recheck your ticker.\") #Invalid ticker, file name without csv and unstable internet connections are the possible issues here.\n","repo_name":"pruhnuhv/Historical-Stock-Data","sub_path":"Getdata.py","file_name":"Getdata.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"25956111568","text":"import sys\nfrom os.path import join, abspath, dirname\n\n# PATH vars\n\nhere = lambda *x: join(abspath(dirname(__file__)), *x)\nPROJECT_ROOT = here(\"..\")\nroot = lambda *x: join(abspath(PROJECT_ROOT), *x)\nrepo_root = lambda *x: join(abspath(here(\"../..\")), *x)\n\nsys.path.insert(0, root(\"apps\"))\n\n\nDEBUG = True\n\nADMINS = ()\n\nMANAGERS = ADMINS\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.contrib.gis.db.backends.postgis\",\n \"NAME\": \"polling_stations\",\n \"USER\": \"postgres\",\n \"PASSWORD\": \"\",\n \"HOST\": \"\", # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n \"PORT\": \"\", # Set to empty string for default.\n }\n}\n\nimport dj_database_url\n\nDATABASES[\"default\"] = dj_database_url.config()\nDATABASES[\"default\"][\"ENGINE\"] = \"django.contrib.gis.db.backends.postgis\"\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = \"Europe/London\"\n\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = False\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = root(\"assets\", \"uploads\")\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = \"/media/\"\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = root(\"static\")\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = \"/static/\"\n\n# Additional locations of static files\nSTATICFILES_DIRS = (root(\"assets\"), root(\"../node_modules\"))\n\nfrom .static_files import * # noqa\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = \"asdasdasdasdasdasdasd\"\n\nMIDDLEWARE = (\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n # 'django.middleware.csrf.CsrfViewMiddleware',\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"data_finder.middleware.UTMTrackerMiddleware\",\n \"whitelabel.middleware.WhiteLabelMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"pollingstations.middleware.BasicAuthMiddleware\",\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"APP_DIRS\": True,\n \"DIRS\": [root(\"templates\")],\n \"OPTIONS\": {\n \"debug\": DEBUG,\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.contrib.auth.context_processors.auth\",\n \"dc_theme.context_processors.dc_theme_context\",\n \"dc_signup_form.context_processors.signup_form\",\n \"feedback.context_processors.feedback_form\",\n \"bug_reports.context_processors.bug_report_form\",\n \"pollingstations.context_processors.google_analytics\",\n \"pollingstations.context_processors.global_settings\",\n \"whitelabel.context_processors.base_template\",\n ],\n },\n }\n]\n\nROOT_URLCONF = \"polling_stations.urls\"\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = \"polling_stations.wsgi.application\"\n\nINSTALLED_APPS = (\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.humanize\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.admin\",\n \"django.contrib.gis\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"rest_framework_gis\",\n \"raven.contrib.django.raven_compat\",\n \"django_extensions\",\n \"markdown_deux\",\n \"corsheaders\",\n \"pipeline\",\n \"dc_signup_form\",\n \"apiblueprint_view\",\n)\n\nPROJECT_APPS = (\n \"addressbase\",\n \"api\",\n \"councils\",\n \"data_collection\",\n \"data_finder\",\n \"dc_theme\",\n \"feedback\",\n \"file_uploads\",\n \"pollingstations\",\n \"bug_reports\",\n \"uk_geo_utils\",\n \"whitelabel\",\n)\n\nINSTALLED_APPS += PROJECT_APPS\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n \"require_debug_false\": {\"()\": \"django.utils.log.RequireDebugFalse\"},\n \"ignore_status_checks\": {\"()\": \"pollingstations.filters.StatusCheckFilter\"},\n },\n \"handlers\": {\n \"mail_admins\": {\n \"level\": \"ERROR\",\n \"filters\": [\"require_debug_false\", \"ignore_status_checks\"],\n \"class\": \"django.utils.log.AdminEmailHandler\",\n },\n \"null\": {\"class\": \"logging.NullHandler\"},\n \"sentry\": {\n \"level\": \"ERROR\",\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n },\n \"loggers\": {\n # Silence DisallowedHost exception by setting null error handler - see\n # https://docs.djangoproject.com/en/1.8/topics/logging/#django-security\n \"django.security.DisallowedHost\": {\"handlers\": [\"null\"], \"propagate\": False},\n \"file_uploads.views\": {\n \"handlers\": [\"sentry\"],\n \"level\": \"ERROR\",\n \"propagate\": True,\n },\n \"django.request\": {\n \"handlers\": [\"mail_admins\"],\n \"level\": \"ERROR\",\n \"propagate\": True,\n },\n },\n}\n\n\nLANGUAGE_CODE = \"en\"\nLANGUAGES = [(\"en\", \"English\"), (\"cy-gb\", \"Welsh\")]\nUSE_I18N = (True,)\nUSE_L10N = (True,)\nLOCALE_PATHS = (repo_root(\"locale\"),)\n\n\nLOGIN_REDIRECT_URL = \"file_uploads:councils_list\"\nLOGOUT_REDIRECT_URL = \"home\"\n\n\n# API Settings\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n \"DEFAULT_PERMISSION_CLASSES\": [\n \"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly\"\n ],\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"api.authentication.authentication.TokenAuthSupportQueryString\",\n ),\n \"DEFAULT_THROTTLE_CLASSES\": (\"rest_framework.throttling.AnonRateThrottle\",),\n \"DEFAULT_THROTTLE_RATES\": {\"anon\": \"1000/day\"},\n}\n\nEMBED_PREFIXES = (\"embed\",)\n\nWHITELABEL_PREFIXES = ()\n\n# CorsMiddleware config\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ORIGIN_WHITELIST = ()\nCORS_URLS_REGEX = r\"^/(api|embed)/.*$\"\n\n\nINTERNAL_IPS = \"127.0.0.1\"\nSITE_TITLE = \"Where Do I Vote?\"\nSITE_LOGO = \"images/logo-with-text.png\"\nSITE_LOGO_WIDTH = \"390px\"\n\nTEST_RUNNER = \"django.test.runner.DiscoverRunner\"\n\n\nADDRESS_MODEL = \"addressbase.Address\"\nONSUD_MODEL = \"addressbase.UprnToCouncil\"\n\nEMAIL_SIGNUP_ENDPOINT = \"https://democracyclub.org.uk/mailing_list/api_signup/v1/\"\nEMAIL_SIGNUP_API_KEY = \"\"\n\n\n# Disable Basic Auth by default\n# We only want to use this on staging deploys\nBASICAUTH_DISABLE = True\n\n\n# settings for load balancer status check\nCHECK_SERVER_CLEAN = True\nCLEAN_SERVER_FILE = \"~/clean\"\n\n\n# import application constants\nfrom .constants.councils import * # noqa\nfrom .constants.directions import * # noqa\nfrom .constants.elections import * # noqa\nfrom .constants.importers import * # noqa\nfrom .constants.tiles import * # noqa\nfrom .constants.uploads import * # noqa\n\n# Import .local.py last - settings in local.py override everything else\ntry:\n\n from .local import * # noqa\n\n try:\n INSTALLED_APPS += PROD_APPS # noqa\n except NameError:\n pass\n\nexcept ImportError:\n pass\n\nif DEBUG:\n INSTALLED_APPS += (\"dashboard\",)\n\n# importing test settings file if 
necessary (TODO should be done better)\nif len(sys.argv) > 1 and sys.argv[1] in [\"test\", \"harvest\"]:\n    from .testing import *  # noqa\n","repo_name":"mbateman/UK-Polling-Stations","sub_path":"polling_stations/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"13917226391","text":"#!/usr/bin/env python3\n\nimport requests\nimport os\nimport csv\n\n# SONG = 2421807\n__URL__ = 'https://api.planningcenteronline.com/services/v2/songs/'\n__ID__ = os.environ.get('ID') or ''\n__SECRET__ = os.environ.get('SECRET') or ''\n__COOKIE__ = {'_account_center_session' : os.environ.get('COOKIE') or ''}\n\n\nclass Song:\n\n def __init__(self, id, name, yt, spotify):\n self.id = id\n self.name = name\n self.yt = yt\n self.spotify = spotify\n\ndef get_songs():\n\n param = '?per_page=100'\n urlpath = f'{__URL__}{param}'\n r = requests.get(urlpath, auth=requests.auth.HTTPBasicAuth(__ID__,__SECRET__))\n j = r.json()\n s_total = j['meta']['total_count']\n s_list = []\n\n while len(s_list) < s_total:\n for song in j.get('data'):\n\n # Create Song\n s_obj = Song(song['id'], song['attributes']['title'], [], [])\n\n s_list.append(s_obj)\n if (j['links'].get('next')):\n r = requests.get(j['links']['next'], auth=requests.auth.HTTPBasicAuth(__ID__,__SECRET__))\n j = r.json()\n\n return s_list\n\ndef get_song_attachments(song_obj):\n\n s_id = str(song_obj.id)\n s_name = song_obj.name\n print(f'[{s_id}] {s_name}')\n\n param = f'{s_id}/attachments?per_page=100'\n urlpath = f'{__URL__}{param}'\n\n r1 = requests.get(urlpath, auth=requests.auth.HTTPBasicAuth(__ID__,__SECRET__))\n j1 = r1.json()\n\n for a in j1.get('data'):\n # AttachmentSpotify or AttachmentYoutube or AttachmentLink\n pco = str(a['attributes']['pco_type'])\n if (pco == 'AttachmentYoutube'):\n get_url_from_attachment('yt', a, song_obj)\n elif (pco == 'AttachmentSpotify'):\n get_url_from_attachment('sp', a, song_obj)\n # elif (pco == 'AttachmentLink'):\n #link_url = str(a['attributes']['remote_link'])\n #print(f'Link: {link_url}')\n\n #print('+++')\n\n param2 = f'{s_id}/arrangements?per_page=100'\n urlpath2 = f'{__URL__}{param2}'\n\n r2 = requests.get(urlpath2, auth=requests.auth.HTTPBasicAuth(__ID__,__SECRET__))\n j2 = r2.json()\n\n for ar in j2.get('data'):\n ar_id = str(ar['id'])\n\n param3 = f'{s_id}/arrangements/{ar_id}/attachments?per_page=100'\n urlpath3 = f'{__URL__}{param3}'\n\n r3 = requests.get(urlpath3, auth=requests.auth.HTTPBasicAuth(__ID__,__SECRET__))\n j2 = r3.json()\n\n for a in j2.get('data'):\n # AttachmentSpotify or AttachmentYoutube or AttachmentLink\n pco = str(a['attributes']['pco_type'])\n if (pco == 'AttachmentYoutube'):\n get_url_from_attachment('yt', a, song_obj)\n elif (pco == 'AttachmentSpotify'):\n get_url_from_attachment('sp', a, song_obj)\n elif (pco == 'AttachmentLink'):\n link_url = str(a['attributes']['remote_link'])\n #print(f'Link: {link_url}')\n\n #print('-------------------')\n return 0\n\ndef get_url_from_attachment(urltype, a, song_obj):\n if (a['attributes']['remote_link']):\n link_url = str(a['attributes']['remote_link'])\n if (urltype == 'yt'):\n song_obj.yt.append(link_url)\n #print(f'Youtube: {link_url}')\n elif (urltype == 'sp'):\n song_obj.spotify.append(link_url)\n #print(f'Spotify: {link_url}')\n\n else:\n urlpath = str(a['attributes']['url'])\n # Hack to allow access to attachments\n r2 = requests.get(urlpath, cookies=__COOKIE__)\n link_url = str(r2.url)\n if (urltype == 'yt'):\n song_obj.yt.append(link_url)\n #print(f'Youtube: {link_url}')\n elif (urltype == 'sp'):\n song_obj.spotify.append(link_url)\n #print(f'Spotify: {link_url}')\n\ndef write_links_to_csv(song_list):\n with open('export.csv', mode='w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(['ID', 
'Name', 'Youtube', 'Spotify'])\n\n for song in song_list:\n csv_writer.writerow([song.id, song.name, song.yt, song.spotify])\n\nsongs = get_songs()\nprint('Got Songs')\nfor song in songs:\n get_song_attachments(song)\nprint('Got Links')\nwrite_links_to_csv(songs)\n","repo_name":"evanhwk/SCPlanningC","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42506866583","text":"from plivo.utils.validators import *\nfrom plivo.xml import (\n PlivoXMLElement,\n map_type\n)\n\n\nclass StreamElement(PlivoXMLElement):\n _name = 'Stream'\n _nestable = [\n ]\n\n def __init__(\n self,\n content,\n bidirectional=None,\n audioTrack=None,\n streamTimeout=None,\n statusCallbackUrl=None,\n statusCallbackMethod=None,\n contentType=None,\n extraHeaders=None\n ):\n super(StreamElement, self).__init__()\n\n self.content = content\n self.bidirectional = bidirectional\n self.audioTrack = audioTrack\n self.streamTimeout = streamTimeout\n self.statusCallbackUrl = statusCallbackUrl\n self.statusCallbackMethod = statusCallbackMethod\n self.contentType = contentType\n self.extraHeaders = extraHeaders\n\n def to_dict(self):\n d = {\n 'bidirectional': self.bidirectional,\n 'audioTrack': self.audioTrack,\n 'streamTimeout': self.streamTimeout,\n 'statusCallbackUrl': self.statusCallbackUrl,\n 'statusCallbackMethod': self.statusCallbackMethod,\n 'contentType': self.contentType,\n 'extraHeaders': self.extraHeaders,\n }\n return {\n k: six.text_type(map_type(v))\n for k, v in d.items() if v is not None\n }","repo_name":"plivo/plivo-python","sub_path":"plivo/xml/streamElement.py","file_name":"streamElement.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"2"}
+{"seq_id":"4218251339","text":"#!/usr/bin/python3\n\"\"\"script that changes the name of a State object from the\ndatabase hbtn_0e_6_usa\"\"\"\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.orm import Session\n\nif __name__ == \"__main__\":\n a1 = sys.argv[1]\n a2 = sys.argv[2]\n a3 = sys.argv[3]\n en = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(a1, a2, a3),\n pool_pre_ping=True)\n en.connect()\n metadata = MetaData()\n session = Session(en)\n session.query(State).filter(State.name.ilike(\"%a%\")\n ).delete(synchronize_session='fetch')\n session.commit()\n session.close()\n","repo_name":"rodrigoandresd/holbertonschool-higher_level_programming","sub_path":"python-object_relational_mapping/13-model_state_delete_a.py","file_name":"13-model_state_delete_a.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71527651565","text":"'''У лінійному масиві знайти максимальний елемент.\r\nВставте порядковий номер елемента за ним, пересунувши всі залишилися на одну позицію вправо. '''\r\nimport random\r\n\r\narray = [random.randint(10, 100) for i in range(10)]\r\nprint('Даний масив:\\n', array)\r\nmaxElemIndex = array.index(max(array))\r\narray.insert(maxElemIndex+1, maxElemIndex)\r\n\r\nprint('Новий масив:\\n',array)","repo_name":"tomasolodun/kolokvium","sub_path":"47.py","file_name":"47.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"3511962069","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\n\nclass Room:\n def __init__(self, name=None, description=None, items=None):\n self.name = name\n self.description = description\n self.n_to = None\n self.s_to = None\n self.e_to = None\n self.w_to = None\n self.items = []\n\n def __str__(self):\n return f'room: {self.name}, description: {self.description}'\n\n def get_item(self, item_name):\n for item in self.items:\n if item.name == item_name:\n return item\n return None\n\n def get_room(self, direction):\n if direction == \"n\":\n return self.n_to\n elif direction == \"s\":\n return self.s_to\n elif direction == \"e\":\n return self.e_to\n elif direction == \"w\":\n return self.w_to\n else:\n return None\n\n# test = Room(\"foyer\", \"kinda dusty\", items =['hat','sword', 'sheild'])\n\n# print(test.room_items())","repo_name":"dlimla/Intro-Python-II","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"40180880073","text":"# thanks to @Skastickers for stickers....\n# Among us.....\n#credits to catuserbot\n\n\nimport asyncio\n\nfrom userbot.utils import admin_cmd, edit_or_reply, sudo_cmd\nfrom userbot import ALIVE_NAME, CMD_HELP\n\nDEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else \"Hell User\"\n\n\n@bot.on(admin_cmd(pattern=\"imp(|n) (.*)\", outgoing=True))\n@bot.on(sudo_cmd(pattern=\"imp(|n) (.*)\", allow_sudo=True))\nasync def _(event):\n legendx22 = bot.uid\n USERNAME = f\"tg://user?id={legendx22}\"\n name = event.pattern_match.group(2)\n cmd = event.pattern_match.group(1).lower()\n text1 = await edit_or_reply(event, \"Hmm... Looks like Something is wrong here🤔🧐!!\")\n await asyncio.sleep(2)\n await text1.delete()\n stcr1 = await event.client.send_file(\n event.chat_id, \"CAADAQADRwADnjOcH98isYD5RJTwAg\"\n )\n text2 = await event.reply(\n f\"**[{DEFAULTUSER}]({USERNAME}) :** I have to call discussion😯\"\n )\n await asyncio.sleep(3)\n await stcr1.delete()\n await text2.delete()\n stcr2 = await event.client.send_file(\n event.chat_id, \"CAADAQADRgADnjOcH9odHIXtfgmvAg\"\n )\n text3 = await event.reply(\n f\"**[{DEFAULTUSER}]({USERNAME}) :** We have to eject the imposter or will lose😥 \"\n )\n await asyncio.sleep(3)\n await stcr2.delete()\n await text3.delete()\n stcr3 = await event.client.send_file(\n event.chat_id, \"CAADAQADOwADnjOcH77v3Ap51R7gAg\"\n )\n text4 = await event.reply(f\"**Others :** Where???🤨 \")\n await asyncio.sleep(2)\n await text4.edit(f\"**Others :** Who??🤔 \")\n await asyncio.sleep(2)\n await text4.edit(\n f\"**[{DEFAULTUSER}]({USERNAME}) :** Its {name} , I saw {name} using🤨 vent,\"\n )\n await asyncio.sleep(3)\n await text4.edit(f\"**Others :**Okay.. 😲Vote {name} \")\n await asyncio.sleep(2)\n await stcr3.delete()\n await text4.delete()\n stcr4 = await event.client.send_file(\n event.chat_id, \"CAADAQADLwADnjOcH-wxu-ehy6NRAg\"\n )\n hellevent = await event.reply(f\"{name} is ejected.......🤐\")\n await asyncio.sleep(2)\n await hellevent.edit(\"ඞㅤㅤㅤㅤ ㅤㅤㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤඞㅤㅤㅤㅤ ㅤㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤ ඞㅤㅤㅤㅤㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤ ඞㅤㅤㅤㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤㅤ ඞㅤㅤㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤ ඞㅤㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤ ඞㅤㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤㅤ ඞㅤ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤㅤㅤ ඞ\")\n await asyncio.sleep(0.5)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤㅤㅤ ㅤ\")\n await asyncio.sleep(0.2)\n await stcr4.delete()\n if cmd == \"\":\n await hellevent.edit(\n f\". 。 • ゚ 。 .\\n . . 。 。 . \\n\\n . 。 ඞ 。 . • •\\n\\n ゚{name} was an Imposter. 。 . 。 . 。 . \\n . 。 . \\n ' 0 Impostor remains 。 . . 。 . 。 . 。 . . . , 。\\n ゚ . . , 。 . . 。\"\n )\n await asyncio.sleep(4)\n await hellevent.delete()\n await event.client.send_file(event.chat_id, \"CAADAQADLQADnjOcH39IqwyR6Q_0Ag\")\n elif cmd == \"n\":\n await hellevent.edit(\n f\". 。 • ゚ 。 .\\n . . 。 。 . \\n\\n . 。 ඞ 。 . • •\\n\\n ゚{name} was not an Imposter. 。 . 。 . 。 . \\n . 。 . \\n ' 1 Impostor remains 。 . . 。 . 。 . 。 . . . , 。\\n ゚ . . , 。 . . 
。\"\n )\n await asyncio.sleep(4)\n await hellevent.delete()\n await event.client.send_file(event.chat_id, \"CAADAQADQAADnjOcH-WOkB8DEctJAg\")\n\n\n@bot.on(admin_cmd(pattern=\"timp(|n) (.*)\", outgoing=True))\n@bot.on(sudo_cmd(pattern=\"timp(|n) (.*)\", allow_sudo=True))\nasync def _(event):\n name = event.pattern_match.group(2)\n cmd = event.pattern_match.group(1).lower()\n hellevent = await edit_or_reply(event, f\"{name} is ejected.......\")\n await asyncio.sleep(2)\n await hellevent.edit(\"ඞㅤㅤㅤㅤ ㅤㅤㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤඞㅤㅤㅤㅤ ㅤㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤ ඞㅤㅤㅤㅤㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤ ඞㅤㅤㅤㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤㅤ ඞㅤㅤㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤ ඞㅤㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤ ඞㅤㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤㅤ ඞㅤ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤㅤㅤ ඞ\")\n await asyncio.sleep(0.8)\n await hellevent.edit(\"ㅤㅤㅤㅤㅤㅤㅤㅤ ㅤ\")\n await asyncio.sleep(0.2)\n if cmd == \"\":\n await hellevent.edit(\n f\". 。 • ゚ 。 .\\n . . 。 。 . \\n\\n . 。 ඞ 。 . • •\\n\\n ゚ {name} was an Imposter. 。 . 。 . 。 . \\n . 。 . \\n ' 0 Impostor remains 。 . . 。 . 。 . 。 . . . , 。\\n ゚ . . , 。 . . 。\"\n )\n elif cmd == \"n\":\n await hellevent.edit(\n f\". 。 • ゚ 。 .\\n . . 。 。 . \\n\\n . 。 ඞ 。 . • •\\n\\n ゚ {name} was not an Imposter. 。 . 。 . 。 . \\n . 。 . \\n ' 1 Impostor remains 。 . . 。 . 。 . 。 . . . , 。\\n ゚ . . , 。 . . 。\"\n )\n\n\nCMD_HELP.update(\n {\n \"imposter\": \"**Plugin :** `imposter__`\\\n\\n\\n**Syntax : **`.imp` / `.impn` \\\n\\n**Usage : ** Find imposter with stickers.\\\n\\n\\n**Syntax : **`.timp` / `.timpn` \\\n\\n**Usage : ** Find imposter only text.\"\n }\n)\n","repo_name":"LEGENDXOP/LEGEND-BOT","sub_path":"userbot/plugins/amongus.py","file_name":"amongus.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"zh","doc_type":"code","stars":55,"dataset":"github-code","pt":"2"}
+{"seq_id":"916112075","text":"\"\"\"\nLoad training and train data from image files\n\"\"\"\nimport os\nimport numpy as np\nfrom skimage.io import imsave, imread\n\ndb_path = 'db/'\nimg_height = 480\nimg_width = 640\n\ndef create_db(roi=None):\n \"\"\"\n Create db files from raw images\n\n Parameters:\n -roi: region of interest [y,height,x,width]\n \"\"\"\n if roi is not None:\n roi_y = roi[0]\n roi_height = roi[1]\n roi_x = roi[2]\n roi_width = roi[3]\n else:\n roi_y = 0\n roi_height = img_height\n roi_x = 0\n roi_width = img_width\n\n subjects = [name for name in os.listdir(db_path) if os.path.isdir(os.path.join(db_path,name))]\n\n num_img = 0\n db_structure = []\n for subject in subjects:\n subject_path = os.path.join(db_path, subject)\n num_subject_files = len([fname for fname in os.listdir(subject_path) if os.path.isfile(os.path.join(subject_path, fname))])\n num_img += num_subject_files\n db_structure.append([subject,num_subject_files])\n\n imgs = np.ndarray((num_img, roi_height, roi_width), dtype=np.uint8)\n\n idx = 0\n print(\"Loading image...\")\n for subject in subjects:\n subject_path = os.path.join(db_path, subject)\n images = os.listdir(subject_path)\n for image_fname in images:\n if image_fname.endswith('.png'):\n img = imread(os.path.join(subject_path, image_fname), as_grey=True)\n if roi is not None:\n img = np.array([img[roi_y:roi_y+roi_height,roi_x:roi_x+roi_width]])\n else:\n img = np.array([img])\n imgs[idx] = img\n\n if idx % 100 == 0:\n print('Completed {0}/{1} images'.format(idx, num_img))\n\n idx += 1\n\n print(\"Loading complete.\")\n\n np.save('img_db.npy',imgs)\n\n print(\"Images saved to img_db.npy\")\n\ndef load_db():\n try:\n return np.load('img_db.npy')\n except:\n return None\n","repo_name":"mohikhsan/ultrasound-dl","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"7065535458","text":"class Solution:\n def addStrings(self, num1: str, num2: str) -> str:\n int1 = 0\n int2 = 0\n for i in range(len(num1)):\n int1 = int1 * 10 + int(num1[i])\n for i in range(len(num2)):\n int2 = int2 * 10 + int(num2[i])\n return str(int1 + int2)\n ","repo_name":"ziyuan-shen/leetcode_algorithm_python_solution","sub_path":"easy/ex415.py","file_name":"ex415.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42324169950","text":"from vision.yolo.detector import Detector\nfrom PIL import Image as pimg\nfrom PIL import ImageDraw\nfrom cv2 import cv2\nimport glob\nimport imutils\nimport numpy as np\nimport scipy.misc\nimport os\nfrom vision.orientation.orientation_detector import OrientationDetectorNet\nfrom utils.image_shifter import RuntimeShifter\nfrom aruco import Calibration\nimport torch\nfrom vision.segmentation.detector import InstanceDetector\nimport torch.utils.model_zoo\nimport os\n\nYOLOCFGPATH = 'vision/yolo/'\nIMAGE_NAME = \"webcam_capture.png\"\nORIENTATION_MODEL_PATH = \"orientation_cnn.pth\"\n\n\nclass Vision:\n def __init__(self, segmentation_weight_path):\n self.current_directory = os.getcwd()\n yolo_cfg_path_absolute = self.current_directory + YOLOCFGPATH\n self.image_path = self.current_directory + \"/\" + IMAGE_NAME\n self.mask_path = self.current_directory + \"/masks/\"\n \"\"\"self.detector = Detector(os.path.join(yolo_cfg_path_absolute, 'cfg/obj.data'),\n os.path.join(yolo_cfg_path_absolute, 'cfg/yolov3-tiny.cfg'),\n os.path.join(yolo_cfg_path_absolute, 'yolov3-tiny_final.weights'))\"\"\"\n self.counter = 0\n self.first_run = True\n self.results = None\n self.orientationCNN = OrientationDetectorNet()\n #self.orientationCNN.load_state_dict(torch.load(ORIENTATION_MODEL_PATH))\n #self.shifter = image_shifter.RuntimeShifter\n self.calibrate = Calibration()\n self.segmentation_detector = InstanceDetector(segmentation_weight_path)\n\n def __del__(self):\n pass\n\n def find_parts(self, class_id, fuse_index=-1):\n class_id1, class_id2 = class_id\n part = (-1, -1, -1, -1, -1)\n # result is an array of dictionaries\n found_class_index = 0\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in self.results:\n if (class_id1 == cls_pred or class_id2 == cls_pred) and cls_conf > 0.6:\n if fuse_index > -1 and fuse_index != found_class_index:\n found_class_index += 1\n continue\n width = x2 - x1\n height = y2 - y1\n x_coord = width / 2 + x1\n y_coord = height / 2 + y1\n if height > width:\n orientation = OrientationEnum.VERTICAL.value\n grip_width = width * 0.58\n elif width > height:\n orientation = OrientationEnum.HORIZONTAL.value\n grip_width = height * 0.58\n else:\n orientation = OrientationEnum.HORIZONTAL.value\n grip_width = height * 0.58\n print(\"[W] Could not determine orientation, using 1 as default\")\n #new_part_id = convert_to_part_id(part_class)\n part = (cls_pred, x_coord, y_coord, orientation, grip_width)\n break\n print(part)\n return part\n\n def segment(self, np_img):\n results = self.segmentation_detector.predict(np_img)\n classes = [\"PCB\", \"BottomCover\", \"BlueCover\", \"WhiteCover\", \"BlackCover\"]\n masks = []\n for i in range(len(results[\"instances\"].pred_classes)):\n mask_image = results[\"instances\"].pred_masks[i].cpu().numpy()\n mask_image = np.asarray(mask_image * 255, dtype=np.uint8)\n moments = cv2.moments(mask_image)\n cX = int(moments[\"m10\"] / moments[\"m00\"])\n cY = int(moments[\"m01\"] / moments[\"m00\"])\n center = (cX, cY)\n area = moments[\"m00\"]\n part = classes[results['instances'].pred_classes[i]]\n score = results['instances'].scores[i]\n mask = {\"part\": part, \"score\": score, \"area\": area, \"center\": center, \"ignored\": False, \"ignore_reason\": \"\", \"mask\": mask_image}\n masks.append(mask)\n return masks\n\n def detect_object(self):\n np_img = pimg.open(self.image_path)\n self.results = self.detector.predict(np_img)\n self.draw_boxes(self.results)\n\n def draw_boxes(self, results):\n source_img = 
pimg.open(self.image_path).convert(\"RGBA\")\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in self.results:\n if cls_conf > 0.6:\n width = x2 - x1\n height = y2 - y1\n x_coord = width / 2 + x1\n y_coord = height / 2 + y1\n draw = ImageDraw.Draw(source_img)\n draw.rectangle(((x1, y1), (x2, y2)), fill=None, outline=(200, 0, 150), width=6)\n draw.text((x_coord, y_coord), convert_from_part_id(int(cls_pred)))\n source_img.save('boundingboxes.png')\n\n def is_facing_right(self, np_image):\n pil_image = pimg.fromarray(np_image)\n resized_image = pil_image.resize((224, 224))\n resized_image_np = np.array(resized_image) / 255\n image_tensor = torch.from_numpy(resized_image_np).permute(2, 0, 1).float()\n image_tensor = image_tensor.unsqueeze(0)\n self.orientationCNN.eval()\n with torch.no_grad():\n prediction = self.orientationCNN(image_tensor)\n result = prediction[0][0] >= 0.5\n print(\"[INFO] Part is facing right. {}\".format(result))\n return result\n\n def get_image_path(self):\n return self.image_path\n\n def find_part_for_grasp(self):\n masks = glob.glob(self.mask_path + \"*\")\n number_of_masks = len(masks)\n print(f\"There are {number_of_masks} masks\")\n contour_sizes = []\n for index, file_path in enumerate(masks):\n mask = pimg.open(file_path)\n mask = np.array(mask)\n print(f\"Finding contours on image {index + 1}/{number_of_masks}\")\n contours = self.find_contour(mask)\n for c in contours:\n area = cv2.contourArea(c)\n if area < 5000:\n continue\n else:\n contour_sizes.append(area)\n\n print(contour_sizes)\n part_to_grasp = contour_sizes.index(max(contour_sizes))\n print(part_to_grasp)\n return part_to_grasp\n\n\nif __name__ == \"__main__\":\n hey = Vision()\n masks = glob.glob(hey.mask_path + \"*\")\n part_to_grasp = hey.find_part_for_grasp()\n mask = pimg.open(masks[part_to_grasp])\n mask = np.array(mask)\n color_image = cv2.imread(\"color1582023984.5763314-0.png\")\n\n depth = cv2.imread(\"depth.png\")\n #depth = image_shifter.shift_image(depth)\n dim = (720, 1280)\n mask = cv2.resize(mask, dim)\n mask_contours = hey.find_contour(mask)\n x, y = hey.find_center(mask_contours)\n z = hey.get_z(x, y, depth)\n print(x, y, z)\n x, y, z = hey.calibrate.calibrate(color_image, x, y, z)\n print(x, y, z)\n #hey.vector_normal(x, y, img, depth)","repo_name":"EmilRyberg/P6BinPicking","sub_path":"vision/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":6780,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"10300142277","text":"# https://leetcode.com/problems/sort-colors/\nclass Solution:\n\n def sortColors(self, nums) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n for i in range(len(nums)):\n for j in range(len(nums)-1):\n if nums[j] > nums[i]:\n nums[j], nums[i] = nums[i], nums[j]\n\n\nif __name__=='__main__':\n nums = [1 ,2 ,2 ,2, 0 , 0, 1]\n s = Solution()\n s.sortColors(nums)\n print(nums)\n","repo_name":"bluex314/Python-Study","sub_path":"leetCode/array/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"6380753272","text":"import copy\nimport numpy as np\nfrom geopy.distance import distance\nimport matplotlib.pyplot as plt\n\n\nclass GWO:\n\n def __init__(self, locations, time_windows, service_time, demands, depot, cap, speed):\n \"\"\"\n\n :param locations: 需求点坐标\n :param time_windows: 时间窗\n :param demands: 需求\n :param service_time: 服务时间\n :param depot: 配送点坐标\n :param cap: 车辆最大容量\n :param speed: 车速\n :return:\n \"\"\"\n self.speed = speed\n self.locations = locations\n self.time_windows = time_windows\n self.service_time = service_time\n self.demands = demands\n self.depot = depot\n self.cap = cap\n self.seq_dic = {i: i for i in range(1, len(self.demands) + 1)}\n\n self.process_demand()\n\n self.num_customers = len(self.demands) # 顾客数\n\n self.dist_matrix = self._compute_distance_matrix()\n\n self.pop_size = 100 # 种群个数\n self.max_iter = 200 # 最大迭代次数\n\n def process_demand(self):\n k = len(self.demands) + 1\n reality_customers = len(self.demands)\n i = 0\n while i < reality_customers:\n if self.demands[i] > self.cap:\n self.demands[i] -= self.cap\n self.demands = np.append(self.demands, self.cap)\n self.locations = np.append(self.locations, self.locations[i].reshape(1, 2), axis=0)\n self.time_windows = np.append(self.time_windows, self.time_windows[i].reshape(1, 2), axis=0)\n self.service_time = np.append(self.service_time, self.service_time[i])\n self.seq_dic[k] = i + 1\n k += 1\n else:\n i += 1\n\n # 计算距离矩阵,根据经纬度计算距离\n def _compute_distance_matrix(self):\n dist_matrix = np.zeros((self.num_customers, self.num_customers))\n for i in range(self.num_customers):\n for j in range(self.num_customers):\n if i == j:\n dist_matrix[i][j] = 0\n else:\n dist_matrix[i][j] = distance(self.locations[i], self.locations[j]).km\n return dist_matrix\n\n # 验证方案是否可行,需求约束和时间窗\n def _feasible(self, routes):\n # 检查需求约束\n for route in routes:\n demand = sum([self.demands[i - 1] for i in route])\n if demand > self.cap:\n return False\n\n # 检查时间窗约束\n for route in routes:\n time = 0\n for idx, i in enumerate(route):\n if idx == 0:\n time = distance(self.depot, self.locations[route[0] - 1]).km / self.speed\n else:\n time += self.dist_matrix[route[idx - 1] - 1][i - 1] / self.speed\n # time = max(time, self.time_windows[i - 1][0])\n time += self.service_time[i - 1]\n if time > self.time_windows[i - 1][1]:\n return False\n return True\n\n # 计算目标函数,即总距离最短\n def evaluate(self, routes):\n if not self._feasible(routes):\n return float('inf')\n else:\n cost = 0\n for route in routes:\n route_cost = distance(self.depot, self.locations[route[0] - 1]).km\n for idx, i in enumerate(route):\n route_cost += self.dist_matrix[route[idx - 1] - 1][i - 1]\n route_cost += distance(self.depot, self.locations[route[-1] - 1]).km\n cost += route_cost\n return cost\n\n # 对灰狼个体进行解码,得到运输路线\n def decode(self, x):\n seq = np.argsort(x) + 1\n routes = []\n i = 0\n d = 0\n t = 0\n route = []\n while i < len(seq):\n if d == 0:\n t += distance(self.depot, self.locations[seq[i] - 1]).km / self.speed\n else:\n t += distance(self.locations[seq[i - 1] - 1], self.locations[seq[i] - 1]).km / self.speed\n d += self.demands[seq[i] - 1]\n if d > self.cap or t > self.time_windows[seq[i] - 1][1]:\n routes.append(route)\n d = 0\n t = 0\n route = []\n continue\n route.append(seq[i])\n i += 1\n return routes\n\n # 初始化灰狼个体\n def init_wolf(self):\n return np.random.uniform(-10, 10, size=(self.pop_size, self.num_customers))\n\n # 求解\n def solve(self):\n # 初始化种群\n pop = self.init_wolf()\n # 计算目标函数\n fitness = np.zeros(self.pop_size)\n for i in 
range(self.pop_size):\n routes = self.decode(pop[i])\n fitness[i] = self.evaluate(routes)\n pop = pop[np.argsort(fitness)]\n fitness.sort()\n alpha_wolf, beta_wolf, gamma_wolf = copy.copy(pop[: 3])\n\n convergence_curve = np.zeros(self.max_iter) # 保存每次迭代的最优个体适应度\n # 开始迭代\n for Iter in range(1, self.max_iter + 1):\n a = 2 * (1 - Iter / self.max_iter)\n for i in range(self.pop_size):\n A1, A2, A3 = a * (2 * np.random.rand() - 1), a * (\n 2 * np.random.rand() - 1), a * (2 * np.random.rand() - 1)\n C1, C2, C3 = 2 * np.random.rand(), 2 * np.random.rand(), 2 * np.random.rand()\n X1 = alpha_wolf - A1 * abs(C1 - alpha_wolf - pop[i])\n X2 = beta_wolf - A2 * abs(C2 - beta_wolf - pop[i])\n X3 = gamma_wolf - A3 * abs(C3 - gamma_wolf - pop[i])\n x_new = (X1 + X2 + X3) / 3\n f_new = self.evaluate(self.decode(x_new))\n if f_new < fitness[i]:\n pop[i] = x_new.copy()\n fitness[i] = f_new\n pop = pop[np.argsort(fitness)]\n fitness.sort()\n alpha_wolf, beta_wolf, gamma_wolf = copy.copy(pop[: 3])\n convergence_curve[Iter - 1] = fitness[0]\n print(f\"第{Iter}次迭代:目标值{fitness[0]}\")\n return fitness[0], self.decode(alpha_wolf), convergence_curve, self.seq_dic\n\n\nif __name__ == '__main__':\n import pandas as pd\n\n df = pd.read_excel(\"vrptw数据.xlsx\")\n num_customers = 36 # 客户数量\n depot_location = np.array([24.212273, 109.338894]) # 车库位置\n customer_locations = df.iloc[:, 0].str.split(',').tolist() # 客户位置\n customer_locations = np.array([list(map(float, i)) for i in customer_locations])\n customer_demands = df.iloc[:, 1].values # 客户需求\n time_windows = df.iloc[:, 2].str.split('-').tolist() # 客户时间窗口\n time_windows = np.array([list(map(int, i)) for i in time_windows])\n service_time = df.iloc[:, 3].values # 服务时间\n cap = 13 # 车辆容量\n speed = 40 # 车速,KM/h\n\n # obj表示最优目标函数,routes表示最优方案,convergence_curve存储每次迭代的最优个体目标值\n obj, routes, convergence_curve, seq_dic = GWO(customer_locations, time_windows, service_time, customer_demands,\n depot_location, cap, speed).solve()\n\n for i in range(len(routes)):\n for j in range(len(routes[i])):\n routes[i][j] = seq_dic[routes[i][j]]\n\n # 最优配送方案\n file = open(\"solution.txt\", \"w\", encoding='utf-8')\n print(f\"最优配送方案如下,总距离为{obj}, 共有{len(routes)}辆车:\", file=file)\n for idx, route in enumerate(routes):\n print(f\"第{idx + 1}辆车:{route}\", file=file)\n\n plt.plot(range(len(convergence_curve)), convergence_curve)\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"distance\")\n plt.savefig(\"迭代图像.png\")\n plt.show()\n\n","repo_name":"MrBin226/code","sub_path":"接单项目/灰狼算法求解VRPTW/GWO_VRPTW.py","file_name":"GWO_VRPTW.py","file_ext":"py","file_size_in_byte":7850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"34583060285","text":"import pandas as pd\nimport math\nimport glob\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom bokeh.io import output_file, show\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource, HoverTool\n\n#this imports all the data into one list with no marking for where the years start and st\npath = 'yob*.csv'\nfiles = glob.glob(path)\n\ndf= []\nfor file in files :\n df.append(pd.read_csv(file, index_col= None, header=None))\n\n#Inspect loaded files\nprint('INSPECT NAMES')\nprint(len(df))\nprint(df[0].head)\nprint(df[0].info())\n\n# Add column names\ni=0\nn = 1880\nfor item in df :\n df[i].columns = ['Name', 'Sex', 'Count']\n #print(df[i].info())\n df[i]['Year']= n\n i= i + 1\n n= n + 1\n\n#Concatenate the list of dataframes into one dataframe\nnames= pd.concat(df)\nprint('CONCATENTATED NAMES DATAFRAME')\nprint(names.info())\n\n#_______________________________________________________________________________\n# Initial Exploratory data review\n# plt.scatter(x=names.Year, y=names.Count)\n# plt.xlabel('Year')\n# plt.ylabel('Count')\n# plt.title('Frequency and Number of Names')\n# plt.savefig('by_year_scatter.pdf')\n# plt.show()\n#\n# names= names.set_index('Sex')\n# plt.scatter(x=names.loc['F'].Year, y=names.loc['F'].Count)\n# plt.xlabel('Year')\n# plt.ylabel('Count')\n# plt.title('Frequency and Number of Girls Names')\n# plt.savefig('female_by_year_scatter.pdf')\n# plt.show()\n#\n# plt.scatter(x=names.loc['M'].Year, y=names.loc['M'].Count)\n# plt.xlabel('Year')\n# plt.ylabel('Count')\n# plt.title('Frequency and Number of Boys Names')\n# plt.savefig('male_by_year_scatter.pdf')\n# plt.show()\n#_______________________________________________________________________________\n#calculate proportion of count for each name\nnames2 = names.copy()\ntotal_births_by_year = names2.groupby('Year')['Count'].transform('sum')\nnames2['pct_name']= (names2['Count']/total_births_by_year)* 100\nprint('NAMES DATAFRAME WITH PCT NAME ADDED')\nprint(names2.tail())\nprint(names2.shape)\n#_______________________________________________________________________________\n#create dataframe with female names\nfemale = names2['Sex'] == 'F'\nnames_f= names2[female]\nprint('FEMALE NAME DATAFRAME')\nprint(names_f.tail())\n#Select top 5 female names for each year\ntop5_f= names_f.groupby('Year').head()\ntop5_female= top5_f.reset_index()\ndel top5_female['index']\ndel top5_female['Sex']\ndel top5_female['Count']\ntop5_fnames= top5_female.set_index('Name')\nprint('TOP 5 FEMALE NAMES')\nprint(top5_fnames.head())\n\ntop5_fnames1= top5_fnames.reset_index()\ntop5_fnames1= top5_fnames1.set_index('Year')\nprint(top5_fnames1.head())\n\n# i=2000\n# for item in top5_fnames1 :\n# # if i <= 1990 :\n# x = top5_fnames1['Name'][i]\n# y = top5_fnames1['pct_name'][i]\n# plt.scatter(x, y)\n# i= i + 5\n#\n# plt.xticks(rotation='vertical')\n# plt.ylim(-0.005, 5)\n# plt.subplots_adjust(left=0.1)\n# plt.ylabel('Pecent of Names')\n# plt.title('Top 5 Girls Names')\n# plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n# plt.margins(0.1)\n# plt.savefig('scatter_top5_girls_names1.pdf')\n# plt.show()\n#\n#\n# pritn()\n\n# #Pivot the dataframe to make years columns\ntop5_fnames_tidy = top5_fnames.pivot_table(values='pct_name', index=['Name'], columns=['Year'])\ntop5_fnames_tidy = top5_fnames_tidy.fillna(0)\ntop5_fnames_tidy= top5_fnames_tidy.reset_index()\ntop5_fnames_tidy= top5_fnames_tidy.set_index('Name')\n\ndf=[]\nn= 0\nfor item in top5_fnames_tidy :\n if n <= 45 :\n 
top5f_by_year= top5_fnames_tidy.iloc[n]\n df.append(top5f_by_year)\n n = n + 1\ntop5_fnames= pd.concat(df, axis=1)\ntop5_fnames= top5_fnames.reset_index()\n\n#Plots for top 5 girls names over the years\nn=1\nfor item in top5_fnames :\n if n <= 2 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=3\nfor item in top5_fnames :\n if n <= 4 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=5\nfor item in top5_fnames :\n if n <= 6 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=7\nfor item in top5_fnames :\n if n <= 8 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=9\nfor item in top5_fnames :\n if n <= 10 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=11\nfor item in top5_fnames :\n if n <= 12 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=13\nfor item in top5_fnames :\n if n <= 14 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=15\nfor item in top5_fnames :\n if n <= 16 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', 
markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=17\nfor item in top5_fnames :\n if n <= 18 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=19\nfor item in top5_fnames :\n if n <= 20 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=21\nfor item in top5_fnames :\n if n <= 22 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=23\nfor item in top5_fnames :\n if n <= 24 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\nn=25\nfor item in top5_fnames :\n if n <= 26 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\nplt.xticks(rotation='vertical')\nplt.ylim(-0.005, 5)\nplt.subplots_adjust(left=0.1)\nplt.ylabel('Pecent of Names')\nplt.title('Top 5 Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('scatter_top5_girls_names1.pdf')\nplt.show()\n\n\ndef top5_girls_names_1():\n n=1\n for item in top5_fnames :\n if n <= 9 :\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Pecent of Names')\n plt.title('Top 5 Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_girls_names1.pdf')\n plt.show()\n\ndef top5_girls_names_2():\n n=10\n for item in top5_fnames :\n if n <= 19:\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_girls_names2.pdf')\n plt.show()\n\ndef top5_girls_names_3():\n n=20\n for item in top5_fnames :\n if n <= 29:\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n 
plt.savefig('scatter_top5_girls_names3.pdf')\n plt.show()\n\ndef top5_girls_names_4():\n n=30\n for item in top5_fnames :\n if n <= 39:\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_girls_names4.pdf')\n plt.show()\n\ndef top5_girls_names_5():\n n=40\n for item in top5_fnames :\n if n <= 46:\n x= top5_fnames['Year']\n y = top5_fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_girls_names5.pdf')\n plt.show()\n\n#_______________________________________________________________________________\n#Create dataframe with males names\nnames_m = names2['Sex'] == 'M'\nnames_m= names2[names_m]\nprint('MALE NAME DATAFRAME')\nprint(names_m.tail())\n#Select top 5 male names for each year\ntop5_m= names_m.groupby('Year').head()\ntop5_male= top5_m.reset_index()\ndel top5_male['index']\ndel top5_male['Sex']\ndel top5_male['Count']\ntop5_mnames= top5_male.set_index('Name')\nprint('TOP 5 MALE NAMES')\nprint(top5_male.head())\n# #Pivot the dataframe to make years columns\ntop5_mnames_tidy = top5_mnames.pivot_table(values='pct_name', index=['Name'], columns=['Year'])\ntop5_mnames_tidy = top5_mnames_tidy.fillna(0)\ntop5_mnames_tidy= top5_mnames_tidy.reset_index()\nprint('TOP 5 MALE NAMES, PIVOTED')\nprint(top5_mnames_tidy.head())\nprint(top5_mnames_tidy.tail())\nprint(top5_mnames_tidy.info())\n#_____________________________________________________________________\n#create 'tidy' dataframe for boys names\ntop5_mnames_tidy= top5_mnames_tidy.set_index('Name')\ndf=[]\nn= 0\nfor item in top5_mnames_tidy :\n if n <= 24 :\n top5m_by_year= top5_mnames_tidy.iloc[n]\n df.append(top5m_by_year)\n n = n + 1\ntop5_mnames= pd.concat(df, axis=1)\ntop5_mnames= top5_mnames.reset_index()\n\n#Plot the % change of use of top 5 boys names\ndef top5_boys_names_1():\n n=1\n for item in top5_mnames :\n if n <= 5 :\n x= top5_mnames['Year']\n y = top5_mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Boys Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_boyss_names1.pdf')\n plt.show()\n\ndef top5_boys_names_2():\n n=6\n for item in top5_mnames :\n if n <= 10:\n x= top5_mnames['Year']\n y = top5_mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Boys Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_boyss_names2.pdf')\n plt.show()\n\ndef top5_boys_names_3():\n n=11\n for item in top5_mnames :\n if n <= 20:\n x= top5_mnames['Year']\n y = top5_mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Boys Names')\n plt.legend(loc='best', 
fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_boys_names3.pdf')\n plt.show()\n\ndef top5_boys_names_4():\n n=21\n for item in top5_mnames :\n if n <= 25:\n x= top5_mnames['Year']\n y = top5_mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.ylim(-0.005, 5)\n plt.subplots_adjust(left=0.1)\n plt.ylabel('Percent of Names')\n plt.title('Top 5 Boys Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_top5_boys_names4.pdf')\n plt.show()\n\n#_______________________________________________________________________________\n#Review dataframe with only female names\nprint('REVIEW OF FEMALE NAME DATAFRAME')\nprint(names_f.info())\nprint(names_f.head())\n#create a list frequent female names\ndupf= names_f.groupby('Name').sum()\nfreq_f = dupf['pct_name'] >= 10\ncommon_f= dupf[freq_f]\ncommon_f= common_f.reset_index()\ncommon_fnames= common_f['Name']\nfreq_fnames= common_fnames.tolist()\n\n#set fnames index to Name and pull out the common names\nfnames= names_f.set_index('Name')\ncommon_girls= fnames.loc[freq_fnames]\ncommon_df= common_girls.reset_index()\nprint('COMMON FEMALE NAMES')\nprint(common_df.head())\nprint(common_df.info())\n#\n# #Pivot the dataframe to make years columns\nfnames_tidy = common_df.pivot_table(values='pct_name', index=['Name'], columns=['Year'])\nfnames_tidy = fnames_tidy.fillna(0)\nprint('COMMON FEMALE NAMES, PIVOTED')\nprint(fnames_tidy.head())\nprint(fnames_tidy.info())\nfnames_tidy['Total']= fnames_tidy.sum(axis=1)\nfnames_tidy= fnames_tidy.sort_values(by= 'Total', ascending= False)\n\n#Select very popular female names\ntop10_fnames= fnames_tidy[0:9]\ntop10_fnames= top10_fnames.reset_index()\n\n# #Bar plot of most popluar girls\n\ni=1880\nfor item in top10_fnames :\n if i <= 2010 :\n # print(over_1mf['Name'])\n # print(over_1mf[i])\n x= top10_fnames['Name']\n y= top10_fnames[i]\n plt.bar(x, y, label= i)\n i= i + 10\nplt.xticks(rotation='vertical')\nplt.subplots_adjust(bottom=0.2)\nplt.ylabel('Percent of Names')\nplt.title('Top 10 Traditional Girls Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('bar_top10_traditional_girls_names.pdf')\nplt.show()\n\n# #Calulate % change of name popularity top 10 girls names\ndel top10_fnames['Total']\ntop10_fnames= top10_fnames.set_index('Name')\ntop10_fnames_chg = top10_fnames.apply('pct_change', axis=1)*100\npd.set_option('use_inf_as_na', True)\ntop10_fnames_chg= top10_fnames_chg.replace(np.inf, 0)\ntop10_fnames_chg= top10_fnames_chg.fillna(0)\ndel top10_fnames_chg[1880]\ntop10_fnames_chg = top10_fnames_chg.reset_index()\nprint('TOP 10 COMMON FEMALE NAMES')\nprint(top10_fnames.head())\n\n#Create dataframe of the top 10 Traditional girls names\ntop10_fnames_chg= top10_fnames_chg.set_index('Name')\ndf=[]\nn= 0\nfor item in top10_fnames_chg :\n if n <= 8 :\n f_by_year= top10_fnames_chg.iloc[n]\n df.append(f_by_year)\n n = n + 1\nfnames= pd.concat(df, axis=1)\nfnames= fnames.reset_index()\n\n#plot %Change of top 10 Traditional girls names\ndef pct_change_top10_gnames_1():\n n=1\n for item in fnames :\n if n <= 3 :\n x= fnames['Year']\n y = fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.subplots_adjust(bottom=0.2)\n plt.ylabel('% Change')\n plt.title('% Change Traditional Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n 
plt.savefig('scatter_traditional_girls_names1.pdf')\n plt.show()\n\ndef pct_change_top10_gnames_2():\n n=4\n for item in fnames :\n if n <= 6 :\n x= fnames['Year']\n y = fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.subplots_adjust(bottom=0.2)\n plt.ylabel('% Change')\n plt.title('% Change Traditional Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_traditional_girls_names2.pdf')\n plt.show()\n\ndef pct_change_top10_gnames_3():\n n=7\n for item in fnames :\n if n <= 9 :\n x= fnames['Year']\n y = fnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.subplots_adjust(bottom=0.2)\n plt.ylabel('% Change')\n plt.title('% Change Traditional Girls Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_traditional_girls_names3.pdf')\n plt.show()\n#___________________________________________________________________________________\n#review dataframe with males names\nprint('REVIEW OF MALE NAMES DATAFRAME')\nprint(names_m.info())\nprint(names_m.head())\n#create a list frequent male names\ndupm= names_m.groupby('Name').sum()\nfreq_m = dupm['pct_name'] >= 10\ncommon_m= dupm[freq_m]\ncommon_m= common_m.reset_index()\ncommon_mnames= common_m['Name']\nfreq_mnames= common_mnames.tolist()\n\n#set names_m index to Name and pull out the common names\nnames_m= names_m.set_index('Name')\ncommon_boys= names_m.loc[freq_mnames]\ncommon_dm= common_boys.reset_index()\nprint('COMMON MALE NAMES')\nprint(common_dm.head())\nprint(common_dm.info())\n#\n# #Pivot the dataframe to make years columns\nmnames_tidy = common_dm.pivot_table(values='pct_name', index=['Name'], columns=['Year'])\nmnames_tidy = mnames_tidy.fillna(0)\nprint('COMMON MALE NAMES, PIVOTED')\nprint(mnames_tidy.head())\nprint(mnames_tidy.info())\n#\nmnames_tidy['Total']= mnames_tidy.sum(axis=1)\nmnames_tidy= mnames_tidy.sort_values(by= 'Total', ascending= False)\n\n#Select very popular male names\ntop10_mnames= mnames_tidy[0:9]\ntop10_mnames= top10_mnames.reset_index()\n\n# #Bar plot of most popluar boys names\n\ni=1880\nfor item in top10_mnames :\n if i <= 2010 :\n # print(over_1mf['Name'])\n # print(over_1mf[i])\n x= top10_mnames['Name']\n y= top10_mnames[i]\n plt.bar(x, y, label= i)\n i= i + 10\nplt.xticks(rotation='vertical')\nplt.subplots_adjust(bottom=0.2)\nplt.ylabel('Percent of Names')\nplt.title('Top 10 Traditional Boys Names')\nplt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\nplt.margins(0.1)\nplt.savefig('bar_top10_traditional_boys_names.pdf')\nplt.show()\n\n# #Calulate % change of name popularity top 10 girls names\ndel top10_mnames['Total']\ntop10_mnames= top10_mnames.set_index('Name')\ntop10_mnames_chg = top10_mnames.apply('pct_change', axis=1)*100\npd.set_option('use_inf_as_na', True)\ntop10_mnames_chg= top10_mnames_chg.replace(np.inf, 0)\ntop10_mnames_chg= top10_mnames_chg.fillna(0)\ndel top10_mnames_chg[1880]\ntop10_mnames_chg = top10_mnames_chg.reset_index()\nprint(top10_mnames_chg.info())\nprint(top10_mnames_chg.head())\n\n#Create dataframe of the top 10 Traditional boys names\ntop10_mnames_chg= top10_mnames_chg.set_index('Name')\ndm=[]\nn= 0\nfor item in top10_mnames_chg :\n if n <= 8 :\n m_by_year= top10_mnames_chg.iloc[n]\n dm.append(m_by_year)\n n = n + 1\nmnames= pd.concat(dm, axis=1)\nmnames= mnames.reset_index()\n\n#plot %Change of top 10 Traditional boys names\ndef pct_change_top10_bnames_1():\n n=1\n for item in 
mnames :\n if n <= 3 :\n x= mnames['Year']\n y = mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.subplots_adjust(bottom=0.2)\n plt.ylabel('% Change')\n plt.title('% Change Traditional Boys Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_traditional_boys_names1.pdf')\n plt.show()\n\ndef pct_change_top10_bnames_2():\n n=4\n for item in mnames :\n if n <= 6 :\n x= mnames['Year']\n y = mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.subplots_adjust(bottom=0.2)\n plt.ylabel('% Change')\n plt.title('% Change Traditional Boys Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_traditional_boys_names2.pdf')\n plt.show()\n\ndef pct_change_top10_bnames_3():\n n=7\n for item in mnames :\n if n <= 9 :\n x= mnames['Year']\n y = mnames.iloc[0:, n]\n plt.scatter(x, y)\n n= n + 1\n plt.xticks(rotation='vertical')\n plt.subplots_adjust(bottom=0.2)\n plt.ylabel('% Change')\n plt.title('% Change Traditional Boys Names')\n plt.legend(loc='best', fontsize='xx-small', markerscale=0.7)\n plt.margins(0.1)\n plt.savefig('scatter_traditional_boys_names3.pdf')\n plt.show()\n\n\n# top5_girls_names_1()\n# top5_girls_names_2()\n# top5_girls_names_3()\n# top5_girls_names_4()\n# top5_girls_names_5()\n#\n# pct_change_top10_gnames_1()\n# pct_change_top10_gnames_2()\n# pct_change_top10_gnames_3()\n#\ntop5_boys_names_1()\ntop5_boys_names_2()\ntop5_boys_names_3()\ntop5_boys_names_4()\n#\n# pct_change_top10_bnames_1()\n# pct_change_top10_bnames_2()\n# pct_change_top10_bnames_3()\n\n\n#_______________________________________________________________________________\n\n#___________________________________________________________________________________\n#source= ColumnDataSource(fnames_pctchange)\n# plot=figure()\n#plot.vbar(y='Mary' top='top', width= 0.5, source=source, legend='Counts')\n\n# plot.legend.location='top_right'\n# hover = HoverTool(tooltips=[('Name', '@Name')])\n# plot = figure(tools=[hover, 'pan'])\n# plot.add_tools(hover)\n# output_file('hover.html')\n#show(plot)\n","repo_name":"lynda-anne/names_project","sub_path":"glob_import.py","file_name":"glob_import.py","file_ext":"py","file_size_in_byte":24333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"16001126634","text":"import os\nfrom env import script_runner\n\ncleanup_list = [\n 'build/',\n 'result.txt',\n 'result_graphs/',\n]\n\nif __name__ == \"__main__\":\n project_path = os.path.dirname(os.path.abspath(__file__))\n for target in cleanup_list:\n script_runner.rm(os.path.join(project_path, target))","repo_name":"hyoungjk/gpudiag","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"35100317780","text":"DEPS = [\n 'recipe_engine/properties',\n 'recipe_engine/step',\n]\n\nfrom recipe_engine import recipe_api\n\ndef RunSteps(api):\n # TODO(martinis) change this\n # The api.step object is directly callable.\n api.step('hello', ['echo', 'Hello World'])\n api.step('hello', ['echo', 'Why hello, there.'])\n\n # You can also manipulate various aspects of the step, such as env.\n # These are passed straight through to subprocess.Popen.\n # Also, abusing bash -c in this way is a TERRIBLE IDEA DON'T DO IT.\n api.step('goodbye', ['bash', '-c', 'echo Good bye, $friend.'],\n env={'friend': 'Darth Vader'})\n\n # Finally, you can make your step accept any return code\n api.step('anything is cool', ['bash', '-c', 'exit 3'],\n ok_ret='any')\n\n # We can manipulate the step presentation arbitrarily until we run\n # the next step.\n step_result = api.step('hello', ['echo', 'hello'])\n step_result.presentation.status = api.step.EXCEPTION\n\n try:\n api.step('goodbye', ['echo', 'goodbye'])\n # Modifying step_result now would raise an AssertionError.\n except api.step.StepFailure:\n # Raising anything besides StepFailure or StepWarning causes the build to go \n # purple.\n raise ValueError('goodbye must exit 0!')\n\n try:\n api.step('warning', ['echo', 'warning'])\n except api.step.StepFailure as e:\n e.result.presentation.status = api.step.WARNING\n raise api.step.StepWarning(e.message)\n\n\n # Aggregate failures from tests!\n try:\n with recipe_api.defer_results():\n api.step('testa', ['echo', 'testa'])\n api.step('testb', ['echo', 'testb'])\n except recipe_api.AggregatedStepFailure as f:\n raise api.step.StepFailure(\"You can catch step failures.\")\n\n # Some steps are needed from an infrastructure point of view. If these\n # steps fail, the build stops, but doesn't get turned red because it's\n # not the developers' fault.\n try:\n api.step('cleanup', ['echo', 'cleaning', 'up', 'build'], infra_step=True)\n except api.step.InfraFailure as f:\n assert f.result.presentation.status == api.step.EXCEPTION\n\n # Run a step through a made-up wrapper program.\n api.step('application', ['echo', 'main', 'application'],\n wrapper=['python', '-c', 'import sys; print sys.argv'])\n\n if api.properties.get('access_invalid_data'):\n result = api.step('no-op', ['echo', 'I', 'do', 'nothing'])\n # Trying to access non-existent attributes on the result should raise.\n _ = result.json.output\n\n\ndef GenTests(api):\n yield (\n api.test('basic') +\n api.step_data('anything is cool', retcode=3)\n )\n\n # If you don't have the expect_exception in this test, you will get something\n # like this output.\n # ======================================================================\n # ERROR: step:example.exceptional (..../exceptional.json)\n # ----------------------------------------------------------------------\n # Traceback (most recent call last):\n # \n # File \"annotated_run.py\", line 537, in run\n # retcode = steps_function(api)\n # File \"recipe_modules/step/example.py\", line 39, in RunSteps\n # raise ValueError('goodbye must exit 0!')\n # ValueError: goodbye must exit 0!\n\n yield (\n api.test('exceptional') +\n api.step_data('goodbye (2)', retcode=1) +\n api.expect_exception('ValueError')\n )\n\n yield (\n api.test('warning') +\n api.step_data('warning', retcode=1) +\n api.expect_exception('StepWarning')\n )\n\n yield (\n api.test('defer_results') +\n api.step_data('testa', retcode=1)\n )\n\n yield (\n api.test('invalid_access') +\n api.properties(access_invalid_data=True) +\n 
api.expect_exception('StepDataAttributeError')\n )\n\n yield (\n api.test('infra_failure') +\n api.properties(raise_infra_failure=True) +\n api.step_data('cleanup', retcode=1)\n )\n","repo_name":"luqui/recipe_engine","sub_path":"recipe_modules/step/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2904555135","text":"\"\"\"The endpoints and functions which handle interpolating and evaluating a given set of points.\"\"\"\n\nfrom flask import Blueprint, flash, redirect, render_template, request, url_for\n\nfrom secret_sharing.polynomial import ModPolynomial\n\nbp = Blueprint('interp', __name__, url_prefix='/interpolator')\n\ndef is_ascending(xs):\n # pylint: disable=missing-docstring\n for i in range(1, len(xs)):\n if xs[i] < xs[i-1]:\n return False\n return True\n\n@bp.route('/', methods=(\"GET\", \"POST\"))\ndef interpolate():\n \"\"\"The basic interpolation page.\"\"\"\n return render_template('interpolate.html')\n\n@bp.route('/eval', methods=(\"POST\",))\ndef evaluate():\n \"\"\"The results page for a evaluation/interpolation of points.\"\"\"\n points = request.form['points']\n modulus = request.form['modulus']\n x = request.form['x-val']\n\n # Lots of validation\n\n # Points must be a space-separated list of points x,y - i.e '3,4 5,6 9,2'\n # The x values of the points must be increasing, and the modulus (if one is supplied)\n # must be greater than or equal to one\n\n error = ''\n if not (points or x or modulus):\n error = 'All fields must be filled in.'\n\n parsed = [pair.split(',') for pair in points.split(' ')]\n try:\n coords = [(int(a), int(b)) for (a, b) in parsed]\n x = int(x)\n modulus = int(modulus)\n except ValueError:\n error = 'Invalid values. Please check your input.'\n else:\n xs = [a for a, b in coords]\n if not is_ascending(xs):\n error = 'X values must be ascending.'\n if modulus < 1:\n error = 'The modulus cannot be less than 1.'\n\n if error:\n flash(error)\n return redirect(url_for('interp.interpolate'))\n\n # Validation over, real work now\n\n # Generate the interpolating polynomial for the supplied coords,\n # And evaluate it at the given x and computing with the given modulus\n lagrange = ModPolynomial.interpolating(coords, modulus)\n result = lagrange(x)\n\n # The template needs the first (x-less) coefficient of the polynomial\n # and then the rest of the coefficients as a list\n # They need to be converted from mod.Mod to regular ints\n coef = [int(c) for c in lagrange.coefficients()]\n base, remaining = coef[0], coef[1:]\n\n return render_template('interp_eval.html', n=len(coords), x=x, base=base,\n modulus=modulus, answer=result, polynomial=remaining)\n","repo_name":"Jmc18134/secret_sharing","sub_path":"secret_sharing/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21365691575","text":"#--------------------------------------------------------------------------------------------------------------------\r\n\r\n#This module contains task-related classes and functions\r\n\r\n#------------------ Dependencies ----------------------------#\r\n\r\n## External dependencies\r\nimport numpy\r\nfrom scipy.stats.stats import pearsonr\r\n \r\n## Internal dependencies\r\n\r\n#------------------ Global Variables ------------------------#\r\n\r\nISOTOPE_MASS_ERROR_BOUNDARY_TABLE = {}\r\nPPM = 1000000.0\r\n\r\nMASS_DIFFERENCE_COLUMN_NAME = 'Mass Difference'\r\nISOTOPE_CORRELATION_COLUMN_NAME = 'Isotope Distribution Correlation'\r\nH_L_RATIO_COLUMN_NAME = 'H/L Ratio'\r\nELUTION_CORRELATION_COLUMN_NAME = 'Elution Profile Correlation'\r\nELUTION_COUNT_COLUMN_NAME = '# Good Elution Profile Correlations' \r\nMQ_CONFIDENCE_COLUMN_NAME = 'MethylQuant Confidence'\r\nMQ_SCORE_COLUMN_NAME = 'MethylQuant Score'\r\n\r\n#------------------ Classes & Functions ---------------------#\r\n\r\n\"\"\" Returns expected mass difference between light and heavy methylSILAC partners \r\n \r\n This is based on the number of Methionine(M) residues in the peptide sequence\r\n \r\n Keyword arguments:\r\n peptide_sequence -- Amino acid sequence of peptide from MS/MS searches\r\n modifications -- Modifications identified on the peptide\r\n charge -- Charge state of peptide\r\n labelling -- Labelling\r\n silac_type -- Light or heavy peptide sequenced\r\n\"\"\"\r\ndef calculateMassShift(peptide_seq, modifications, charge, silac_type, mass_shifts):\r\n #adjust the expected mass shift based on the mass of the labels\r\n #adjust the expected mass shift based on the modifications that contribute to the mass shift\r\n expected_mass_shift = (mass_shifts.calculateMassShiftForLabels(peptide_seq) +\r\n mass_shifts.calculateMassShiftForModifications(modifications)) \r\n \r\n #adjust the expected mass shift based on the silac type\r\n #adjust the expected mass shift based on the peptide charge\r\n expected_mass_shift = (expected_mass_shift * silac_type) / float(charge)\r\n return expected_mass_shift\r\n\r\n#########################################################################################################\r\n\r\n\"\"\" Returns list of masses corresponding to isotope envelops of light and heavy methylSILAC partners \r\n \r\n This is a list of 3 light and 3 heavy peaks based on a given mass shift\r\n \r\n Keyword arguments:\r\n precursor_mass -- Precursor mass for a given scan\r\n calc_mz_pipeline -- Calculated m/z value for a peptide\r\n charge -- Charge state of peptide\r\n mz_shift_for_partner -- Mass difference between light and heavy methylSILAC partners\r\n\"\"\" \r\ndef calculatePeptideIsotopeMasses(precursor_mass, charge, calc_mz, mz_shift_for_partner):\r\n #Carbon 13 = +1.00335\r\n #calculate the minimum difference between the isotope peaks for the peptide\r\n isotope_states_mass_difference = 1.00335/charge\r\n \r\n #determine difference between exp and calc mz, then see how many times isotope state\r\n #mass difference divides into it to determine the isotope peak number that was selected for fragmentation\r\n #sometimes 2nd or 3rd isotopic peak and not the monoisotopic peak is selected for fragmentation\r\n isotope_peak_num = round((precursor_mass - calc_mz) / isotope_states_mass_difference)\r\n \r\n #calculate mz of first, second, third isotope peaks\r\n first_peak_mz = precursor_mass - (isotope_peak_num * isotope_states_mass_difference)\r\n second_peak_mz = first_peak_mz + 
isotope_states_mass_difference\r\n third_peak_mz = first_peak_mz + (2 * (isotope_states_mass_difference))\r\n \r\n #now calculate the peaks for heavy or light partner\r\n #if we initially have a light, then the mass shift will be in the positive direction \r\n #if we initially have a heavy, then the mass shift will be in the negative direction (See CalculateMassShift function above)\r\n first_peak_mz_partner = first_peak_mz + mz_shift_for_partner\r\n second_peak_mz_partner = second_peak_mz + mz_shift_for_partner\r\n third_peak_mz_partner = third_peak_mz + mz_shift_for_partner\r\n \r\n #assemble all the masses to look for\r\n isotope_masses = numpy.array([first_peak_mz, second_peak_mz, third_peak_mz])\r\n isotope_masses_partner = numpy.array([first_peak_mz_partner, second_peak_mz_partner, third_peak_mz_partner]) \r\n \r\n #Sort the masses such that it is always [Light, Heavy]\r\n peptide_isotope_masses = (numpy.array([isotope_masses_partner, isotope_masses]) \r\n if (isotope_masses_partner < isotope_masses).all() \r\n else numpy.array([isotope_masses, isotope_masses_partner]))\r\n return peptide_isotope_masses\r\n\r\n#########################################################################################################\r\n\r\n\"\"\" Returns tuple start and end RT \r\n \r\n This is +- the time window overlap for a given RT\r\n \r\n Keyword arguments:\r\n time_window_overlap -- Time window\r\n RT_MSMS -- Retention time for a MS/MS scan\r\n run_start_time -- Run start time\r\n run_end_time -- Run end time\r\n\"\"\"\r\ndef calculateTimeWindow(time_window_overlap, RT_MSMS, run_start_time, run_end_time): \r\n #scan back over 0.22min and forward 0.22min from MS/MS to search for maximum overlap\r\n #these times were chosen as 0.22min is the maximum delay between elution of heavy and elution of light\r\n time_window_start_RT = RT_MSMS - time_window_overlap\r\n time_window_stop_RT = RT_MSMS + time_window_overlap\r\n \r\n #make sure search window is within range of the run time\r\n time_window_start_RT = run_start_time if time_window_start_RT < run_start_time else time_window_start_RT\r\n time_window_stop_RT = run_end_time if time_window_stop_RT > run_end_time else time_window_stop_RT\r\n\r\n return (time_window_start_RT, time_window_stop_RT)\r\n \r\n#########################################################################################################\r\n\r\n\"\"\" Returns tuple of upper and lower mass boundaries\r\n \r\n This is +- the mass error ppm for a given isotope\r\n \r\n Keyword arguments:\r\n mass_error -- Error tolerance\r\n isotope -- Isotopic mass of a peptide\r\n\"\"\"\r\ndef calculateIsotopeMassErrorBoundary(mass_error, isotope):\r\n if isotope not in ISOTOPE_MASS_ERROR_BOUNDARY_TABLE:\r\n #calculate upper and lower mass errors when searching for signals matching the predicted\r\n #masses of the peptide isotopomers, was set to 20ppm\r\n isotope_mass_error_ppm = (isotope/PPM) * mass_error\r\n mass_upper = isotope + isotope_mass_error_ppm\r\n mass_lower = isotope - isotope_mass_error_ppm \r\n ISOTOPE_MASS_ERROR_BOUNDARY_TABLE[isotope] = (mass_upper, mass_lower)\r\n \r\n (mass_upper, mass_lower) = ISOTOPE_MASS_ERROR_BOUNDARY_TABLE[isotope]\r\n return (mass_upper, mass_lower)\r\n\r\n#########################################################################################################\r\n\r\n\"\"\" Returns the H/L ratio of light and heavy methylSILAC partners \r\n \r\n H/L ratio = sum(intensities for heavy) / sum(intensities for light)\r\n \r\n Keyword arguments:\r\n 
light_average_mass_intensities -- numpy.array of averaged mass intensities for light isotope envelopes\r\n heavy_average_mass_intensities -- numpy.array of averaged mass intensities for heavy isotope envelopes\r\n\"\"\"\r\ndef calculateHtoLRatio(light_average_mass_intensities, heavy_average_mass_intensities):\r\n light_average_intensities = light_average_mass_intensities[:, 1]\r\n heavy_average_intensities = heavy_average_mass_intensities[:, 1]\r\n \r\n #if there are any isotope envelope members missing, intensity of whole peptide is set to 0\r\n #this provides more specificity and minimises amount of rubbish being quantified\r\n light_intensity = 0 if 0 in light_average_intensities else numpy.sum(light_average_mass_intensities, axis = 0)[1]\r\n heavy_intensity = 0 if 0 in heavy_average_intensities else numpy.sum(heavy_average_mass_intensities, axis = 0)[1]\r\n\r\n if (float(heavy_intensity) != 0 and float(light_intensity) != 0):\r\n ratio = (float(heavy_intensity) / float(light_intensity))\r\n return ratio\r\n \r\n #partner wasn't found\r\n return 'NA'\r\n \r\n#########################################################################################################\r\n\r\n\"\"\" Returns pearson correlation coefficient\r\n \r\n This is a correlation between light and heavy isotope envelopes\r\n Code from dfrankow on stackoverflow\r\n \r\n Keyword arguments:\r\n light_average_mass_intensities -- numpy.array of averaged intensities for each light isotope envelopes\r\n heavy_average_mass_intensities -- numpy.array of averaged intensities for each heavy isotope envelopes\r\n\"\"\"\r\ndef calculatePearsonCorrelationCoefficient(light_average_mass_intensities, heavy_average_mass_intensities):\r\n light_average_intensities = light_average_mass_intensities[:, 1]\r\n heavy_average_intensities = heavy_average_mass_intensities[:, 1]\r\n\r\n #the function returns a (coefficient, p-value) tuple\r\n pearson_correlation_coefficient = pearsonr(light_average_intensities, heavy_average_intensities)[0]\r\n if (pearson_correlation_coefficient is not None and not numpy.isnan(pearson_correlation_coefficient)):\r\n return pearson_correlation_coefficient\r\n \r\n #Correlation could not be calculated\r\n return 'NA'\r\n\r\n","repo_name":"aidantay/MethylQuant","sub_path":"root/src/task/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":9540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15694832635","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LeNet5(nn.Module):\n \"\"\"\n x: (n, num_channels, 32, 32)\n :return: (n, num_classes)\n \"\"\"\n\n def __init__(self, in_channels, num_classes):\n super(LeNet5, self).__init__()\n self.c1 = nn.Conv2d(in_channels, 6, (5, 5))\n self.s2 = nn.MaxPool2d(2, stride=2)\n self.c3 = nn.Conv2d(6, 16, (5, 5))\n self.s4 = nn.MaxPool2d(2, stride=2)\n self.c5 = nn.Conv2d(16, 120, (5, 5))\n self.f6 = nn.Linear(120, 84)\n self.f7 = nn.Linear(84, num_classes)\n\n def forward(self, x):\n x = F.relu(self.c1(x))\n x = self.s2(x)\n x = F.relu(self.c3(x))\n x = self.s4(x)\n x = F.relu(self.c5(x))\n x = x.reshape(x.shape[0], -1)\n x = F.relu(self.f6(x))\n x = self.f7(x)\n return x\n","repo_name":"skyworld123/fl_exp","sub_path":"models/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32459716941","text":"#--------------------------------------------------------------------\n# bakery.util: Common utility functions.\n#\n# Author: Lain Supe (supelee)\n# Date: Thursday, March 23 2017\n#--------------------------------------------------------------------\n\nimport inspect\nimport logging\n\nfrom .error import *\n\n#--------------------------------------------------------------------\ndef has_method(obj, name):\n return callable(getattr(obj, name, None))\n\n#--------------------------------------------------------------------\ndef compose(arg, *functions):\n result = arg\n for f in functions:\n result = f(result)\n return result\n\n#--------------------------------------------------------------------\ndef degenerate(arg):\n if inspect.isgenerator(arg):\n return list(arg)\n else:\n return arg\n\n#--------------------------------------------------------------------\ndef flat_map(arg, f = lambda x: x):\n if isinstance(arg, (list, tuple)):\n results = []\n for x in arg:\n results.extend(flat_map(x, f))\n return results\n\n elif isinstance(arg, dict):\n return flat_map(list(arg.values()), f)\n\n else:\n return [f(arg)]\n\n#--------------------------------------------------------------------\ndef wide_foreach(arg, f = lambda x: x):\n if isinstance(arg, (list, tuple)):\n for x in arg:\n wide_foreach(x, f)\n elif isinstance(arg, dict):\n for x in arg.values():\n wide_foreach(x, f)\n else:\n f(arg)\n\n#--------------------------------------------------------------------\ndef wide_map(arg, f = lambda x: x):\n if isinstance(arg, (list, tuple)):\n return [wide_map(x, f) for x in arg]\n elif isinstance(arg, dict):\n return {key: wide_map(value, f) for key, value in arg.items()}\n else:\n return f(arg)\n\n#--------------------------------------------------------------------\ndef log_for(obj):\n if inspect.isfunction(obj) or inspect.ismethod(obj):\n return logger_for_function(obj)\n else:\n return logger_for_class(obj)\n\n#--------------------------------------------------------------------\ndef logger_for_class(obj):\n return logging.getLogger(name_for_class(obj))\n\n#--------------------------------------------------------------------\ndef logger_for_function(f):\n return logging.getLogger(name_for_function(f))\n\n#--------------------------------------------------------------------\ndef name_for_class(obj):\n if inspect.isclass(obj):\n return obj.__module__ + '.' + obj.__qualname__\n else:\n return obj.__module__ + '.' + obj.__class__.__qualname__\n\n#--------------------------------------------------------------------\ndef short_name_for_function(f):\n return f.__qualname__\n\n#--------------------------------------------------------------------\ndef name_for_function(f):\n return f.__module__ + '.' 
+ f.__qualname__\n\n#--------------------------------------------------------------------\ndef tree_to_depth_list(tree, depth_list = None, depth = 0):\n if depth_list is None:\n depth_list = []\n \n if len(depth_list) <= depth:\n depth_list.append([])\n\n if isinstance(tree, dict):\n for key, value in tree.items():\n depth_list[depth].append(key)\n tree_to_depth_list(value, depth_list, depth + 1)\n\n elif isinstance(tree, (list, tuple, set)):\n for value in tree:\n if isinstance(value, (dict, list, tuple, set)):\n tree_to_depth_list(value, depth_list, depth + 1)\n else:\n depth_list[depth].append(value)\n else:\n depth_list[depth].append(tree)\n\n return depth_list\n","repo_name":"Hodapp87/python3-bakery","sub_path":"bakery/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29488633296","text":"import sys\nimport argparse\nimport numpy as np\n\nfrom Utils.data_utils import *\nfrom Utils.evaluation import *\nfrom Utils.dataloader import train_dataset, test_dataset\n\nimport torch\nimport torch.optim as optim\nimport torch.utils.data as data\n\nfrom Models.HetComp import HetComp_MF\n\ndef get_NDCG_u(sorted_list, teacher_t_items, user, k=50):\n\n\twith torch.no_grad():\n\t\ttop_scores = np.asarray([np.exp(-t/10) for t in range(k)])\n\t\ttop_scores = ((2 ** top_scores)-1)\n\t\t\n\t\tt_items = teacher_t_items[:k]\n\n\t\tsorted_list_tmp = []\n\t\tfor item in sorted_list:\n\t\t\tif user in train_mat and item not in train_mat[user]:\n\t\t\t\tsorted_list_tmp.append(item)\n\t\t\tif len(sorted_list_tmp) == k: break \n\n\t\tif user not in train_mat:\n\t\t\tsorted_list_tmp = sorted_list\n\n\t\tdenom = np.log2(np.arange(2, k + 2))\n\t\tdcg_50 = np.sum((np.in1d(sorted_list_tmp[:k], list(t_items)) * top_scores) / denom)\n\t\tidcg_50 = np.sum((top_scores / denom)[:k])\n\n\t\treturn round(dcg_50 / idcg_50, 4)\n\ndef DKC(sorted_mat, last_max_idx, last_dist, is_first, epoch, alpha=1.05):\n\t\n\tnext_idx = last_max_idx[:] \n\tif is_first:\n\t\tlast_dist = np.ones_like(next_idx)\n\t\tfor model_idx, model_type in enumerate(perm_dict):\n\t\t\tfor user in range(user_count):\n\t\t\t\tcurrent_selction = int(last_max_idx[model_idx][user])\n\t\t\t\tnext_v = min(3, int(next_idx[model_idx][user]) + 1)\n\n\t\t\t\tnext_perm = perm_dict[model_type][next_v][user]\n\t\t\t\tnext_dist = 1 - get_NDCG_u(sorted_mat[user], next_perm, user)\n\t\t\t\t\n\t\t\t\tlast_dist[model_idx][user] = next_dist\n\n\t\treturn next_idx.T, next_idx, last_dist\n\n\tth = alpha * (0.995 ** (epoch // p))\n\n\tfor model_idx, model_type in enumerate(perm_dict):\n\t\tfor user in range(user_count):\n\t\t\tcurrent_selction = int(last_max_idx[model_idx][user])\n\t\t\tnext_v = min(3, int(next_idx[model_idx][user]) + 1)\n\t\t\tnext_next_v = min(3, int(next_idx[model_idx][user]) + 2)\n\t\t\t\n\t\t\tif current_selction == 3:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tcurrent_perm = perm_dict[model_type][current_selction][user]\n\t\t\tnext_perm = perm_dict[model_type][next_v][user]\n\t\t\tnext_next_perm = perm_dict[model_type][next_next_v][user]\n\t\t\t\n\t\t\tnext_dist = 1 - get_NDCG_u(sorted_mat[user], next_perm, user)\n\t\t\t\n\t\t\tif ((last_dist[model_idx][user] / next_dist) > th) or (last_dist[model_idx][user] / next_dist) < 1:\n\t\t\t\tnext_idx[model_idx][user] += 1\n\t\t\t\tnext_next_dist = 1 - get_NDCG_u(sorted_mat[user], next_next_perm, user)\n\t\t\t\tlast_dist[model_idx][user] = next_next_dist\n\n\treturn next_idx.T, next_idx, last_dist\n\t\n\n\n###########################################################################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lr', type=float, default=0.001)\nparser.add_argument('--reg', type=float, default=1e-5)\nparser.add_argument('--dim', type=int, default=6)\nparser.add_argument('--batch_size', type=int, default=1024)\nparser.add_argument('--num_ns', type=int, default=1)\n\nparser.add_argument('--test_ratio', type=float, default=0.20)\nparser.add_argument('--random_seed', type=int, default=0)\nparser.add_argument('--alpha', type=float, default=1.05)\nparser.add_argument('--p', type=int, default=10)\n\nopt = parser.parse_args()\n\ngpu = torch.device('cuda:3') \n\nrandom.seed(opt.random_seed)\nnp.random.seed(opt.random_seed)\ntorch.manual_seed(opt.random_seed)\n\nalpha = opt.alpha\np = opt.p\nK = 
100\n\n#############################################################################################################################\n# data load\nuser_count, item_count, train_mat, train_interactions, valid_mat, test_mat = load_data()\n\n# teacher trajectory needs to be located in the below directory\npath = './Teachers/'\nmodel_list = ['MF', 'ML', 'DL', 'GNN', 'AE', 'I-AE']\n\n# load trajectory and initial supervision\nstate_dict, perm_dict, t_results, p_results, exception_ints = load_teacher_trajectory(path, model_list, train_interactions, K, gpu)\ntrain_dataset = train_dataset(user_count, item_count, train_mat, 1, train_interactions, exception_ints)\ntest_dataset = test_dataset(user_count, item_count, valid_mat, test_mat)\ntrain_loader = data.DataLoader(train_dataset, batch_size=1024, shuffle=True)\n\n##############################################################################################################################\n# HetComp model \nmodel = HetComp_MF(user_count, item_count, opt.dim, gpu)\nmodel = model.to(gpu)\noptimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.reg)\n\n##############################################################################################################################\n# distillation\n\ntrain_losses = []\nb_recall = -999\nb_result, f_result = -1, -1\n\nes = 0\nverbose = 10\nlast_dist = None\nis_first = True\nv_results = np.asarray([0, 0, 0, 0, 0, 0])\n\nlast_max_idx = np.zeros((len(perm_dict), user_count))\nnext_idx = np.clip(last_max_idx + 1, a_min=0, a_max=3)\n\nfor epoch in range(1000):\n\n\ttic1 = time.time()\n\ttrain_loader.dataset.negative_sampling()\n\tep_loss = []\n\n\tfor mini_batch in train_loader:\n\n\t\tb_u = mini_batch['u'].unique()\n\t\t\n\t\tmini_batch = {key: value.to(gpu) for key, value in mini_batch.items()}\n\n\t\tmodel.train()\n\t\toutput = model(mini_batch)\n\n\t\tb_u = torch.LongTensor(b_u).to(gpu)\n\t\tb_u_mask = train_loader.dataset.get_user_side_mask(b_u).to(gpu)\n\t\t\n\t\tt_items = torch.index_select(t_results, 0, b_u) \n\t\tp_items = torch.index_select(p_results, 0, b_u) \n\t\t\n\t\tif v_results.sum() < 18: \n\t\t\tKD_loss = model.get_KD_loss(b_u, p_items, t_items, b_u_mask, False)\n\t\t\tb_loss = KD_loss * 0.01\n\t\telse:\n\t\t\tKD_loss = model.get_KD_loss(b_u, p_items, t_items, b_u_mask, True)\n\t\t\tb_loss = KD_loss * 0.005\n\t\t\n\t\tep_loss.append(b_loss)\n\t\toptimizer.zero_grad()\n\t\tb_loss.backward()\n\t\toptimizer.step()\n\n\tep_loss = torch.mean(torch.stack(ep_loss)).data.cpu().numpy()\n\ttrain_losses.append(ep_loss)\n\n\ttoc1 = time.time()\n\tif epoch % verbose == 0:\n\t\timp = False\n\n\t\tmodel.eval()\n\t\twith torch.no_grad():\n\t\t\ttic2 = time.time()\n\t\t\te_results, sorted_mat = evaluate(model, gpu, train_loader, test_dataset, return_sorted_mat=True)\n\t\t\ttoc2 = time.time()\n\n\t\t\tif e_results['valid']['R50'] > b_recall: \n\t\t\t\timp = True\n\t\t\t\tb_recall = e_results['valid']['R50']\n\t\t\t\tb_result = e_results['valid']\n\t\t\t\tf_result = e_results['test']\n\t\t\t\tes = 0\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t\timp = False\n\t\t\t\tes += 1\n\n\t\t\tprint_result(epoch, 1000, ep_loss, e_results, is_improved=imp, train_time=toc1-tic1, test_time=toc2-tic2)\n\n\t### DKC\n\tif (epoch % p == 0) and (epoch >= 10) and v_results.sum() < 18:\n\t\t\n\t\tif is_first == True:\n\t\t\tv, last_max_idx, last_dist = DKC(sorted_mat, last_max_idx, last_dist, True, epoch, alpha=alpha)\n\t\t\tis_first = False\n\t\telse:\n\t\t\tv, last_max_idx, last_dist = DKC(sorted_mat, last_max_idx, last_dist, 
False, epoch, alpha=alpha)\n\n\t\tt_results = g_torch(state_dict, v, train_interactions, gpu)\n\t\tt_results = t_results[:, :K]\n\n\t\tv_results = np.asarray([round(x, 2) for x in v.mean(0)])\n\t\tprint(v_results)\n\n\tif (epoch % verbose) == 0:\n\t\tprint(\"=\"* 50)\n\n\tif es >= 5:\n\t\tbreak","repo_name":"postech-di-lab/METIS","sub_path":"model-layer/knowledge-distillation-module/HetComp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"3"}
+{"seq_id":"15720037847","text":"\"\"\"Alpha Vantage Model\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import Dict, List\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom alpha_vantage.fundamentaldata import FundamentalData\nfrom openbb_terminal import config_terminal as cfg\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import lambda_long_number_format\nfrom openbb_terminal.rich_config import console\nfrom openbb_terminal.stocks.stocks_helper import clean_fraction\nfrom openbb_terminal.stocks.fundamental_analysis.fa_helper import clean_df_index\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef get_overview(ticker: str) -> pd.DataFrame:\n \"\"\"Get alpha vantage company overview\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n\n Returns\n -------\n pd.DataFrame\n Dataframe of fundamentals\n \"\"\"\n # Request OVERVIEW data from Alpha Vantage API\n s_req = f\"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n result = requests.get(s_req, stream=True)\n\n df_fa = pd.DataFrame()\n\n # If the returned data was unsuccessful\n if \"Error Message\" in result.json():\n console.print(result.json()[\"Error Message\"])\n else:\n # check if json is empty\n if not result.json():\n console.print(\"No data found\")\n # Parse json data to dataframe\n elif \"Note\" in result.json():\n console.print(result.json()[\"Note\"], \"\\n\")\n else:\n df_fa = pd.json_normalize(result.json())\n\n # Keep json data sorting in dataframe\n df_fa = df_fa[list(result.json().keys())].T\n df_fa.iloc[5:] = df_fa.iloc[5:].applymap(\n lambda x: lambda_long_number_format(x)\n )\n clean_df_index(df_fa)\n df_fa = df_fa.rename(\n index={\n \"E b i t d a\": \"EBITDA\",\n \"P e ratio\": \"PE ratio\",\n \"P e g ratio\": \"PEG ratio\",\n \"E p s\": \"EPS\",\n \"Revenue per share t t m\": \"Revenue per share TTM\",\n \"Operating margin t t m\": \"Operating margin TTM\",\n \"Return on assets t t m\": \"Return on assets TTM\",\n \"Return on equity t t m\": \"Return on equity TTM\",\n \"Revenue t t m\": \"Revenue TTM\",\n \"Gross profit t t m\": \"Gross profit TTM\",\n \"Diluted e p s t t m\": \"Diluted EPS TTM\",\n \"Quarterly earnings growth y o y\": \"Quarterly earnings growth YOY\",\n \"Quarterly revenue growth y o y\": \"Quarterly revenue growth YOY\",\n \"Trailing p e\": \"Trailing PE\",\n \"Forward p e\": \"Forward PE\",\n \"Price to sales ratio t t m\": \"Price to sales ratio TTM\",\n \"E v to revenue\": \"EV to revenue\",\n \"E v to e b i t d a\": \"EV to EBITDA\",\n }\n )\n return df_fa\n\n\n@log_start_end(log=logger)\ndef get_key_metrics(ticker: str) -> pd.DataFrame:\n \"\"\"Get key metrics from overview\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n\n Returns\n -------\n pd.DataFrame\n Dataframe of key metrics\n \"\"\"\n # Request OVERVIEW data\n s_req = f\"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n result = requests.get(s_req, stream=True)\n\n # If the returned data was unsuccessful\n if \"Error Message\" in result.json():\n console.print(result.json()[\"Error Message\"])\n else:\n # check if json is empty\n if not result.json() or len(result.json()) < 2:\n console.print(\"No data found\")\n return pd.DataFrame()\n\n df_fa = pd.json_normalize(result.json())\n df_fa = df_fa[list(result.json().keys())].T\n df_fa = df_fa.applymap(lambda x: lambda_long_number_format(x))\n 
clean_df_index(df_fa)\n df_fa = df_fa.rename(\n index={\n \"E b i t d a\": \"EBITDA\",\n \"P e ratio\": \"PE ratio\",\n \"P e g ratio\": \"PEG ratio\",\n \"E p s\": \"EPS\",\n \"Return on equity t t m\": \"Return on equity TTM\",\n \"Price to sales ratio t t m\": \"Price to sales ratio TTM\",\n }\n )\n as_key_metrics = [\n \"Market capitalization\",\n \"EBITDA\",\n \"EPS\",\n \"PE ratio\",\n \"PEG ratio\",\n \"Price to book ratio\",\n \"Return on equity TTM\",\n \"Price to sales ratio TTM\",\n \"Dividend yield\",\n \"50 day moving average\",\n \"Analyst target price\",\n \"Beta\",\n ]\n return df_fa.loc[as_key_metrics]\n\n return pd.DataFrame()\n\n\n@log_start_end(log=logger)\ndef get_income_statements(\n ticker: str, number: int, quarterly: bool = False\n) -> pd.DataFrame:\n \"\"\"Get income statements for company\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n number : int\n Number of past to get\n quarterly : bool, optional\n Flag to get quarterly instead of annual, by default False\n\n Returns\n -------\n pd.DataFrame\n Dataframe of income statements\n \"\"\"\n url = (\n f\"https://www.alphavantage.co/query?function=INCOME_STATEMENT&symbol={ticker}\"\n f\"&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n )\n r = requests.get(url)\n\n # If the returned data was unsuccessful\n if \"Error Message\" in r.json():\n console.print(r.json()[\"Error Message\"])\n else:\n # check if json is empty\n if not r.json():\n console.print(\"No data found\")\n else:\n statements = r.json()\n df_fa = pd.DataFrame()\n\n if quarterly:\n if \"quarterlyReports\" in statements:\n df_fa = pd.DataFrame(statements[\"quarterlyReports\"])\n else:\n if \"annualReports\" in statements:\n df_fa = pd.DataFrame(statements[\"annualReports\"])\n\n if df_fa.empty:\n console.print(\"No data found\")\n return pd.DataFrame()\n\n df_fa = df_fa.set_index(\"fiscalDateEnding\")\n df_fa = df_fa.head(number)\n df_fa = df_fa.applymap(lambda x: lambda_long_number_format(x))\n return df_fa[::-1].T\n return pd.DataFrame()\n\n\n@log_start_end(log=logger)\ndef get_balance_sheet(\n ticker: str, number: int, quarterly: bool = False\n) -> pd.DataFrame:\n \"\"\"Get balance sheets for company\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n number : int\n Number of past to get\n quarterly : bool, optional\n Flag to get quarterly instead of annual, by default False\n\n Returns\n -------\n pd.DataFrame\n Dataframe of income statements\n \"\"\"\n url = f\"https://www.alphavantage.co/query?function=BALANCE_SHEET&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n r = requests.get(url)\n\n # If the returned data was unsuccessful\n if \"Error Message\" in r.json():\n console.print(r.json()[\"Error Message\"])\n else:\n # check if json is empty\n if not r.json():\n console.print(\"No data found\")\n else:\n statements = r.json()\n df_fa = pd.DataFrame()\n\n if quarterly:\n if \"quarterlyReports\" in statements:\n df_fa = pd.DataFrame(statements[\"quarterlyReports\"])\n else:\n if \"annualReports\" in statements:\n df_fa = pd.DataFrame(statements[\"annualReports\"])\n\n if df_fa.empty:\n console.print(\"No data found\")\n return pd.DataFrame()\n\n df_fa = df_fa.set_index(\"fiscalDateEnding\")\n df_fa = df_fa.head(number)\n df_fa = df_fa.applymap(lambda x: lambda_long_number_format(x))\n return df_fa[::-1].T\n return pd.DataFrame()\n\n\n@log_start_end(log=logger)\ndef get_cash_flow(ticker: str, number: int, quarterly: bool = False) -> pd.DataFrame:\n \"\"\"Get cash flows for company\n\n Parameters\n ----------\n ticker : str\n Stock 
ticker\n number : int\n Number of past to get\n quarterly : bool, optional\n Flag to get quarterly instead of annual, by default False\n\n Returns\n -------\n pd.DataFrame\n Dataframe of income statements\n \"\"\"\n url = f\"https://www.alphavantage.co/query?function=CASH_FLOW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n r = requests.get(url)\n\n # If the returned data was unsuccessful\n if \"Error Message\" in r.json():\n console.print(r.json()[\"Error Message\"])\n else:\n # check if json is empty\n if not r.json():\n console.print(\"No data found\")\n else:\n statements = r.json()\n df_fa = pd.DataFrame()\n\n if quarterly:\n if \"quarterlyReports\" in statements:\n df_fa = pd.DataFrame(statements[\"quarterlyReports\"])\n else:\n if \"annualReports\" in statements:\n df_fa = pd.DataFrame(statements[\"annualReports\"])\n\n if df_fa.empty:\n console.print(\"No data found\")\n return pd.DataFrame()\n\n df_fa = df_fa.set_index(\"fiscalDateEnding\")\n df_fa = df_fa.head(number)\n df_fa = df_fa.applymap(lambda x: lambda_long_number_format(x))\n return df_fa[::-1].T\n return pd.DataFrame()\n\n\n@log_start_end(log=logger)\ndef get_earnings(ticker: str, quarterly: bool = False) -> pd.DataFrame:\n \"\"\"Get earnings calendar for ticker\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n quarterly : bool, optional\n Flag to get quarterly and not annual, by default False\n\n Returns\n -------\n pd.DataFrame\n Dataframe of earnings\n \"\"\"\n # Request EARNINGS data from Alpha Vantage API\n s_req = (\n \"https://www.alphavantage.co/query?function=EARNINGS&\"\n f\"symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n )\n result = requests.get(s_req, stream=True)\n df_fa = pd.DataFrame()\n\n # If the returned data was unsuccessful\n if \"Error Message\" in result.json():\n console.print(result.json()[\"Error Message\"])\n else:\n # check if json is empty\n if not result.json() or len(result.json()) < 2:\n console.print(\"No data found\")\n else:\n\n df_fa = pd.json_normalize(result.json())\n\n if quarterly:\n df_fa = pd.DataFrame(df_fa[\"quarterlyEarnings\"][0])\n df_fa = df_fa[\n [\n \"fiscalDateEnding\",\n \"reportedDate\",\n \"reportedEPS\",\n \"estimatedEPS\",\n \"surprise\",\n \"surprisePercentage\",\n ]\n ]\n df_fa = df_fa.rename(\n columns={\n \"fiscalDateEnding\": \"Fiscal Date Ending\",\n \"reportedEPS\": \"Reported EPS\",\n \"estimatedEPS\": \"Estimated EPS\",\n \"reportedDate\": \"Reported Date\",\n \"surprise\": \"Surprise\",\n \"surprisePercentage\": \"Surprise Percentage\",\n }\n )\n else:\n df_fa = pd.DataFrame(df_fa[\"annualEarnings\"][0])\n df_fa = df_fa.rename(\n columns={\n \"fiscalDateEnding\": \"Fiscal Date Ending\",\n \"reportedEPS\": \"Reported EPS\",\n }\n )\n\n return df_fa\n\n\n@log_start_end(log=logger)\ndef df_values(\n df: pd.DataFrame, item: str, index: int = 0, length: int = 2\n) -> List[int]:\n \"\"\"Clean the values from the df\n\n Parameters\n ----------\n df : pd.DataFrame\n The Dataframe to use\n item : str\n The item to select\n index : int\n The number of row to display\n length : int\n The number of rows to return\n\n Returns\n -------\n values : List[int]\n The values for the dataframe\n \"\"\"\n if index:\n df = df.iloc[index : index + length]\n selection = df[item]\n values = selection.apply(\n lambda x: \"N/A\" if (not x or x == \"None\") else int(x)\n ).values\n return values.tolist()\n\n\n@log_start_end(log=logger)\ndef get_fraud_ratios(ticker: str) -> pd.DataFrame:\n \"\"\"Get fraud ratios based on fundamentals\n\n Parameters\n ----------\n 
ticker : str\n Stock ticker\n\n Returns\n -------\n metrics : pd.DataFrame\n The fraud ratios\n \"\"\"\n\n try:\n fd = FundamentalData(key=cfg.API_KEY_ALPHAVANTAGE, output_format=\"pandas\")\n # pylint: disable=unbalanced-tuple-unpacking\n df_cf, _ = fd.get_cash_flow_annual(symbol=ticker)\n df_bs, _ = fd.get_balance_sheet_annual(symbol=ticker)\n df_is, _ = fd.get_income_statement_annual(symbol=ticker)\n\n except Exception as e:\n console.print(e)\n return pd.DataFrame()\n\n # pylint: disable=no-member\n df_cf = df_cf.set_index(\"fiscalDateEnding\")\n df_bs = df_bs.set_index(\"fiscalDateEnding\")\n df_is = df_is.set_index(\"fiscalDateEnding\")\n fraud_years = pd.DataFrame()\n for i in range(len(df_cf) - 1):\n ar = df_values(df_bs, \"currentNetReceivables\", i)\n sales = df_values(df_is, \"totalRevenue\", i)\n cogs = df_values(df_is, \"costofGoodsAndServicesSold\", i)\n ni = df_values(df_is, \"netIncome\", i)\n ca = df_values(df_bs, \"totalCurrentAssets\", i)\n cl = df_values(df_bs, \"totalCurrentLiabilities\", i)\n ppe = df_values(df_bs, \"propertyPlantEquipment\", i)\n cash = df_values(df_bs, \"cashAndCashEquivalentsAtCarryingValue\", i)\n cash_and_sec = df_values(df_bs, \"cashAndShortTermInvestments\", i)\n sec = [y - x for (x, y) in zip(cash, cash_and_sec)]\n ta = df_values(df_bs, \"totalAssets\", i)\n dep = df_values(df_bs, \"accumulatedDepreciationAmortizationPPE\", i)\n sga = df_values(df_is, \"sellingGeneralAndAdministrative\", i)\n tl = df_values(df_bs, \"totalLiabilities\", i)\n icfo = df_values(df_is, \"netIncomeFromContinuingOperations\", i)\n cfo = df_values(df_cf, \"operatingCashflow\", i)\n\n ratios: Dict = {}\n ratios[\"DSRI\"] = (ar[0] / sales[0]) / (ar[1] / sales[1])\n ratios[\"GMI\"] = ((sales[1] - cogs[1]) / sales[1]) / (\n (sales[0] - cogs[0]) / sales[0]\n )\n ratios[\"AQI\"] = (1 - ((ca[0] + ppe[0] + sec[0]) / ta[0])) / (\n 1 - ((ca[1] + ppe[1] + sec[1]) / ta[1])\n )\n ratios[\"SGI\"] = sales[0] / sales[1]\n ratios[\"DEPI\"] = (dep[1] / (ppe[1] + dep[1])) / (dep[0] / (ppe[0] + dep[0]))\n ratios[\"SGAI\"] = (sga[0] / sales[0]) / (sga[1] / sales[1])\n ratios[\"LVGI\"] = (tl[0] / ta[0]) / (tl[1] / ta[1])\n ratios[\"TATA\"] = (icfo[0] - cfo[0]) / ta[0]\n ratios[\"MSCORE\"] = (\n -4.84\n + (0.92 * ratios[\"DSRI\"])\n + (0.58 * ratios[\"GMI\"])\n + (0.404 * ratios[\"AQI\"])\n + (0.892 * ratios[\"SGI\"])\n + (0.115 * ratios[\"DEPI\"] - (0.172 * ratios[\"SGAI\"]))\n + (4.679 * ratios[\"TATA\"])\n - (0.327 * ratios[\"LVGI\"])\n )\n\n zscore = (\n -4.336\n - (4.513 * (ni[0] / ta[0]))\n + (5.679 * (tl[0] / ta[0]))\n + (0.004 * (ca[0] / cl[0]))\n )\n v1 = np.log(ta[0] / 1000)\n v2 = ni[0] / ta[0]\n v3 = cash[0] / cl[0]\n\n x = ((v1 + 0.85) * v2) - 0.85\n y = 1 + v3\n\n mckee = x**2 / (x**2 + y**2)\n ratios[\"Zscore\"] = zscore\n ratios[\"Mscore\"] = mckee\n if fraud_years.empty:\n fraud_years.index = ratios.keys()\n fraud_years[df_cf.index[i]] = ratios.values()\n fraud_years = fraud_years[sorted(fraud_years)]\n return fraud_years\n\n\n@log_start_end(log=logger)\ndef get_dupont(ticker: str) -> pd.DataFrame:\n \"\"\"Get dupont ratios\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n\n Returns\n -------\n dupont : pd.DataFrame\n The dupont ratio breakdown\n \"\"\"\n\n try:\n fd = FundamentalData(key=cfg.API_KEY_ALPHAVANTAGE, output_format=\"pandas\")\n # pylint: disable=unbalanced-tuple-unpacking\n df_bs, _ = fd.get_balance_sheet_annual(symbol=ticker)\n df_is, _ = fd.get_income_statement_annual(symbol=ticker)\n\n except Exception as e:\n console.print(e)\n return 
pd.DataFrame()\n\n # pylint: disable=no-member\n df_bs = df_bs.set_index(\"fiscalDateEnding\")\n df_is = df_is.set_index(\"fiscalDateEnding\")\n dupont_years = pd.DataFrame()\n\n for i in range(len(df_bs)):\n ni = df_values(df_is, \"netIncome\", i, 1)\n pretax = df_values(df_is, \"incomeBeforeTax\", i, 1)\n ebit = df_values(df_is, \"ebit\", i, 1)\n sales = df_values(df_is, \"totalRevenue\", i, 1)\n assets = df_values(df_bs, \"totalAssets\", i, 1)\n equity = df_values(df_bs, \"totalShareholderEquity\", i, 1)\n\n ratios: Dict = {}\n try:\n ratios[\"Tax Burden\"] = clean_fraction(ni[0], pretax[0])\n ratios[\"Interest Burden\"] = clean_fraction(pretax[0], ebit[0])\n ratios[\"EBIT Margin\"] = clean_fraction(ebit[0], sales[0])\n ratios[\"Asset Turnover\"] = clean_fraction(sales[0], assets[0])\n ratios[\"Finance Leverage\"] = clean_fraction(assets[0], equity[0])\n ratios[\"ROI\"] = clean_fraction(ni[0], equity[0])\n except IndexError:\n pass\n\n if dupont_years.empty:\n dupont_years.index = ratios.keys()\n dupont_years[df_bs.index[i]] = ratios.values()\n dupont_years = dupont_years[sorted(dupont_years)]\n return dupont_years\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"openbb_terminal/stocks/fundamental_analysis/av_model.py","file_name":"av_model.py","file_ext":"py","file_size_in_byte":17931,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"32341402512","text":"import torch\nimport torch.nn as nn\n\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"\n With label smoothing,\n KL-divergence between q_{smoothed ground truth prob.}(w)\n and p_{prob. computed by model}(w) is minimized.\n \"\"\"\n def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=0):\n assert 0.0 < label_smoothing <= 1.0\n self.ignore_index = ignore_index\n super(LabelSmoothingLoss, self).__init__()\n\n smoothing_value = label_smoothing / (tgt_vocab_size - 2) # word itself, and pad token\n one_hot = torch.full((tgt_vocab_size,), smoothing_value)\n one_hot[self.ignore_index] = 0\n self.register_buffer('one_hot', one_hot.unsqueeze(0)) # register buffer is not a parameter, but in state_dict.\n self.confidence = 1.0 - label_smoothing\n\n def forward(self, output, target):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n model_prob = self.one_hot.repeat(target.size(0), 1) # model_prob = (target_size(0), V)\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n mask = (target == self.ignore_index)\n model_prob.masked_fill_(mask.unsqueeze(1), 0) # broadcasting\n pred = output.log_softmax(dim=-1)\n return torch.sum(-pred*model_prob) / sum(target != self.ignore_index)\n","repo_name":"SungHo3268/Transformer","sub_path":"src/criterion.py","file_name":"criterion.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"7589851877","text":"import itertools\nfrom typing import List\nfrom typing import Any\nfrom typing import Dict\nimport mysql.connector\n\ndef TestModel() -> Dict[str,str]:\n '''SQL test Table model'''\n model = {\"plant\" : \"plant VARCHAR(10) NOT NULL\",\n \"motor_duration\" : \"motor_duration INT NOT NULL\",\n \"motor_power\" : \"motor_power INT NOT NULL\"}\n return model\n\nclass Table:\n '''Class represents SQL table. Translates model to\n Dict'''\n def __init__(self, name : str, model : Dict[str,str]) -> None:\n self. name = name\n self.columns = model.values()\n\nclass DataBaseHandler:\n '''Database handler class'''\n def __init__(self, host=\"localhost\", user=\"root\", password=\"password\"):\n self.host = host\n self.user = user\n self.password = password\n self.cursor = None\n self.connection = None\n\n def connect(self):\n '''SQL connection'''\n try:\n self.connection = mysql.connector.connect(\n host = self.host,\n user = self.user,\n password = self.password)\n self.cursor = self.connection.cursor()\n\n except mysql.connector.Error as connect_error:\n print(f\"Could not connect to database: {connect_error}\")\n\n def connect_to_database(self, database):\n '''Connect to database. The cursor will point to database'''\n if not self.database_exist:\n raise Exception(f\"Database {database} does not exist\")\n try:\n self.connection = mysql.connector.connect(\n host = self.host,\n user = self.user,\n password = self.password,\n database = database)\n self.cursor = self.connection.cursor()\n\n except mysql.connector.Error as connect_error:\n print(f\"Could not connect to database: {connect_error}\")\n\n def create_database(self, new_database) -> bool:\n '''Create new database. Returns False if already exist'''\n if self.database_exist(new_database):\n print(f\"[DEBUG] Database {new_database} already exist\")\n return False\n insert_statement = f\"CREATE DATABASE {new_database}\"\n self.cursor.execute(insert_statement)\n print(f\"[DEBUG] Database {new_database} created\")\n return True\n\n def delete_database(self, remove_database):\n '''Delete database. Returns False if does not exist'''\n insert_statement = f\"DROP DATABASE {remove_database}\"\n if self.database_exist(remove_database):\n self.cursor.execute(insert_statement)\n print(f\"[DEBUG] Database {remove_database} deleted\")\n return True\n print(f\"[DEBUG] Database {remove_database} does not exist\")\n return False\n\n def database_exist(self, database_check: str) -> bool:\n for database in self._get_databases():\n if database_check in database:\n return True\n return False\n\n def _get_databases(self) -> List[str]:\n insert_statement = \"SHOW DATABASES\"\n self.cursor.execute(insert_statement)\n databases = self.cursor.fetchall()\n return list(itertools.chain(*databases))\n\n def close_database_connection(self):\n '''Close database and SQL connection'''\n self.cursor.close()\n self.connection.close()\n\n def create_table(self, table_name: str, table: List[str]):\n '''Create table_name table in current database. 
Table is a List[str]\n that represents all columns, their types and allowed NULL'''\n column_string = \"\"\n for column in table:\n column_string += f\"{column}, \"\n column_string = column_string[0:len(column_string)-2]\n insert_statement = f\"CREATE TABLE {table_name}({column_string})\"\n try:\n self.cursor.execute(insert_statement)\n return True\n except mysql.connector.errors.ProgrammingError as program_error:\n print(f\"[DEBUG] {program_error}\")\n return False\n\n def table_exist(self, table_check: str) -> bool:\n for table in self._get_tables():\n if table_check in table:\n return True\n return False\n\n def _get_tables(self) -> List[str]:\n insert_statement = \"SHOW TABLES\"\n self.cursor.execute(insert_statement)\n tables = self.cursor.fetchall()\n return list(itertools.chain(*tables))\n\n def delete_table(self, table_name: str):\n '''Delete table_name table form current database'''\n insert_statement = f\"DROP TABLE {table_name}\"\n try:\n self.cursor.execute(insert_statement)\n return True\n except mysql.connector.errors.ProgrammingError as program_error:\n print(f\"[DEBUG] {program_error}\")\n return False\n\n def insert_into_table(self, table_name: str, table: Dict[str,Any]):\n '''Insert table data into table_name table. Not all columns need to\n be filled'''\n insert_statement_start = f\"INSERT INTO {table_name} \"\n insert_statement_colums = \"(\"\n insert_statement_values = \"VALUES (\"\n values = []\n for column in table.keys():\n if not table[column] is None:\n insert_statement_colums += f\"{column},\"\n insert_statement_values += \"%s,\"\n values.append(table[column])\n insert_statement_colums = insert_statement_colums[0:len(insert_statement_colums)-1]\n insert_statement_values = insert_statement_values[0:len(insert_statement_values)-1]\n insert_statement_colums += \") \"\n insert_statement_values += \")\"\n insert_statement = insert_statement_start + insert_statement_colums + insert_statement_values\n try:\n self.cursor.execute(insert_statement, values)\n self._commit()\n return True\n except mysql.connector.errors.ProgrammingError as program_error:\n print(f\"[DEBUG] {program_error}\")\n return False\n\n def select_from_table(self, table_name: str, columns: List[str], order=False, order_by=None, limit=0):\n '''Select columns from table_name from current database'''\n insert_columns = \"\"\n for column in columns:\n insert_columns += f\"{column}, \"\n insert_columns = insert_columns[0:len(insert_columns)-2]\n insert_statement = f\"SELECT {insert_columns} FROM {table_name}\"\n if order:\n insert_order = f\" order by {order_by} desc\"\n insert_statement += insert_order\n if limit > 0:\n insert_limit = f\" limit {limit}\"\n insert_statement += insert_limit\n try:\n self.cursor.execute(insert_statement)\n return self.cursor.fetchall()\n except mysql.connector.errors.ProgrammingError as program_error:\n print(f\"[DEBUG] {program_error}\")\n raise program_error\n\n def _commit(self) -> bool:\n try:\n self.connection.commit()\n return True\n except mysql.connector.Error as connect_error:\n print(f\"[DEBUG] {connect_error}\")\n return False\n\nif __name__ == \"__main__\":\n print(\"Test database handler\")\n host = \"localhost\"\n database = \"Planty2\"\n table = \"test99\"\n databaseHandler = DataBaseHandler(host)\n databaseHandler.connect_to_database(database)\n new_entry = databaseHandler.select_from_table(\"Planty_data\", [\"entry\"], True, \"Datetime\", 1)\n 
print(new_entry[0][0])\n","repo_name":"DramaCharles1/Planty2","sub_path":"DatabaseHandler.py","file_name":"DatabaseHandler.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24410782172","text":"import time\nimport sys\nfrom azure.servicebus import ServiceBusService\n\ninfile = open(\"tempOutput.txt\", \"r\")\ntemp = infile.readline().rstrip()\n#print('received temp of: ' + temp)\ntemp = int(temp)\n\nkey_name = \"sendRule\"\nkey_value = \"9SWS0sNEBQMfTmuBHlxFwUHBFMSBgmJ77/ICSRm9HK4=\"\n\nsbs = ServiceBusService(\"pimessage-ns\",shared_access_key_name=key_name, shared_access_key_value=key_value)\nif temp > 65 or temp < 30:\n# print('sending temp of:' + temp)\n sbs.send_event('pimessage', '{ \"DeviceId\": \"smokerpi\", \"Temperature\": temp }')\n print('sent!')\n print ('got here')\nelse:\n print('temp was in normal range')\n","repo_name":"hncshtq/CSC450-Software-Engineering-Project-","sub_path":"sendTemp.py","file_name":"sendTemp.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30117447777","text":"from __future__ import annotations\n\nimport random\n\nfrom pygame import Vector2\nfrom typing import TYPE_CHECKING, Optional, Tuple\n\nfrom ..traproom import DMTrapRoom\nfrom utilities import UnlockPack, Effect\n\nif TYPE_CHECKING:\n from dm.core.game.game import DMGame\n################################################################################\n\n__all__ = (\"DeathAndCorruption\",)\n\n################################################################################\nclass DeathAndCorruption(DMTrapRoom):\n\n def __init__(self, game: DMGame, position: Optional[Vector2] = None, level: int = 1):\n\n super().__init__(\n game, position,\n _id=\"ROOM-221\",\n name=\"Death and Corruption\",\n description=(\n \"Once recharged, inflict {damage} damage to all enemies in \"\n \"adjacent area and give them {status} Corpse Explosion.\"\n ),\n level=level,\n rank=8,\n unlock=UnlockPack.Myth,\n base_dmg=121,\n effects=[\n Effect(name=\"Corpse Explosion\", base=48, per_lv=36)\n ]\n )\n self.setup_charging(3.3, 3.3)\n\n################################################################################\n def on_charge(self) -> None:\n\n for room in self.adjacent_rooms:\n for hero in room.heroes:\n hero.damage(self.dmg) # type: ignore\n hero.add_status(\"Corpse Explosion\", self.effects[\"Corpse Explosion\"], self)\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/rooms/EightStar/DeathAndCorruption.py","file_name":"DeathAndCorruption.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"41916807205","text":"import json\nfrom django.shortcuts import render,redirect\nfrom django.views.generic import ListView, DetailView, CreateView\nfrom .models import *\nfrom django.urls import reverse_lazy, reverse\nfrom .forms import ReviewForm, OrderItemForm\nfrom django.http.response import HttpResponseRedirect, HttpResponse\nfrom django.http import JsonResponse\nfrom .utils import cookieCart\n\n\ndef bookListView(request):\n if request.user.is_authenticated:\n try:\n order = Order.objects.get(owner=request.user,complete=False)\n total_items = order.get_total_item\n except:\n total_items = 0\n else:\n cookieData = cookieCart(request)\n total_items = cookieData['total_items']\n\n books = Book.objects.all()\n context = {'books': books, 'total_items': total_items}\n return render(request, 'books/book_list.html', context)\n\n\ndef bookDetailView(request, pk):\n if request.user.is_authenticated:\n try:\n order = Order.objects.get(owner=request.user,complete=False)\n total_items = order.get_total_items\n except:\n total_items = 0\n else:\n try:\n cart = json.loads(request.COOKIES['cart'])\n except:\n cart = {}\n total_items = 0\n for i in cart:\n print('this is cart', cart[i])\n total_items += cart[i]['quantity']\n book = Book.objects.get(id=pk)\n context = {'book': book, 'total_items': total_items}\n return render(request, 'books/book_detail.html', context)\n\n\ndef reviewcreate(request, pk):\n form = ReviewForm(request.POST)\n book = Book.objects.get(id=pk)\n if form.is_valid():\n review = form.save(commit=False)\n review.owner = request.user\n review.book = book\n review.save()\n return HttpResponseRedirect(reverse_lazy('book_list'))\n return HttpResponse(form.errors)\n\n\ndef orderprocess(request):\n order, created = Order.objects.get_or_create(owner=request.user, complete=True)\n for item in request.POST:\n form = OrderItemForm(item)\n if form.is_valid():\n item = form.save(commit=False)\n item.order = order\n item.save()\n return HttpResponse(\"something wrong occure!\")\n\n\ndef cart(request):\n if request.user.is_authenticated:\n try:\n order = Order.objects.get(owner=request.user, complete=False)\n except:\n return HttpResponseRedirect(reverse_lazy(\"book_list\"))\n orderItems = order.orderitem_set.all()\n cart_total = order.get_total_cart\n total_items = order.get_total_items\n else:\n cookieData = cookieCart(request)\n if len(cookieData)> 0:\n orderItems = cookieData['orderItems']\n cart_total = cookieData['cart_total']\n total_items = cookieData['total_items']\n else:\n return HttpResponseRedirect(reverse_lazy(\"book_list\"))\n\n context = {'items': orderItems, 'cart_total': cart_total, 'total_items': total_items}\n return render(request, 'cart/cart.html', context)\n\n\ndef updatecart(request):\n data = json.loads(request.body)\n bookId = data['bookId']\n action = data['action']\n book = Book.objects.get(id=bookId)\n order, created = Order.objects.get_or_create(owner=request.user, complete=False)\n orderItem, created = OrderItem.objects.get_or_create(order=order, book=book)\n if action == 'add':\n orderItem.quantity = orderItem.quantity+1\n elif action == 'remove':\n orderItem.quantity = orderItem.quantity -1\n\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n\n return JsonResponse('update cart ', safe=False)\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n try:\n order = Order.objects.get(owner=request.user, complete=False)\n except:\n return HttpResponseRedirect(reverse_lazy(\"book_list\"))\n orderItems = 
order.orderitem_set.all()\n        cart_total = order.get_total_cart\n        total_items = order.get_total_items\n    else:\n        cookieData = cookieCart(request)\n        if len(cookieData) > 0:\n            orderItems = cookieData['orderItems']\n            cart_total = cookieData['cart_total']\n            total_items = cookieData['total_items']\n        else:\n            return HttpResponseRedirect(reverse_lazy(\"book_list\"))\n    context = {'items': orderItems, 'cart_total': cart_total, 'total_items': total_items}\n    return render(request, 'cart/checkout.html', context)\n\n\ndef orderprocess(request):\n    # checkout payload posted by the client (needed for both guest and authenticated orders)\n    data = json.loads(request.body)\n    if request.user.is_authenticated:\n        order = Order.objects.get(owner=request.user, complete=False)\n    else:\n        cookieData = cookieCart(request)\n        orderItems = cookieData['orderItems']\n        order = Order.objects.create()\n        for item in orderItems:\n            orderItem = OrderItem.objects.create(order=order,\n                                                 book=item['book']['id'],\n                                                 quantity=item['quantity'],\n                                                 )\n            orderItem.save()\n\n    shipping, created = Shipping.objects.get_or_create(order=order)\n    shipping.lastname = data['formData']['lastname']\n    shipping.firstname = data['formData']['firstname']\n    shipping.email = data['formData']['email']\n    shipping.address = data['formData']['address']\n    shipping.zipcode = int(data['formData']['zipcode'])\n    shipping.phone = data['formData']['phone']\n    shipping.save()\n    order.complete = True\n    order.save()\n    return JsonResponse(\"data added\", safe=False)\n\n\ndef success(request):\n    try:\n        cart = json.loads(request.COOKIES['cart'])\n        print('Cart', cart)\n    except:\n        cart = {}\n    response = render(request, 'cart/success.html')\n    response.delete_cookie(\"cart\")\n    return response","repo_name":"Riman-rh/django_bookstore","sub_path":"bookstore/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2543992749","text":"# 显示相机的深度图和彩色图,效果和Intel配套软件相同\n# 【注】 相机的第一张图像会出现色彩失真的情况,但后续图像正常\n# \n\n\nimport pyrealsense2 as rs\nimport numpy as np\nimport cv2\nimport datetime\n\n# Configure depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\nconfig.enable_stream(rs.stream.infrared, 1, 640, 480, rs.format.y8, 30)\nconfig.enable_stream(rs.stream.infrared, 2, 640, 480, rs.format.y8, 30)\n\n# Start streaming\npipeline.start(config)\n# 像素对齐使用 rs.align 模块\nalign = rs.align(rs.stream.color)\n\ntry:\n while True:\n\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n ir_frame_left = frames.get_infrared_frame(1)\n ir_frame_right = frames.get_infrared_frame(2)\n if not depth_frame or not color_frame:\n continue\n\n # Convert images to numpy arrays\n # np.asanyarray 会返回 ndarray 或者 ndarray的子类\n depth_image = np.asanyarray(depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n ir_left_image = np.asanyarray(ir_frame_left.get_data())\n ir_right_image = np.asanyarray(ir_frame_right.get_data())\n\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.1), cv2.COLORMAP_JET)\n\n # Stack both images horizontally\n images1 = np.hstack((color_image, depth_colormap))\n images2 = np.hstack((ir_left_image, ir_right_image))\n image3 = cv2.addWeighted(color_image,0.7,depth_colormap,0.3,0)\n\n # 像素对齐\n aligned_frames = align.process(frames)\n aligned_depth_frame = aligned_frames.get_depth_frame()\n aligned_color_frame = aligned_frames.get_color_frame()\n if not aligned_color_frame or not aligned_depth_frame:\n continue\n\n aligned_depth_image = np.asanyarray(aligned_depth_frame.get_data())\n aligned_color_image = np.asanyarray(aligned_color_frame.get_data())\n\n aligned_depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(aligned_depth_image, alpha=0.1), cv2.COLORMAP_JET)\n image4 = cv2.addWeighted(aligned_color_image,0.7,aligned_depth_colormap,0.3,0)\n\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', images1)\n # cv2.imshow(\"Display pic_irt\", images2)\n cv2.imshow(\"Merge image\",image3)\n cv2.imshow(\"Merge image_aligned\",image4)\n\n key = cv2.waitKey(1)\n # Press esc or 'q' to close the image window\n if key & 0xFF == ord('q') or key == 27:\n cv2.destroyAllWindows()\n ISOTIMEFORMAT = '%Y_%m_%d_%H_%M_%S'\n theTime = datetime.datetime.now().strftime(ISOTIMEFORMAT)\n cv2.imwrite(str(theTime)+'color_image_'+'.png',color_image)\n cv2.imwrite(str(theTime)+'depth_colormap_'+'.png',depth_colormap)\n cv2.imwrite(str(theTime)+'depth_'+'.png',depth_image)\n cv2.imwrite(str(theTime)+'merge'+'.png',image3)\n cv2.imwrite(str(theTime)+'aligned_depth_colormap'+'.png',aligned_depth_colormap)\n cv2.imwrite(str(theTime)+'aligned_color_image'+'.png',aligned_color_image)\n cv2.imwrite(str(theTime)+'aligned_depth_'+'.png',aligned_depth_image)\n cv2.imwrite(str(theTime)+'aligned_merge'+'.png',image4)\n break\n\nfinally:\n # Stop streaming\n 
pipeline.stop()","repo_name":"sunshineharry/4dof_Gripper_controler","sub_path":"深度对齐/save_aligned_image.py","file_name":"save_aligned_image.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33006007519","text":"import pandas as pd\n\nfrom openpyxl.utils.dataframe import dataframe_to_rows\nfrom shutil import copyfile\nfrom openpyxl import load_workbook\n\ntemplate_file = 'C:\\\\work\\\\newoperplan\\\\testfiles\\\\Шаблон дефицитов.xlsx' # Has a header in row 1 already\noutput_file = 'C:\\\\work\\\\newoperplan\\\\testfiles\\\\Result.xlsx' # What we are saving the template as\n\n# Copy Template.xlsx as Result.xlsx\ncopyfile(template_file, output_file)\n\nd = {'col1': [1, 2], 'col2': [3, 4]}\ndf = pd.DataFrame(data=d)\n\nprint(df)\n\n# Load the workbook and access the sheet we'll paste into\n# wb = load_workbook(output_file)\n# ws = wb['Дефицит']\n# ws['A8']\n# for r in dataframe_to_rows(df, index=False, header=False):\n# ws.append(r)\n\n# book = load_workbook(output_file)\n# writer = pd.ExcelWriter(output_file, engine='openpyxl') \n# writer.book = book\n# writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n\n# df.to_excel(writer, index=False, sheet_name='Дефицит', startrow=5, header=None)\n\n# writer.save()\n\n# writer = pd.ExcelWriter(output_file, engine='openpyxl')\n# df.to_excel(writer, index=False, sheet_name='Дефицит', startrow=6, header=None)\n# writer.save()\n\n# wb.save(output_file)\n\n##############################################################################\n#\n# An example of converting a Pandas dataframe to an xlsx file\n# with column formats using Pandas and XlsxWriter.\n#\n# Copyright 2013-2020, John McNamara, jmcnamara@cpan.org\n#\n\n# Create a Pandas dataframe from some data.\ndf = pd.DataFrame({'Numbers': [1010, 2020, 3030, 2020, 1515, 3030, 4545],\n 'Percentage': [.1, .2, .33, .25, .5, .75, .45 ],\n})\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nwriter = pd.ExcelWriter(output_file, engine='xlsxwriter')\n\n# Convert the dataframe to an XlsxWriter Excel object.\ndf.to_excel(writer, sheet_name='Sheet1')\n\n# Get the xlsxwriter workbook and worksheet objects.\nworkbook = writer.book\nworksheet = writer.sheets['Sheet1']\n\n# Add some cell formats.\nformat1 = workbook.add_format({'num_format': '#,##0.00'})\nformat2 = workbook.add_format({'num_format': '0%'})\n\n# Note: It isn't possible to format any cells that already have a format such\n# as the index or headers or any cells that contain dates or datetimes.\n\n# Set the column width and format.\nworksheet.set_column('B:B', 18, format1)\n\n# Set the format but not the column width.\nworksheet.set_column('C:C', None, format2)\n\n# Close the Pandas Excel writer and output the Excel file.\nwriter.save()","repo_name":"yegorkowalew/newoperplan","sub_path":"deficit.py","file_name":"deficit.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13301216666","text":"import collections\n\nclass Solution:\n def removeInvalidParentheses(self, s: str):\n def isValid(str):\n count = 0\n for char in str:\n if char == \"(\":\n count += 1\n elif char == \")\":\n count -= 1\n if count < 0:\n return False\n return count == 0\n\n if not s:\n return ['']\n\n ret = []\n visited = collections.defaultdict(str)\n visited[s]\n q = collections.deque()\n q.append(s)\n found = False\n\n while q:\n for i in range(len(q)):\n top = q.popleft()\n\n if (isValid(top)):\n found = True\n ret.append(top)\n\n if found:\n continue\n\n for j in range(len(top)):\n if top[j] != \"(\" and top[j] != \")\":\n continue\n\n newStr = top[:j] + top[j+1:]\n\n if newStr not in visited:\n visited[newStr]\n q.append(newStr)\n return ret\n\n# Old Solution\n # def removeInvalidParentheses(self, s: str):\n # retList = []\n #\n # def permuteHelper(paranthLeft, build, stack):\n # if paranthLeft == '':\n # if stack == []:\n # retList.append(build)\n # else:\n # if paranthLeft[0] == '(':\n # permuteHelper(paranthLeft[1:], build, stack)\n # stack.append('(')\n # permuteHelper(paranthLeft[1:], build + \"(\", stack)\n # elif paranthLeft[0] == \")\":\n # permuteHelper(paranthLeft[1:], build, stack)\n # if stack:\n # stack.pop()\n # permuteHelper(paranthLeft[1:], build + ')', stack)\n #\n # permuteHelper(s, '', [])\n # return retList\n\nsol = Solution()\ninput = \"()())()\"\nprint(sol.removeInvalidParentheses(input))","repo_name":"adalloul0928/Leetcode_Hell","sub_path":"Archive/Facebook/Recursion/removeInvalidParanth.py","file_name":"removeInvalidParanth.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18370045541","text":"import logging\n\nfrom pymongo import MongoClient\nfrom redis.client import Redis\n\nfrom config import C\n\n_mongo_client = None\n_mongo_db = None\n\n_redis_client = None\n\n\ndef mongo_db():\n global _mongo_db\n return _mongo_db\n\n\ndef redis_client():\n global _redis_client\n return _redis_client\n\n\ndef init_db():\n \"\"\"Init database objects\"\"\"\n global _mongo_client, _mongo_db, _redis_client\n\n _mongo_client = MongoClient(C.mongo_addr, C.mongo_port)\n try:\n _mongo_client.server_info()\n except Exception as e:\n logging.getLogger('image-classifier-backend').error('mongo error, ' + str(e))\n exit(1)\n _mongo_db = _mongo_client[C.mongo_db]\n\n _redis_client = Redis(C.redis_addr, C.redis_port, C.redis_db)\n try:\n _redis_client.ping()\n except Exception as e:\n logging.getLogger('image-classifier-backend').error('redis error, ' + str(e))\n exit(1)\n","repo_name":"KSkun/Image-Spider","sub_path":"src/db/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"21319821533","text":"import sys\r\nimport PySide6.QtWidgets as pq\r\n#collection of some qtwidgets more in the documentation \r\nclass MainWindow(pq.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n layout= pq.QVBoxLayout()\r\n widgets = [\r\n pq.QCheckBox,\r\n pq.QComboBox,\r\n pq.QDateEdit,\r\n pq.QDateTimeEdit,\r\n pq.QDial,\r\n pq.QDoubleSpinBox,\r\n pq.QFontComboBox,\r\n pq.QLCDNumber,\r\n pq.QLabel,\r\n pq.QLineEdit,\r\n pq.QProgressBar,\r\n pq.QPushButton,\r\n pq.QRadioButton,\r\n pq.QSlider,\r\n pq.QSpinBox,\r\n pq.QTimeEdit,\r\n ]\r\n\r\n for widget in widgets:\r\n layout.addWidget(widget())\r\n\r\n central_widget = pq.QWidget()\r\n central_widget.setLayout(layout)\r\n \r\n self.setCentralWidget(central_widget)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = pq.QApplication(sys.argv)\r\n\r\n window = MainWindow()\r\n window.show()\r\n\r\n app.exec_()","repo_name":"StefanStahlCode/Tutorials","sub_path":"PySide6/widgets_overview.py","file_name":"widgets_overview.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32772623034","text":"import numpy as np\n\nimport Utility as util\n\n\nclass DataSelection:\n\n def interval_from_selection(self, selection):\n mode = selection[0]\n if mode == \"interval\":\n interval = selection[1:]\n elif mode == \"range\":\n interval = selection[1:3]\n elif mode == \"literal\":\n interval = selection[[1, -1]]\n elif mode == \"random\":\n interval = [str(selection[1]), str(selection[1])]\n elif mode == \"random-split\":\n interval = [str(i) for i in selection[1:3]]\n elif mode == \"ordered-split\":\n interval = [str(i) for i in selection[1:3]]\n elif mode == \"all\":\n interval = [\"all\", \"all\"]\n else:\n raise NotImplementedError(selection)\n return interval\n\n def indices_from_selection(self, labels, selection, **kwargs):\n mode = selection[0]\n if mode[0] == \"~\":\n selection = [mode[1:]] + selection[1:]\n indices = self.indices_from_selection(labels, selection)\n indices = np.delete(np.arange(len(labels)), indices)\n else:\n try:\n if mode == \"interval\":\n start_idx = np.where(labels == str(selection[1]))[0][0]\n end_idx = np.where(labels == str(selection[2]))[0][0]\n indices = np.arange(start_idx, end_idx+1)\n elif mode == \"range\":\n start, end, step = selection[1:]\n if start < 0:\n start += len(labels) \n if end < 0:\n end += len(labels) \n indices = np.arange(start, end, step)\n elif mode == \"random\":\n k = selection[1]\n if isinstance(k, float):\n k = round(k * len(labels))\n rng = np.random.default_rng()\n if len(selection) > 2:\n rng = np.random.default_rng(selection[2])\n indices = rng.choice(len(labels), k, replace=False)\n elif mode == \"random-split\":\n start, end = selection[1:3]\n if isinstance(start, float):\n start = int(start * len(labels))\n elif not isinstance(start, int):\n raise ValueError()\n if isinstance(end, float):\n end = int(end * len(labels))\n elif not isinstance(end, int):\n raise ValueError()\n rng = np.random.default_rng()\n if len(selection) > 3:\n rng = np.random.default_rng(selection[3])\n indices = rng.permutation(len(labels))[start:end]\n elif mode == \"ordered-split\":\n start, end = selection[1:3]\n if isinstance(start, float):\n start = int(start * len(labels))\n elif not isinstance(start, int):\n raise ValueError(selection)\n if isinstance(end, float):\n end = int(end * len(labels))\n elif not isinstance(end, int):\n raise ValueError(selection)\n indices = np.arange(len(labels))[start:end]\n elif mode == \"literal\":\n indices = np.array(\n util.get_dict_values(util.to_key_index_dict(labels), selection[1:], **kwargs)\n )\n elif mode == \"all\":\n indices = np.arange(0, len(labels))\n else:\n raise NotImplementedError(\"Selection mode \\\"%s\\\" not implemented\" % (mode))\n except IndexError as err:\n print(\"Function indices_from_selection() failed to locate an element from the set of labels and selection criteria below:\")\n print(\"Labels @\", \"len(%d)\" % (len(labels)), \"=\")\n print(labels)\n print(\"Selection @\", \"len(%d)\" % (len(selection)), \"=\")\n print(selection)\n raise IndexError(err)\n return indices\n\n # Description:\n # Filter a single axis of \"data\" by applying the given filter index set \"indices\"\n # Arguments:\n # data - the data to be filtered\n # axis - the target axis/dimension of data for filtering\n # indices - the array of locations at which to pull values from data along the given axis\n # Requirements:\n # 1. axis - integer\n # 2. 
indices - int or ndarray\n def __filter_axis(self, data, axis, indices):\n if not isinstance(axis, int):\n raise ValueError(\"Axis must be an integer, received %s\" % (type(axis)))\n if not isinstance(indices, (int, np.int32, np.int64, np.ndarray)):\n raise ValueError(\"Filter index set must be int or NumPy.ndarray, received %s\" % (type(indices)))\n if isinstance(indices, np.ndarray) and len(indices) == 0:\n new_shape = list(data.shape)\n new_shape[axis] = 0\n return np.empty(new_shape)\n return np.take(data, indices, axis)\n\n # Description:\n # Broadcasting wrapper for __filter_axis() to handle multiple \"axis\" and \"indices\" arguments\n # Arguments:\n # data - the data to be filtered\n # axis - the axis to filter\n # indices - the array, or arrays, of locations at which to pull values from data along the given axis\n # Requirements:\n # 1. axis - integer or list of integers\n # 2. indices - ndarray or list of ndarrays\n def filter_axis(self, data, axis, indices):\n \"\"\"\n\n Arguments\n ---------\n data : ndarray or tuple/list/dict of ndarray\n axis : int or tuple/list/dict of int\n indices : ndarray or tuple/list/dict of ndarray\n\n Returns\n -------\n data : ndarray or tuple/list/dict of ndarray\n the data with given axis or axes filtered by the given indices\n\n \"\"\"\n if isinstance(data, tuple):\n return tuple(self.filter_axis(_, axis, indices) for _ in data)\n elif isinstance(data, list):\n return tuple(self.filter_axis(_, axis, indices) for _ in data)\n elif isinstance(data, dict):\n return {key: self.filter_axis(value, axis, indices) for key, value in data.items()}\n # filter just one axis with one index set\n if not isinstance(axis, (tuple, list)) and not isinstance(indices, (tuple, list)):\n return self.__filter_axis(data, axis, indices)\n # filter a set of axes with a set of filter index sets\n if isinstance(axis, (tuple, list)) and isinstance(indices, (tuple, list)):\n if len(axis) == len(indices): \n for _axis, _indices in zip(axis, indices):\n data = self.__filter_axis(data, _axis, _indices)\n else: # not a 1:1 mapping, try 1:n broadcasting\n if len(axis) == 1: # broadcast axis to all filter indices\n for _indices in indices:\n data = self.__filter_axis(data, axis[0], _indices)\n elif len(indices) == 1: # broadcast filter indices to all axes\n for _axis in axis:\n data = self.__filter_axis(data, _axis, indices[0])\n else: # lengths not equal and cannot be broadcasted\n raise ValueError(\"Number of axes and filter index sets must be equal or broadcastable\")\n elif isinstance(axis, (tuple, list)): # a single filter index set, broadcast it to all axes\n for _axis in axis:\n data = self.__filter_axis(data, _axis, indices)\n elif isinstance(indices, (tuple, list)): # a single axis, broadcast it to all fitler index sets\n for _indices in indices:\n data = self.__filter_axis(data, axis, _indices)\n return data\n\n # Description:\n # Filter multiple axes of \"data\" by applying the given multi-axis filter index set \"indices\"\n # Arguments:\n # data - the data to be filtered\n # axes - the target axes/dimensions of data for filtering\n # indices - the multi-axis array of locations at which to pull values from data along the given axes\n # Requirements:\n # data - ndarray\n # axes - list of integers\n # indices - ndarray\n def __filter_axes(self, data, axes, indices):\n if len(data.shape) < 2:\n raise ValueError(\"Data must be multi-dimensional, received data.shape=%s\" % (data.shape))\n if len(axes) != len(indices.shape):\n raise ValueError(\"Number of axes and filter 
dimension must be equal, received axes=%s and indices.shape=%s\" % (axes, indices.shape))\n # Perform filtering\n # arange target axes to occupy right-most end\n data = np.moveaxis(data, axes, range(-len(axes), 0))\n # filter target axes (now the last k=len(axes) dimensions) with indices expanded to dimension of data\n data = np.take_along_axis(\n data, \n np.expand_dims(indices, tuple(range(len(data.shape) - len(axes)))), \n -1\n )\n # arrange target axes back into original positions\n data = np.moveaxis(data, range(-len(axes), 0), axes)\n return data\n# return np.moveaxis(data, range(-len(axes), 0), axes)\n\n def filter_axes(self, data, axes, indices):\n return self.__filter_axes(data, axes, indices)\n \n def __filter_axis_foreach(self, data, axis, indices):\n if isinstance(data, tuple):\n data = tuple(self.__filter_axis(_, axis, indices) for _ in data)\n elif isinstance(data, list):\n data = [self.__filter_axis(_, axis, indices) for _ in data]\n elif isinstance(data, dict):\n data = {key: self.__filter_axis(value, axis, indices) for key, value in data.items()}\n elif isinstance(data, np.ndarray) and issubclass(data.dtype.type, np.object):\n return np.reshape(\n np.array((self.filter_axis(_, axis, indices) for _ in np.reshape(data, -1)), dtype=object), \n data.shape\n )\n else:\n raise NotImplementedError(\"Unknown type (%s) for data in __filter_axis_foreach()\" % (str(type))) \n return data\n \n def filter_axis_foreach(self, data, axis, indices):\n data = self.__filter_axis_foreach(data, axis, indices)\n return data\n\n def get_reduced_temporal_indices(self, temporal_selection, reduced_temporal_labels, reduced_n_temporal):\n mode = temporal_selection[0]\n implemented_modes = [\"interval\", \"range\", \"literal\"]\n if mode not in implemented_modes:\n raise NotImplementedError(\"Given mode \\\"%s\\\" but only modes \\\"%s\\\" are implemented\" % (\n mode,\n \",\".join(implemented_modes))\n )\n n_temporal_channels = reduced_temporal_labels.shape[0]\n temporal_indices = []\n if mode == \"interval\":\n for i in range(n_temporal_channels):\n start_idx = self.get_temporal_index(\n temporal_selection[1],\n reduced_temporal_labels[i,:reduced_n_temporal[i]]\n )\n end_idx = self.get_temporal_index(\n temporal_selection[2],\n reduced_temporal_labels[i,:reduced_n_temporal[i]]\n )\n temporal_indices.append(np.arange(start_idx, end_idx+1))\n elif mode == \"range\":\n raise NotImplementedError()\n elif mode == \"literal\":\n raise NotImplementedError()\n return temporal_indices\n","repo_name":"HipGraph/HydroLearn","sub_path":"Data/DataSelection.py","file_name":"DataSelection.py","file_ext":"py","file_size_in_byte":11723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"41297047511","text":"import jwt\nimport pytest\nfrom testcontainers.mongodb import MongoDbContainer\nfrom testcontainers.redis import RedisContainer\n\nfrom skill_manager.models import Skill, SkillSettings\n\n\n@pytest.fixture(scope=\"module\")\ndef monkeymodule():\n from _pytest.monkeypatch import MonkeyPatch\n\n mpatch = MonkeyPatch()\n yield mpatch\n mpatch.undo()\n\n\n@pytest.fixture(scope=\"module\")\ndef init_mongo_db():\n mongo_db = MongoDbContainer(\"mongo:5.0.4\")\n mongo_db.start()\n mongo_db._connect()\n try:\n yield mongo_db\n except Exception as err:\n raise err\n finally:\n mongo_db.stop()\n\n\n@pytest.fixture(scope=\"module\")\ndef init_redis():\n redis_password = \"redis-pass\"\n redis = RedisContainer(\"redis:latest\", password=redis_password)\n redis.start()\n redis._connect()\n try:\n yield redis\n except Exception as err:\n raise err\n finally:\n redis.stop()\n\n\n@pytest.fixture\ndef skill_prediction_factory():\n def skill_prediction(**kwargs):\n return {\n \"predictions\": [\n {\n \"question\": \"What is the answer to the ultimate question of life, the universe, and everything?\",\n \"prediction_score\": 1,\n \"prediction_output\": {\"output\": \"answer\", \"output_score\": \"1\"},\n \"prediction_documents\": [\n {\n \"index\": \"\",\n \"document_id\": \"\",\n \"document\": \"doc one\",\n \"span\": None,\n \"url\": \"\",\n \"source\": \"\",\n \"document_score\": 0.0,\n }\n ],\n **kwargs,\n }\n ]\n }\n\n return skill_prediction\n\n\n@pytest.fixture\ndef skill_factory():\n def skill_init(\n name=\"test-skill\",\n url=\"http://test-skill.square:1234\",\n skill_type=\"abstractive\",\n skill_settings=SkillSettings(),\n user_id=\"test-user-id\",\n description=\"skill for testing\",\n published=False,\n default_skill_args=None,\n **kwargs,\n ):\n # pass `id` or `created_at` as kwargs to add them explicitly\n skill = Skill(\n name=name,\n url=url,\n skill_type=skill_type,\n skill_settings=skill_settings,\n user_id=user_id,\n description=description,\n published=published,\n default_skill_args={} if default_skill_args is None else default_skill_args,\n **kwargs,\n )\n if not skill.id:\n del skill.id\n\n return skill\n\n yield skill_init\n\n\n@pytest.fixture\ndef token_factory():\n def token(**kwargs):\n return jwt.encode(\n {\"iss\": \"https://square.ukp-lab.test/auth/realms/test-realm\", **kwargs},\n \"secret\",\n algorithm=\"HS256\",\n )\n\n return token\n","repo_name":"UKP-SQuARE/square-core","sub_path":"skill-manager/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"3"}
+{"seq_id":"10615757392","text":"#!/usr/bin/python3\n\nimport sys\ntry:\n\timport PySide6.QtGui as QtGui\nexcept Exception:\n\ttry:\n\t\timport PySide2.QtGui as QtGui\n\texcept Exception:\n\t\timport PySide.QtGui as QtGui\n\nclass QPainter(QtGui.QPainter):\n\t\"\"\"Add __enter__ and __exit__ methods so that the with statement can be used.\n\n\tbefore:\n\t\tp = QPainter()\n\t\tp.begin(self)\n\t\t# draw\n\t\tp.end()\n\n\tnow:\n\t\twith QPainter(self) as p:\n\t\t\t# draw\n\n\tYou can import like this to shadow the actual QPainter class:\n\t\tfrom PySide6.QtGui import *\n\t\tfrom QPainter import *\n\t\"\"\"\n\n\twidget = None\n\n\tdef __init__(self, widget):\n\t\tsuper().__init__()\n\t\tself.widget = widget\n\n\tdef __enter__(self):\n\t\tself.begin(self.widget)\n\t\treturn self\n\n\tdef __exit__(self, type, value, traceback):\n\t\tself.end()\n","repo_name":"a1291762/wallpaper_helper","sub_path":"QPainter.py","file_name":"QPainter.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5851928868","text":"class_str='class'\ntypes=[\"BOOL\", \"CHAR\", \"INT8\", \"UINT8\", \"INT16\", \"UINT16\", \"INT32\", \"UINT32\",\n\t\t\"INT64\", \"UINT64\", \"FLOAT32\", \"FLOAT64\", \"FLOATMAX\"] \nconfig_tests=[\"HAVE_HDF5\", \"HAVE_JSON\", \"HAVE_XML\", \"HAVE_LAPACK\", \"USE_CPLEX\",\n\t\"USE_SVMLIGHT\", \"USE_GLPK\", \"USE_LZO\", \"USE_GZIP\", \"USE_BZIP2\", \"USE_LZMA\"]\n\ndef check_class(line):\n\tif not (line.find('public')==-1 and\n\t\t\tline.find('private')==-1 and\n\t\t\tline.find('protected')==-1):\n\t\treturn True\n\ndef check_abstract_class(line):\n\tline=line.replace(' ','').replace('\\t','').strip()\n\treturn line.endswith('=0;')\n\ndef check_is_in_blacklist(c, lines, line_nr, blacklist):\n\tifdef_cnt=0\n\tfor i in xrange(line_nr,0,-1):\n\t\tline=lines[i]\n\t\tif line.find('#endif')!=-1:\n\t\t\tifdef_cnt-=1\n\t\tif line.find('#ifdef')!=-1:\n\t\t\tifdef_cnt+=1\n\n\t\t\tfor b in blacklist.keys():\n\t\t\t\tif line.find(b)!=-1 and ifdef_cnt>0:\n\t\t\t\t\treturn True\n\t\tif line.find('#ifndef')!=-1:\n\t\t\tifdef_cnt+=1\n\n\treturn False\n\ndef extract_class_name(lines, line_nr, line, blacklist):\n\ttry:\n\t\tif not line:\n\t\t\tline=lines[line_nr]\n\t\tc=line[line.index(class_str)+len(class_str):]\n\t\tif not ':' in c:\n\t\t\treturn\n\t\tif not check_class(line):\n\t\t\tif not check_class(lines[line_nr+1]):\n\t\t\t\treturn\n\t\tc=c.split()[0]\n\texcept:\n\t\treturn\n\n\tc=c.strip(':').strip()\n\n\tif not c.startswith('C'):\n\t\treturn\n\tif c.endswith(';'):\n\t\treturn\n\tif '>' in c:\n\t\treturn\n\tif not (len(c)>2 and c[1].isupper()):\n\t\treturn\n\tif check_is_in_blacklist(c[1:], lines, line_nr, blacklist):\n\t\treturn\n\n\treturn c[1:]\n\ndef get_includes(classes):\n\tfrom subprocess import Popen, PIPE \n\tfrom StringIO import StringIO\n\tcmd=[\"find\", \".\", \"-false\"]\n\tfor c in classes:\n\t\tcmd.extend([\"-o\", \"-name\", \"%s.h\" % c])\n\tp = Popen(cmd, stdout=PIPE)\n\toutput = StringIO(p.communicate()[0])\n\tincludes=[]\n\tfor o in output:\n\t\tincludes.append('#include \"%s\"' % o.strip().lstrip('./'))\n\treturn includes\n\ndef get_definitions(classes):\n\tdefinitions=[]\n\tfor c in classes:\n\t\td=\"static CSGObject* __new_C%s(EPrimitiveType g) { return g == PT_NOT_GENERIC? 
new C%s(): NULL; }\" % (c,c)\n\t\tdefinitions.append(d)\n\treturn definitions\n\ndef get_template_definitions(classes):\n\tdefinitions=[]\n\tfor c in classes:\n\t\td=[]\n\t\td.append(\"static CSGObject* __new_C%s(EPrimitiveType g)\\n{\\n\\tswitch (g)\\n\\t{\\n\" % c)\n\t\tfor t in types:\n\t\t\tif t in ('BOOL','CHAR'):\n\t\t\t\tsuffix=''\n\t\t\telse:\n\t\t\t\tsuffix='_t'\n\t\t\td.append(\"\\t\\tcase PT_%s: return new C%s<%s%s>();\\n\" % (t,c,t.lower(),suffix))\n\t\td.append(\"\\t\\tcase PT_SGOBJECT: return NULL;\\n\\t}\\n\\treturn NULL;\\n}\")\n\t\tdefinitions.append(''.join(d))\n\treturn definitions\n\ndef get_struct(classes):\n\tstruct=[]\n\tfor c in classes:\n\t\ts='{m_class_name: \"%s\", m_new_sgserializable: __new_C%s},' % (c,c)\n\t\tstruct.append(s)\n\treturn struct\n\ndef extract_block(c, lines, start_line, stop_line, start_sym, stop_sym):\n\tsym_cnt=0\n\n\tblock_start=-1;\n\tblock_stop=-1;\n\n\tfor line_nr in xrange(start_line, stop_line):\n\t\tline=lines[line_nr]\n\t\tif line.find(start_sym)!=-1:\n\t\t\tsym_cnt+=1\n\t\t\tif block_start==-1:\n\t\t\t\tblock_start=line_nr\n\t\tif line.find(stop_sym)!=-1:\n\t\t\tblock_stop=line_nr+1\n\t\t\tsym_cnt-=1\n\t\tif sym_cnt==0 and block_start!=-1 and block_stop!=-1:\n\t\t\treturn block_start,block_stop\n\n\treturn block_start,block_stop\n\ndef test_candidate(c, lines, line_nr):\n\tstart,stop=extract_block(c, lines, line_nr, len(lines), '{','}')\n\tif stop')\n\t\t\t\t\tline=line[cp+1:]\n\t\t\t\t\tcp=line.find(class_str)\n\t\t\t\t\tif cp!=-1:\n\t\t\t\t\t\tc=extract_class_name(lines, line_nr, line, blacklist)\n\t\t\telse:\n\t\t\t\tif line.find(class_str)!=-1:\n\t\t\t\t\tc=extract_class_name(lines, line_nr, None, blacklist)\n\t\t\tif c:\n\t\t\t\tok, line_nr=test_candidate(c, lines, line_nr)\n\t\t\t\tif ok:\n\t\t\t\t\tclasses.append(c)\n\t\t\t\tcontinue\n\n\t\t\tline_nr+=1\n\treturn classes\n\n\ndef write_templated_file(fname, substitutes):\n\ttemplate=file(fname).readlines()\n\n\tf=file(fname,'w')\n\tfor line in template:\n\t\tl=line.strip()\n\t\tif l.startswith('REPLACE') and l.endswith('THIS'):\n\t\t\tl=line.split()[1]\n\t\t\tfor s in substitutes.iterkeys():\n\t\t\t\tif l==s:\n\t\t\t\t\tf.write('\\n'.join(substitutes[s]))\n\t\t\t\tcontinue\n\t\telse:\n\t\t\tf.write(line)\n\n\ndef read_config():\n\tconfig=dict()\n\tfor line in file('lib/config.h').readlines():\n\t\tif line=='\\n':\n\t\t\tcontinue\n\t\tl=[l.strip() for l in line.split()]\n\t\tconfig[l[1]]=1\n\n\treturn config\n\ndef get_blacklist():\n\tconfig=read_config()\n\tblacklist=dict()\n\tfor cfg in config_tests:\n\t\tif not config.has_key(cfg):\n\t\t\tblacklist[cfg]=1\n\treturn blacklist\n\t\t\nif __name__=='__main__':\n\timport sys\n\tTEMPL_FILE=sys.argv[1]\n\tHEADERS=sys.argv[2:]\n\n\tblacklist = get_blacklist()\n\n\tclasses = extract_classes(HEADERS, False, blacklist)\n\ttemplate_classes = extract_classes(HEADERS, True, blacklist)\n\tincludes = get_includes(classes+template_classes)\n\tdefinitions = get_definitions(classes)\n\ttemplate_definitions = get_template_definitions(template_classes)\n\tstruct = get_struct(classes+template_classes)\n\tsubstitutes = {'includes': includes,\n\t\t'definitions' :definitions,\n\t\t'template_definitions' : template_definitions,\n\t\t'struct' : struct\n\t\t}\n\n\twrite_templated_file(TEMPL_FILE, 
substitutes)\n","repo_name":"usc-clmc/usc-clmc-ros-pkg","sub_path":"learning/shogun/shogun/src/shogun/base/class_list.cpp.py","file_name":"class_list.cpp.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"}
+{"seq_id":"74094683602","text":"import json\nimport time\nimport hmac\nimport string\nimport hashlib\nimport requests\nfrom urllib import parse\nfrom sign import authBlackCheck\n\n\ndef get(access_key_id, secret_access_key, data):\n\n #url = 'http://*.*.*.*:6144/blackcheck'\n url = 'http://*.*.*.*:8001/blackcheck'\n response = requests.get(url, params=data)\n print(response.status_code)\n print(response.text)\n\n\ndef post(access_key_id, secret_access_key, json_basic_info):\n\n #1 设置 请求地址/请求方法\n #url = 'http://*.*.*.*:6144/blackcheck'\n url = 'http://*.*.*.*:8001/blackcheck'\n http_method = 'POST'\n #1 提取 请求主机地址/请求路径, 设置请求参数\n url_split = parse.urlsplit(url)\n host = url_split.scheme + '://' + parse.splitport(url_split.netloc)[0]\n path = url_split.path\n params = {}\n\n #2 提交数据, 及提交内容的MD5和数据长度\n #json_basic_info = json_basic_info\n content_type = 'application/json'\n content_md5 = hashlib.md5(str(json_basic_info).encode('utf8')).hexdigest()\n content_len = len(json.dumps(json_basic_info))\n\n #3 请求时刻(北京时间)时间戳\n timestamp = time.mktime(time.localtime())\n #timestamp = 1545901200.0\n # headers中的查询时间转换为 UTC时间戳\n query_date = time.strftime('%Y-%m-%dT%XZ', time.localtime(timestamp))\n\n #4 构造请求头headers, 指定参与签名的headers参数\n headers = {\n 'Host': host,\n 'Content-Type': content_type,\n 'Content-MD5' : content_md5,\n 'Content-Length': str(content_len),\n 'Query-Date': query_date,}\n #headers_to_sign = None\n headers_to_sign = {'host', 'content-type', 'content-md5', 'content-length', 'query-date'}\n\n #5 传输延迟时间(秒)\n expiration_time = 1800\n\n '''\n print('host:\\t\\t', host)\n print('path:\\t\\t', path)\n print('json:\\t\\t', json)\n print('content_md5:\\t\\t', content_md5)\n print('content_len:\\t\\t', content_len)\n print('timestamp:\\t\\t', timestamp)\n print('query_date:\\t\\t', query_date)\n print('headers:\\t\\t', headers)\n print('headers_to_sign:\\t\\t', headers_to_sign)\n print('expiration_time:\\t\\t', expiration_time)\n '''\n\n abc = authBlackCheck(\n access_key_id, secret_access_key, http_method, path, params, \n json, headers, headers_to_sign, timestamp, expiration_time)\n # 得到认证字符串\n authorization = abc.sign()\n# print('\\nauthorization:', authorization)\n\n headers['Authorization'] = authorization\n response = requests.post(url, json=json_basic_info, headers=headers, params=params)\n print(response.status_code)\n try:\n print(json.loads(response.text))\n except:\n print(response.text)\n\n\nif __name__ == \"__main__\":\n\n # 设置 应用授权ID, 应用秘钥, 传递数据\n access_key_id = 'xjzls8alyt3v38zwe2xuoshvn3l69sub'\n secret_access_key = 'hmvkvq6andwweid4qs4scem3dbj7uxzj'\n# '''\n json_basic_info = {'idcard': '420921198712345678', 'phone': '11122223333', 'name':'陈王',\n 'imei': '866018037459554', 'android_id': '49f3f8a1cf083664',\n 'mac': '50:9E:A7:04:F2:0C', 'idfa': 'A9-8DAF-1A9D4C473D55',\n 'ip': '112.96.69.153',}\n# '''\n\n post(access_key_id, secret_access_key, json_basic_info)\n# get(access_key_id, secret_access_key, json_basic_info)\n\n","repo_name":"alasituoer/risk-control-service-process","sub_path":"api/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39466177020","text":"###############################################################################\n# This script handles BLE communications between the Pyboard and a mobile phone\n# application made in Unity 3D\n#\n#\n#\n# Author: Levi Hargrove\n# Date: Jan 20, 2020\n###############################################################################\n\nimport ubluetooth, utime,ubinascii, pyb, micropython, machine\nfrom ble_advertising import advertising_payload\n\n_IRQ_CENTRAL_CONNECT = const(1)\n_IRQ_CENTRAL_DISCONNECT = const(2)\n_IRQ_GATTS_WRITE = const(3)\n_IRQ_GATTS_READ_REQUEST = const(4)\n_IRQ_SCAN_RESULT = const(5)\n_IRQ_SCAN_DONE = const(6)\n_IRQ_PERIPHERAL_CONNECT = const(7)\n_IRQ_PERIPHERAL_DISCONNECT = const(8)\n_IRQ_GATTC_SERVICE_RESULT = const(9)\n_IRQ_GATTC_SERVICE_DONE = const(10)\n_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)\n_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)\n_IRQ_GATTC_DESCRIPTOR_RESULT = const(13)\n_IRQ_GATTC_DESCRIPTOR_DONE = const(14)\n_IRQ_GATTC_READ_RESULT = const(15)\n_IRQ_GATTC_READ_DONE = const(16)\n_IRQ_GATTC_WRITE_DONE = const(17)\n_IRQ_GATTC_NOTIFY = const(18)\n_IRQ_GATTC_INDICATE = const(19)\n_IRQ_GATTS_INDICATE_DONE = const(20)\n\n\n# code to be run in micropython\ndef timeit(f, *args, **kwargs):\n func_name = str(f).split(' ')[1]\n def new_func(*args, **kwargs):\n t = utime.ticks_us()\n result = f(*args, **kwargs)\n print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us')\n return result\n return new_func\n\n_serviceUUID = ubluetooth.UUID(\"6E400001-B5A3-F393-E0A9-E50E24DCCA9E\")\n_uartReadCharacteristicUUID = ubluetooth.UUID(\"6E400003-B5A3-F393-E0A9-E50E24DCCA9E\")\n_uartWriteCharacteristicUUID = ubluetooth.UUID(\"6E400002-B5A3-F393-E0A9-E50E24DCCA9E\")\n_uartMessCharacteristicUUID = ubluetooth.UUID(\"6E400004-B5A3-F393-E0A9-E50E24DCCA9E\")\n\n_READ_CHAR = (_uartReadCharacteristicUUID, ubluetooth.FLAG_READ|ubluetooth.FLAG_NOTIFY,)\n_WRITE_CHAR = (_uartWriteCharacteristicUUID, ubluetooth.FLAG_WRITE|ubluetooth.FLAG_NOTIFY,)\n_MESS_CHAR = (_uartMessCharacteristicUUID, ubluetooth.FLAG_READ | ubluetooth.FLAG_NOTIFY, )\n_CONTROLLER_SERVICE = (_serviceUUID, (_READ_CHAR,_WRITE_CHAR,_MESS_CHAR,),)\n\n\nclass BLEComms:\n # Initialization function. 
Set up variables, register callbacks, etc.\n def __init__(self, ble, name='PYB_2DOF_Wrist'):\n self._ble = ble\n self._conn_handle = None\n self._ble.active(True)\n #self._ble.config(rxbuf=2048)\n self._ble.config(gap_name=name)\n self._ble.irq(self._irq)\n ((self._txhandle,self._rxhandle,self._mxhandle),) = self._ble.gatts_register_services((_CONTROLLER_SERVICE,))\n self._connections = set()\n self._payload = advertising_payload(name=name, services=[_serviceUUID])\n self._name = name\n self._adv()\n self._send_saved_folders = True\n self._connected = False\n self._new_message = False\n self._message = bytearray(32)\n self._indicating = False\n self._send_controller_status = False\n self._exceptions = 0\n self._last_heartbeat = -1\n self._send_classifier_available_status = True\n self._first_time_connected = True\n\n def adv_encode(self,adv_type,value):\n tmp = bytes((len(value) + 1, adv_type,)) + value\n return tmp\n\n def adv_encode_name(self,name):\n tmp = self.adv_encode(0x09, name.encode())\n return tmp\n\n def checkMessageFlag(self):\n return self._new_message\n\n def getMessage(self):\n self._new_message = False\n return self._message\n\n def sendMessage(self,mess):\n try:\n self._ble.gatts_notify(64, self._txhandle, mess)\n self._exceptions = 0\n except Exception as e:\n print(\"Got exception\" + str(self._exceptions) + str(e))\n self._exceptions = self._exceptions + 1\n micropython.mem_info()\n if self._exceptions > 50:\n self.disconnectBLE()\n\n def writeMessage(self,mess):\n #print(\"writing BLE message\")\n \n try:\n #self._ble.gatts_write(self._mxhandle, mess)\n self._ble.gatts_notify(64, self._mxhandle,mess)\n self._exceptions = 0\n except Exception as e:\n print(\"Got exception\" + str(self._exceptions) + str(e))\n self._exceptions = self._exceptions + 1\n micropython.mem_info()\n if self._exceptions > 50:\n self.disconnectBLE()\n \n def setSendControllerStatus(self,status_val):\n self._send_controller_status = status_val\n\n\n def checkSendControllerStatus(self):\n return self._send_controller_status\n\n def checkIndicatingFlag(self):\n return self._indicating\n\n def disconnectBLE(self):\n for conn_handle in self._connections:\n try:\n self._ble.gap_disconnect(conn_handle)\n except Exception as e:\n print(\"Disconnect exception: \" + str(e))\n\n self._send_controller_status = False\n\n utime.sleep_ms(1)\n self._connections.clear()\n self._connected = False\n self._connected_counter = 0\n self._last_heartbeat = -1\n self._adv()\n \n\n def checkConnectionStatus(self):\n return self._connected\n\n def _irq(self, event, data):\n # Track connections so we can send notifications.\n if event == _IRQ_CENTRAL_CONNECT:\n self._conn_handle, _, _, = data\n self._connections.add(self._conn_handle)\n self._connected = True\n self._send_controller_status = True\n self._send_classifier_available_status = True\n print(\"Central Connect!\")\n elif event == _IRQ_CENTRAL_DISCONNECT:\n self._conn_handle, _, _, = data\n\n #self._connections.remove(self._conn_handle)\n for conn_handle in self._connections:\n self._ble.gap_disconnect(conn_handle)\n self._connections.clear()\n self._connected = False\n self._connected_counter = 0\n self._last_heartbeat = -1\n self._send_controller_status = False\n self._send_saved_folders = True\n print(\"Central Disconnect!\")\n self._first_time_connected = True\n \n # Start advertising again to allow a new connection.\n self._adv()\n elif event == _IRQ_PERIPHERAL_DISCONNECT:\n print(\"PERIPHERAL Disconnected\")\n\n elif event == _IRQ_GATTS_WRITE:\n 
self.conn_handle, value_handle, = data\n if self.conn_handle in self._connections:\n if self._first_time_connected:\n self._new_message = False\n self._message = bytearray(32)\n leftover_message = self._ble.gatts_read(self._rxhandle)\n print(\"leftover_message: \",leftover_message)\n else:\n self._new_message = True\n self._message = self._ble.gatts_read(self._rxhandle)\n\n elif event == _IRQ_GATTS_INDICATE_DONE:\n # A central has acknowledged the indication.\n # Note: Status will be zero on successful acknowledgment, implementation-specific value otherwise.\n conn_handle, value_handle, status = data\n\n def _adv(self, interval_us=500000):\n try:\n self._ble.gap_advertise(100, self.adv_encode(0x01, b'\\x06') + self.adv_encode(0x03, b'\\x0d\\x18') + self.adv_encode(0x19, b'\\xc1\\x03') + self.adv_encode_name(self._name))\n except Exception as e:\n print(\"Advertising excpetion: \" + str(e))\n\n def getHeartbeatTime(self):\n return self._last_heartbeat\n\n def updateHeartbeatTime(self):\n self._last_heartbeat = utime.time()\n\n# def parseMessage(mess):\n# if mess[0] == 0x01:\n# self._last_heartbeat\n# elif mess[0] == 0x10:\n# print(\"Log Start\")\n# save_name = mess[1:].decode() + '_' + str(utime.localtime()[0]) + '_' + str(utime.localtime()[1]) + '_' + str(utime.localtime()[2]) + '_' + str(utime.localtime()[3]) + '_' + str(utime.localtime()[4]) + '_' + str(utime.localtime()[5]) + '.DAP' \n# print(save_name)\n# elif mess[0] == 0x11:\n# print(\"Log Stop\")\n# elif mess[0] == 0x12:\n# print(\"Can Start Command\")\n# elif mess[0] == 0x13:\n# print(\"Can Stop Command\") \n\n# def demo():\n# ble = ubluetooth.BLE()\n# b = BLEComms(ble)\n# while True:\n# mess_avail = b.checkMessageFlag()\n# if mess_avail:\n# parseMessage(b.getMessage())\n# utime.sleep_ms(500)\n\nif __name__ == '__main__':\n demo()","repo_name":"yuniteh/wrist_2dof","sub_path":"pyboard/ble_comms.py","file_name":"ble_comms.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40334761797","text":"def convert_car_data(self, car_full_data):\r\n #Converting categorical data to inetegr data (encoding) using pandas factorize ##\r\n ##Encoding the car data from categorical to integer ##\r\n import pandas as pd\r\n pd.options.display.max_columns = None\r\n pd.options.display.width = None\r\n pd.options.display.max_rows = None\r\n\r\n gdata = car_full_data\r\n gdata = pd.DataFrame(gdata) ##Converting into Dataframe to make easy for encoding ##\r\n ##Renaming the columns with the columns names provided in names list for the dataset ##\r\n gdata.rename(columns={0: 'buying', 1: 'maint', 2: 'doors', 3: 'persons', 4: 'lug_boot', 5: 'safety', 6: 'prediction'}, inplace=True)\r\n \r\n ##Using pandas factorize encoding the values here down ##\r\n gdata['buying'],_ = pd.factorize(gdata['buying']) \r\n gdata['maint'],_ = pd.factorize(gdata['maint'])\r\n gdata['doors'],_ = pd.factorize(gdata['doors'])\r\n gdata['persons'],_ = pd.factorize(gdata['persons'])\r\n gdata['lug_boot'],_ = pd.factorize(gdata['lug_boot'])\r\n gdata['safety'],_ = pd.factorize(gdata['safety'])\r\n\r\n return gdata\r\n","repo_name":"hiteshram/Car-Acceptance-Prediction","sub_path":"Car/tasks/convert_car_data.py","file_name":"convert_car_data.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7827620973","text":"import wx\r\nimport wx.grid\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport datetime\r\nimport json\r\nfrom numpy.core.defchararray import lower\r\n\r\n# LOAD THE FILES NEEDED\r\nlistings = pd.read_csv('./csvs/listings_dec18.csv')\r\nreviews = pd.read_csv('./csvs/reviews_dec18.csv')\r\n\r\n# GLOBAL VARIABLES\r\n# STORE ONLY THE NEEDED COLUMNS INTO NEW DATAFRAMES\r\nlistingsReducedColumns = listings[\r\n [\r\n 'id',\r\n 'name',\r\n 'host_name',\r\n 'host_since',\r\n 'street',\r\n 'neighbourhood',\r\n 'neighbourhood_cleansed',\r\n 'property_type',\r\n 'room_type',\r\n 'amenities',\r\n 'price',\r\n 'review_scores_rating'\r\n ]\r\n]\r\ncommentsReducedColumns = reviews[['listing_id', 'comments']]\r\ncleanlinessKeywords = ['clean', 'neat', 'fresh', 'hygienic', 'taintless', 'sterile', 'sanitary', 'washed', 'flawless', 'bright', 'shiny', 'sparkling']\r\n\r\n# FUNCTIONS TO RETRIEVE LISTINGS BASED ON USER INPUTS\r\ndef showListings(listingToDisplay):\r\n cols = listingToDisplay[0] #len = 13\r\n rows = len(listingToDisplay)\r\n\r\n df = pd.DataFrame(listingToDisplay[1:], columns=cols)\r\n # print(df.shape)\r\n # print(df)\r\n result = df.to_json(orient='index')\r\n #print(result)\r\n\r\n #WRITES THE CONVERTED JSON DATAFRAME TO A FILE SO IT CAN BE USED IN THE GUI MODULE\r\n with open('listings.json', 'w') as jsonListings:\r\n json.dump(result, jsonListings)\r\n\r\ndef findKeyword(startperiod, endperiod, keyword):\r\n # MANIPULATE THE PROVIDED DATE ARGUMENTS\r\n splitStartPeriod = startperiod.split('/')\r\n splitEndPeriod = endperiod.split('/')\r\n startDate = datetime.datetime(int(splitStartPeriod[-1]), int(splitStartPeriod[-2]), int(splitStartPeriod[-3]))\r\n endDate = datetime.datetime(int(splitEndPeriod[-1]), int(splitEndPeriod[-2]), int(splitEndPeriod[-3]))\r\n\r\n listingsReducedColumns.amenities = listingsReducedColumns.amenities.apply(lower)\r\n # FILTER THE LISTINGS TO ONES THAT CONTAIN THE PROVIDED KEYWORD\r\n # A NEW COLUMN IS CREATED TO STORE WHETHER IF THE LISTING'S A MATCH WITH THE KEYWORD\r\n listingsReducedColumns['matchedAmenities'] = listingsReducedColumns.amenities.apply(lambda row: 'match' if keyword in row else 'no match')\r\n\r\n # CONVERT THE HOST_SINCE TYPE INTO DATETIME AND STORE IN A NEW COLUMN\r\n listingsReducedColumns['period'] = pd.to_datetime(listingsReducedColumns.host_since)\r\n\r\n # THE LISTING ROWS MATCHING THE PROVIDED KEYWORD IS STORED IN THE 'MATCHED' VARIABLE\r\n matched = listingsReducedColumns[\r\n (listingsReducedColumns.matchedAmenities == 'match') &\r\n (listingsReducedColumns.period >= startDate) &\r\n (listingsReducedColumns.period <= endDate)\r\n ]\r\n matchedListingsIDs = list(matched.id)\r\n print('listings matched with {}'.format(keyword))\r\n # print(matched)\r\n # print(matchedListingsIDs)\r\n\r\n # RETURNS A DICTIONARY WITH THE KEYWORD AS KEY AND MATCHING LISTINGS AS THE VALUE (IN A LIST)\r\n return {keyword: matchedListingsIDs}\r\n\r\n# findKeyword('1/1/2009', '30/6/2009', 'elevator')\r\n\r\ndef getListings(startperiod, endperiod, suburbName='sydney', keyword=None):\r\n # MANIPULATE THE PROVIDED DATE ARGUMENTS\r\n splitStartPeriod = startperiod.split('/')\r\n splitEndPeriod = endperiod.split('/')\r\n startDate = datetime.datetime(int(splitStartPeriod[-1]), int(splitStartPeriod[-2]), int(splitStartPeriod[-3]))\r\n endDate = datetime.datetime(int(splitEndPeriod[-1]), int(splitEndPeriod[-2]), int(splitEndPeriod[-3]))\r\n\r\n # INITIALIZE THE DICTIONARY FOR IF A KEYWORD IS PROVIDED, AND THE LIST 
TO SEND TO SHOWLISTINGS FUNCTION\r\n filteredListings = {}\r\n result = []\r\n # CONVERT THE STRINGS, KEYWORD AND SUBURBNAME INTO LOWERCASE\r\n suburbName = suburbName.lower()\r\n listingsReducedColumns.street = listingsReducedColumns.street.apply(lower)\r\n listingsReducedColumns.neighbourhood_cleansed = listingsReducedColumns.neighbourhood_cleansed.apply(lower)\r\n\r\n if keyword != None:\r\n keyword = keyword.lower()\r\n filteredListings = findKeyword(startperiod, endperiod, keyword)\r\n # CHECK IF ANY MATCH FOUND\r\n if len(filteredListings[keyword]):\r\n print('Properties matching the provided keyword found')\r\n # RENAMES THE COLUMN MATCHEDAMENITIES INTO ANOTHER NAME INSTEAD OF ADDING A NEW COLUMN, TO BE USED TO MATCH SUBURBNAME\r\n listingsReducedColumns.rename(columns={'matchedAmenities': 'matchedSuburb'}, inplace=True)\r\n\r\n # COMPARE THE RETURNED LISTING IDS AND SAVE THE BOOLEAN RESULT IN A NEW COLUMN\r\n listingsReducedColumns['matchedSuburb'] = listingsReducedColumns.id.apply(lambda row: 'match' if int(row) in filteredListings[keyword] else 'no match')\r\n\r\n # THE LISTINGS THAT MATCHES THE LISTING IDS AND HAVE SUBURBNAME AS EITHER STREET NAME OR NEIGHBOURHOOD NAME\r\n matched = listingsReducedColumns[\r\n ((listingsReducedColumns.matchedSuburb == 'match') &\r\n (listingsReducedColumns.street == suburbName)) |\r\n ((listingsReducedColumns.matchedSuburb == 'match') &\r\n (listingsReducedColumns.neighbourhood_cleansed == suburbName)) |\r\n ((listingsReducedColumns.matchedSuburb == 'match') &\r\n (listingsReducedColumns.neighbourhood == suburbName))\r\n ]\r\n result = [matched.columns.values.tolist()] + matched.values.tolist()\r\n else:\r\n return 'No match found'\r\n\r\n else:\r\n listingsReducedColumns['period'] = pd.to_datetime(listingsReducedColumns.host_since)\r\n matched = listingsReducedColumns[\r\n ((listingsReducedColumns.period >= startDate) & (listingsReducedColumns.period <= endDate)) &\r\n ((listingsReducedColumns.street == suburbName) |\r\n (listingsReducedColumns.neighbourhood_cleansed == suburbName) |\r\n (listings.neighbourhood == suburbName)\r\n )\r\n ]\r\n print(matched)\r\n result = [matched.columns.values.tolist()] + matched.values.tolist()\r\n\r\n return showListings(result)\r\n\r\n# getListings('1/1/2015', '30/12/2019')\r\n\r\ndef showPriceDist(startperiod, endperiod):\r\n # MANIPULATE THE PROVIDED DATE ARGUMENTS\r\n splitStartPeriod = startperiod.split('/')\r\n splitEndPeriod = endperiod.split('/')\r\n startDate = datetime.datetime(int(splitStartPeriod[-1]), int(splitStartPeriod[-2]), int(splitStartPeriod[-3]))\r\n endDate = datetime.datetime(int(splitEndPeriod[-1]), int(splitEndPeriod[-2]), int(splitEndPeriod[-3]))\r\n\r\n # CONVERT THE HOST_SINCE STRINGS INTO DATETIME TYPE\r\n listingsReducedColumns['period'] = pd.to_datetime(listingsReducedColumns.host_since)\r\n # CAST THE STRINGS IN PRICE COLUMN AS FLOAT\r\n listingsReducedColumns['price'] = listingsReducedColumns.price.apply(lambda x: float(x.replace('$', '').replace(',', '')) if isinstance(x, str) else float(x))\r\n\r\n # RETURN ONLY RECORDS IN THE PROVIDED PERIODS\r\n result = listingsReducedColumns[(listingsReducedColumns.period >= startDate) & (listingsReducedColumns.period <= endDate)]\r\n # print(result)\r\n years = result.period.dt.year.unique()\r\n priceDist = {}\r\n for year in years:\r\n yearPrice = result[result.period.dt.year == year]\r\n priceDist[year] = [yearPrice.price]\r\n plt.hist(priceDist[year], range=(0, 3000), bins=150, alpha=0.5, density=True)\r\n\r\n plt.legend([year for 
year in years])\r\n plt.show()\r\n return priceDist\r\n\r\n# showPriceDist('1/1/2015', '30/12/2019')\r\n\r\ndef showPopularListings(startperiod, endperiod, suburbName='sydney'):\r\n # MANIPULATE THE PROVIDED DATE ARGUMENTS\r\n splitStartPeriod = startperiod.split('/')\r\n splitEndPeriod = endperiod.split('/')\r\n startDate = datetime.datetime(int(splitStartPeriod[-1]), int(splitStartPeriod[-2]), int(splitStartPeriod[-3]))\r\n endDate = datetime.datetime(int(splitEndPeriod[-1]), int(splitEndPeriod[-2]), int(splitEndPeriod[-3]))\r\n\r\n # INITIALIZE THE DICTIONARY FOR IF A KEYWORD IS PROVIDED, AND THE LIST TO SEND TO SHOWLISTINGS FUNCTION\r\n filteredListings = {}\r\n\r\n # CONVERT THE HOST_SINCE STRINGS INTO DATETIME TYPE\r\n listingsReducedColumns['period'] = pd.to_datetime(listingsReducedColumns.host_since)\r\n # CONVERT THE STRINGS, KEYWORD AND SUBURBNAME INTO LOWERCASE\r\n suburbName = suburbName.lower()\r\n listingsReducedColumns.street = listingsReducedColumns.street.apply(lower)\r\n listingsReducedColumns.neighbourhood_cleansed = listingsReducedColumns.neighbourhood_cleansed.apply(lower)\r\n\r\n # SORT THE REVIEW_SCORES_RATING COLUMN IN DESCENDING ORDER\r\n listingsReducedColumns.sort_values(by='review_scores_rating', inplace=True, ascending=False)\r\n\r\n matched = listingsReducedColumns[\r\n ((listingsReducedColumns.period >= startDate) & (listingsReducedColumns.period <= endDate)) &\r\n ((listingsReducedColumns.street == suburbName) |\r\n (listingsReducedColumns.neighbourhood_cleansed == suburbName) | (listingsReducedColumns.neighbourhood == suburbName))\r\n ]\r\n\r\n # CONVERT THE VALUES INTO A LIST\r\n allRecords = matched.values.tolist()\r\n # SELECTS THE FIRST 5 VALUES\r\n top5 = allRecords[:5]\r\n\r\n # STORES THE TOP 5 VALUES AND COLUMN NAMES TO A LIST\r\n # AND WRITE THE LIST INTO A JSON FILE TO DISPLAY ON THE GUI LATER\r\n result = [matched.columns.values.tolist()] + allRecords\r\n\r\n df = pd.DataFrame(result[1:5], columns=result[0])\r\n jsonResult = df.to_json(orient='index')\r\n \r\n with open('popularListings.json', 'w') as jsonListings:\r\n json.dump(jsonResult, jsonListings)\r\n \r\n # STORE THE VALUES INTO THE DICTIONARY WITH SUBURBNAME AS ITS KEY\r\n filteredListings[suburbName] = top5\r\n\r\n return filteredListings\r\n\r\n# showPopularListings('1/1/2018', '31/1/2018', suburbName='waverley')\r\n\r\ndef showCleanComments():\r\n cleanCommentTotal = 0\r\n cleanlinessKeywordDict = {}\r\n # print(reviews)\r\n\r\n allCommentStr = reviews.comments.values.tolist()\r\n print(type(allCommentStr))\r\n # print(allCommentStr)\r\n totalCommentList = int(len(allCommentStr))\r\n\r\n for kw in cleanlinessKeywords:\r\n for comment in allCommentStr:\r\n if kw in str(comment) and kw not in (cleanlinessKeywordDict.keys()):\r\n cleanCommentTotal += 1\r\n cleanlinessKeywordDict[kw] = 1\r\n elif kw in str(comment) and kw in (cleanlinessKeywordDict.keys()):\r\n cleanCommentTotal += 1\r\n prevCount = cleanlinessKeywordDict[kw]\r\n cleanlinessKeywordDict[kw] = prevCount + 1\r\n\r\n print(cleanlinessKeywordDict)\r\n return cleanlinessKeywordDict\r\n\r\nshowCleanComments()","repo_name":"zarinrayhana/Sydney-AirBnB-Data-Analyst-Software","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":10647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4224247448","text":"#list end signature\r\nlEndSign=['.',';','!','?']\r\n#acronym file\r\nnorAcronym='normalAcronym.txt'\r\nnameAcronym='nameAcronym.txt'\r\nnumAcronym='numberAcronym.txt'\r\notherAcronym='otherAcronym.txt'\r\nlistfileInput=[nameAcronym,numAcronym,otherAcronym]\r\n\r\n#Return 'list' KQ vị trí rất cả các dấu hiệu kết thúc câu\r\ndef findEndSign(iText: str):\r\n lPositionEndSign=[]\r\n for i in range(0,len(iText)-1):\r\n if iText[i] in lEndSign and iText[i+1] == ' ':\r\n lPositionEndSign.append(i)\r\n return lPositionEndSign\r\n\r\n#Return 'list' TỪ trước và sau dấu '.'\r\ndef getEndWord(iText: str, lEndSign: list):\r\n ListOfEndWord = []\r\n for i in lEndSign:\r\n position1 = iText[:i].rfind(' ') + 1\r\n position2 = iText[i+2:].find(' ') + i + 2\r\n #Xử lý trường hợp \"### TP. HCM. ###\"\r\n temp = iText[i+2:].find('.') + i + 2\r\n if temp < position2:\r\n position2 = temp\r\n ListOfEndWord.append(iText[position1:position2])\r\n # print(ListOfEndWord)\r\n return ListOfEndWord\r\n \r\ndef CheckAcronym(ListOfEndWord: list):\r\n lAcronym1=[]\r\n lAcronym2=[]\r\n Outlist=[]\r\n\r\n f = open(norAcronym, 'rt', encoding= 'utf-8')\r\n for i in f:\r\n i = i.replace('\\n','')\r\n lAcronym1.append(i)\r\n f.close()\r\n \r\n for namefile in listfileInput:\r\n f = open(namefile, 'rt', encoding= 'utf-8')\r\n for i in f:\r\n i = i.replace('\\n','')\r\n lAcronym2.append(i)\r\n f.close()\r\n\r\n for i in ListOfEndWord:\r\n flat = 0\r\n temp = i[:i.find(' ')]\r\n if temp in lAcronym1:\r\n Outlist.append(0)\r\n flat = 1\r\n continue\r\n for j in lAcronym2:\r\n if i in j:\r\n Outlist.append(0)\r\n flat = 1\r\n break \r\n if flat == 0:\r\n Outlist.append(1)\r\n # print(Outlist)\r\n return Outlist\r\n\r\n#Run here\r\n#input text\r\niText = 'TS. Ng. V. A cùng ThS. B đã nghiên cứu thành công vacxin. TP. HCM phối hợp với Q. Tân Bình, Q. Tân Phú thực hiện thử nghiệm trên chuột.'\r\n# iText = 'Nếu ai đó hỏi tôi, cuốn sách nào bạn muốn giới thiệu cho mọi người nhất, tôi sẽ không ngần ngại trả lời “Harry Potter”. Lần đầu tiên tôi biết đến tác phẩm này qua sự giới thiệu của bạn tôi. Đọc xong tập một tôi tự hỏi, không biết những tập sau tác giả viết gì mà đến bảy tập. Nhưng rồi tôi đã tìm được câu trả lời: tập sau hay hơn tập trước, càng về sau càng hay khi các bí mật dần dần được tiết lộ. Đó chính là sức hút độc đáo của tác phẩm.'\r\n\r\nlPositionEndSign = findEndSign(iText)\r\nlEndWord = getEndWord(iText,lPositionEndSign)\r\nlIsRealEnd = CheckAcronym(lEndWord)\r\nresult = []\r\nposition = 0\r\nfor i in range(len(lPositionEndSign)):\r\n if lIsRealEnd[i]==1:\r\n result.append(iText[position:lPositionEndSign[i]+1])\r\n position = lPositionEndSign[i]+2\r\n else:\r\n continue\r\nresult.append(iText[position:])\r\n\r\n#print result\r\nfor i in result:\r\n print(i)\r\n\r\n","repo_name":"Phuonglecat891/NLPsentencesegment","sub_path":"1712680_BT3.py","file_name":"1712680_BT3.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73173990482","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017) # the client plays the same role as Robo (connects to MongoDB)\ndb = client.your_beer_is\n\ntarget_url = 'https://m.blog.naver.com/nbc64pjh/221988284164'\n\n\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get(target_url, headers=headers)\n\n\nsoup = BeautifulSoup(data.text, 'html.parser')\n\nbeers = soup.select('#SE-2430502c-2d10-4494-baf5-eb675c5fd24d > div > div > div > table > tbody > tr')\n\nfor beer in beers:\n    name_tag = beer.select_one('td > div > p > span')\n    name = name_tag.text\n    \n    document = {\n        'name': name,\n    }\n    print(document)","repo_name":"hyuk7474/your-beer-is","sub_path":"init_db_name.py","file_name":"init_db_name.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31199594352","text":"from flask import Flask, render_template, request, send_from_directory\nfrom xyj import *\nimport os\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/res', methods=['POST'])\ndef result():\n if request.method == 'POST':\n tm985 = request.form['985推免']\n tm211 = request.form['211推免']\n tk985 = request.form['985统考']\n tk211 = request.form['211统考']\n normal = request.form['双非推免']\n m2020 = request.form['2020级']\n m2021 = request.form['2021级']\n m2022 = request.form['2022级']\n xyj = XueYeJiang(tm985, tm211, tk985, tk211, normal, m2020, m2021, m2022)\n return send_from_directory(os.path.join(os.path.dirname(__file__), 'static/'), 'res.xlsx')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"hust-wzq/hustzz","sub_path":"奖学金预算/网页版/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"36999966196","text":"import math\nimport networkx as nx\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n\ndef get_exclude_set(graph, elements):\n    print(\"Calculating katz centrality\")\n    phi = (1 + math.sqrt(5)) / 2.0\n    centrality_initial = nx.katz_centrality(graph, 1/phi - 0.01)\n\n    centrality = [(c, v) for v, c in centrality_initial.items()]\n\n    # Find the threshold that can be used to filter nodes.\n    topElements = sorted(centrality, reverse=True)[:elements]\n    minimum = min(topElements)[0]\n    maximum = max(topElements)[0]\n\n    topElements = [v for c, v in topElements]\n\n    print(\n        f\"Using {elements} nodes with katz centrality in range [{minimum}:{maximum}]\")\n    print(topElements)\n    return set(topElements)\n","repo_name":"juliuscc/flight-route-pandemic-simulation","sub_path":"lib/katz_centrality.py","file_name":"katz_centrality.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"11344545636","text":"import re\nimport sys\n\nfrom functools import cmp_to_key\nfrom webkitcorepy import string_utils, unicode\nfrom webkitcorepy.mocks import ContextStack\n\n\nclass ProcessCompletion(object):\n def __init__(self, returncode=None, stdout=None, stderr=None, elapsed=0):\n self.returncode = 1 if returncode is None else returncode\n self.stdout = string_utils.encode(stdout) if stdout else b''\n self.stderr = string_utils.encode(stderr) if stderr else b''\n self.elapsed = elapsed\n\n\nclass Subprocess(ContextStack):\n \"\"\"\n Organize ProcessCompletions so calls to subprocess functions will return a ProcessCompletion for\n a set of arguments or trigger a ProcessCompletion generator. mocks.Subprocess makes an attempt to\n prioritize CommandRoute objects for a given set of arguments such that the most specific applicable route\n is prefered.\n\n Example usage mocking a single command:\n with mocks.Subprocess(\n 'ls', completion=mocks.ProcessCompletion(returncode=0, stdout='file1.txt\\nfile2.txt\\n'),\n ):\n result = run(['ls'], capture_output=True, encoding='utf-8')\n assert result.returncode == 0\n assert result.stdout == 'file1.txt\\nfile2.txt\\n'\n\n Example usage mocking a set of commands:\n with mocks.Subprocess(\n mocks.Subprocess.CommandRoute('command-a', 'argument', completion=mocks.ProcessCompletion(returncode=0)),\n mocks.Subprocess.CommandRoute('command-b', completion=mocks.ProcessCompletion(returncode=-1)),\n ):\n result = run(['command-a', 'argument'])\n assert result.returncode == 0\n\n result = run(['command-b'])\n assert result.returncode == -1\n \"\"\"\n top = None\n\n class CommandRoute(object):\n def __init__(self, *args, **kwargs):\n completion = kwargs.pop('completion', ProcessCompletion())\n cwd = kwargs.pop('cwd', None)\n input = kwargs.pop('input', None)\n env = kwargs.pop('env', None)\n generator = kwargs.pop('generator', None)\n if kwargs.keys():\n raise TypeError('__init__() got an unexpected keyword argument {}'.format(kwargs.keys()[0]))\n\n if isinstance(args, str) or isinstance(args, unicode):\n self.args = [args]\n elif not args:\n raise ValueError('Arguments must be provided to a CommandRoute')\n else:\n self.args = args\n\n self.generator = generator or (lambda *args, **kwargs: completion)\n self.cwd = cwd\n self.input = string_utils.encode(input) if input else None\n self.env = env\n\n def matches(self, *args, **kwargs):\n cwd = kwargs.pop('cwd', None)\n input = kwargs.pop('input', None)\n env = kwargs.pop('env', None)\n if kwargs.keys():\n raise TypeError('matches() got an unexpected keyword argument {}'.format(kwargs.keys()[0]))\n\n if len(self.args) > len(args):\n return False\n\n for count in range(len(self.args)):\n if self.args[count] is None:\n return False\n\n if self.args[count] == args[count]:\n continue\n elif hasattr(self.args[count], 'match') and self.args[count].match(args[count]):\n continue\n elif re.match(self.args[count], args[count]):\n continue\n return False\n\n if self.cwd is not None and cwd != self.cwd:\n return False\n if self.input is not None and input != self.input:\n return False\n if self.env is not None and env != self.env:\n return False\n return True\n\n def __call__(self, *args, **kwargs):\n cwd = kwargs.pop('cwd', None)\n input = kwargs.pop('input', None)\n env = kwargs.pop('env', dict())\n if kwargs.keys():\n raise TypeError('__call__() got an unexpected keyword argument {}'.format(kwargs.keys()[0]))\n return self.generator(*args, cwd=cwd, input=input, env=env)\n\n @classmethod\n def compare(cls, 
a, b):\n for candidate in [\n len(b.args) - len(a.args),\n 0 if type(a.cwd) == type(b.cwd) else -1 if a.cwd else 1,\n 0 if type(a.input) == type(b.input) else -1 if a.input else 1,\n ]:\n if candidate:\n return candidate\n return 0\n\n Route = CommandRoute\n\n @classmethod\n def completion_generator_for(cls, program):\n current = cls.top\n candidates = []\n while current:\n for completion in current.completions:\n if completion.args[0] == program:\n candidates.append(completion)\n if current.ordered:\n break\n current = current.previous\n\n if candidates:\n return candidates\n\n if sys.version_info > (3, 0):\n raise FileNotFoundError(\"No such file or directory: '{path}': '{path}'\".format(path=program))\n raise OSError('[Errno 2] No such file or directory')\n\n @classmethod\n def completion_for(cls, *args, **kwargs):\n candidates = [\n candidate for candidate in cls.completion_generator_for(args[0]) if candidate.matches(*args, **kwargs)\n ]\n if not candidates:\n raise AssertionError('Provided arguments to {} do not match a provided completion'.format(args[0]))\n\n completion = candidates[0]\n current = cls.top\n while current:\n if current.ordered and completion is current.completions[0]:\n current.completions.pop(0)\n break\n current = current.previous\n return completion(*args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n if all([isinstance(arg, self.CommandRoute) for arg in args]):\n self.ordered = kwargs.pop('ordered', False)\n if kwargs.keys():\n raise TypeError('__init__() got an unexpected keyword argument {}'.format(kwargs.keys()[0]))\n self.completions = list(args) if self.ordered else sorted(args, key=cmp_to_key(self.CommandRoute.compare))\n elif any([isinstance(arg, self.CommandRoute) for arg in args]):\n raise TypeError('mocks.Subprocess arguments must be of a consistent type')\n else:\n self.ordered = False\n self.completions = [self.CommandRoute(*args, **kwargs)]\n\n super(Subprocess, self).__init__(cls=Subprocess)\n\n # Allow mock to be managed via autoinstall\n from mock import patch\n from webkitcorepy.mocks.popen import Popen\n self.patches.append(patch('subprocess.Popen', new=Popen))\n","repo_name":"WebKit/WebKit","sub_path":"Tools/Scripts/libraries/webkitcorepy/webkitcorepy/mocks/subprocess.py","file_name":"subprocess.py","file_ext":"py","file_size_in_byte":6875,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"}
+{"seq_id":"37407837278","text":"# bfs는 최단 거리를 찾을 때 사용되는 느낌\nfrom collections import deque\n\nn, m = map(int, input().split())\narr = []\n\nfor i in range(n):\n arr.append(list(map(int, input())))\n\n# 상하좌우\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\ndef bfs(x, y):\n queue = deque()\n queue.append((x, y))\n\n # 큐가 빌 때까지 반복\n while queue:\n x, y = queue.popleft()\n\n # 현재 위치에서 상하좌우 확인\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n # 주어진 범위를 벗어나는 경우 무시\n if nx < 0 or nx >= n or ny < 0 or ny >= m:\n continue\n # 괴물인 경우 무시\n if arr[nx][ny] == 0:\n continue\n # 해당 노드를 처음 방문하는 경우에만 최단 거리 기록\n if arr[nx][ny] == 1:\n arr[nx][ny] = arr[x][y] + 1\n queue.append((nx, ny))\n # 가장 오른쪽 아래까지의 최단 거리 반환\n return arr[n-1][m-1]\n\n\n# BFS를 수행한 결과 출력\nprint(bfs(0, 0))\n","repo_name":"pipi-shortstocking/CodingTest","sub_path":"DFS,BFS/미로 탈출/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74421927760","text":"import os\n\n# Seed for reproducibility\nSEED = 151836\n\n# Folders\nRAW = './Datasets/Raw/'\nPROCESSED = './Datasets/Processed/'\nRESULTS = './Results/'\nFIGURES = './Results/Figures/'\nCOMPLEXITY = './Results/Complexity/'\n\n# Datasets\nDATASETS = sorted(os.listdir(PROCESSED))\n\n# Minimum DCA plot length\nMIN_LENGTH = 90\n\n# Balance\nBALANCES = [\n [50, 50],\n [60, 40],\n [70, 30],\n [80, 20]\n]","repo_name":"Mhackiori/DCAuth","sub_path":"utils/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37231346539","text":"from agents.network.GCN import GCN, MatMul\nimport torch\nimport torch.nn as nn\n\n\nclass DQN(nn.Module):\n def __init__(self, D_in, H1, H2, H3, H4, H5):\n super(DQN, self).__init__()\n # Deep network weights and biases\n\n self.gcn1 = GCN(D_in * 3, H1)\n\n self.gcn2 = GCN(H1 * 3, H2)\n\n self.matmul1 = MatMul(H2, H3)\n self.matmul2 = MatMul(H3, H4)\n self.matmul3 = MatMul(H4, H5)\n\n def forward(self, state):\n\n y, in_adj_mat, out_adj_mat = state\n\n # First convolution layer\n y = self.gcn1(y, in_adj_mat, out_adj_mat)\n\n # Second convolution layer\n y = self.gcn2(y, in_adj_mat, out_adj_mat)\n\n # Output layer\n y = self.matmul1(y)\n # For each vertex you have a vertex of length H3. This is the vertex embedding.\n\n # Perform pooling\n y = torch.sum(y, dim=0).view(1, -1)\n\n y = self.matmul2(y).clamp(min=0)\n y = self.matmul3(y) # Could insert a ReLu layer before this.\n\n # this is the Q(s,a) value\n return y\n","repo_name":"lauradarcy/DAG_DQN","sub_path":"agents/network/DQN.py","file_name":"DQN.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29039973657","text":"import pytest\n\nfrom antirobot.daemon.arcadia_test.util import GenRandomIP\nfrom antirobot.daemon.arcadia_test.util.AntirobotTestSuite import AntirobotTestSuite\n\n\nclass TestUnistat(AntirobotTestSuite):\n @pytest.mark.parametrize('url, service', [\n (\"http://yandex.ru/search\", \"web\"),\n (\"http://images.yandex.ru/search\", \"img\"),\n ])\n def test_handle_time(self, url, service):\n ip = GenRandomIP()\n metric_before = self.antirobot.get_metric(f\"service_type={service};handle_time_10s_deee\")\n self.send_fullreq(url, headers={\"X-Forwarded-For-Y\": ip})\n metric_after = self.antirobot.get_metric(f\"service_type={service};handle_time_10s_deee\")\n assert metric_before + 1 == metric_after\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"antirobot/TestUnistat.py","file_name":"TestUnistat.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20447567508","text":"from datetime import datetime\nfrom typing import List\n\nfrom aiogoogle import Aiogoogle\n\nfrom app.core.config import settings\nfrom app.models.charity_project import CharityProject\n\nSHEETS_VER = 'v4'\nDRIVE_VER = 'v3'\nDATETIME_NOW = datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n\nFILE_TITLE = f'Отчет от {DATETIME_NOW}'\nLIST_TITLE = 'Отчет'\nROWS = 100\nCOLUMS = 10\nRANGE = 'A1:E100'\nSPREADSHEET_BODY = {\n 'properties': {\n 'title': FILE_TITLE,\n 'locale': 'ru_RU'\n },\n 'sheets': {\n 'properties': {\n 'sheetType': 'GRID',\n 'sheetId': 0,\n 'title': 'Лист1',\n 'gridProperties': {\n 'rowCount': ROWS,\n 'columnCount': COLUMS\n }\n }\n }\n}\nTABLE_VALUES = [\n ['Отчет от', ],\n ['Топ проектов по скорости закрытия'],\n ['Название проекта', 'Время сбора', 'Описание']\n]\n\n\nasync def spreadsheets_create(wrapper_services: Aiogoogle) -> str:\n service = await wrapper_services.discover('sheets', SHEETS_VER)\n response = await wrapper_services.as_service_account(\n service.spreadsheets.create(json=SPREADSHEET_BODY)\n )\n return response['spreadsheetId']\n\n\nasync def set_user_permissions(\n spreadsheet_id: str,\n wrapper_services: Aiogoogle\n) -> None:\n permissions_body = {'type': 'user',\n 'role': 'writer',\n 'emailAddress': settings.email}\n service = await wrapper_services.discover('drive', DRIVE_VER)\n await wrapper_services.as_service_account(\n service.permissions.create(\n fileId=spreadsheet_id,\n json=permissions_body,\n fields='id'\n )\n )\n\n\nasync def spreadsheets_update_value(\n spreadsheet_id: str,\n projects: List[CharityProject],\n wrapper_services: Aiogoogle\n) -> None:\n service = await wrapper_services.discover('sheets', SHEETS_VER)\n\n for project in projects:\n new_row = [\n project.name,\n str(project.close_date - project.create_date),\n project.description\n ]\n TABLE_VALUES.append(new_row)\n\n update_body = {\n 'majorDimension': 'ROWS',\n 'values': TABLE_VALUES\n }\n\n await wrapper_services.as_service_account(\n service.spreadsheets.values.update(\n spreadsheetId=spreadsheet_id,\n range=RANGE,\n valueInputOption='USER_ENTERED',\n json=update_body\n )\n )\n","repo_name":"IgorArefev/QRkot","sub_path":"app/services/google_api.py","file_name":"google_api.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71561456083","text":"import os\r\norignialDir = os.getcwd()\r\ntry:\r\n os.chdir(os.getcwd() + \"\\\\Tools\")\r\nexcept:\r\n pass\r\nfrom Stack import Stack\r\nos.chdir(orignialDir)\r\n# Importing required modules\r\nimport tkinter\r\nfrom tkinter import ttk\r\nfrom tkinter import *\r\nfrom DirectoryHandler import DirectoryHandler\r\nfrom sizingAdjust import sizingAdjust\r\nimport threading\r\nfrom Theme import Theme\r\nfrom Stack import Stack\r\nimport random\r\nimport time\r\n\r\nclass SlideData:\r\n def __init__(self, type, data=[]):\r\n self.__SlideType = type\r\n\r\n if self.__SlideType == \"convo\":\r\n self.__SlideBackground = data[0]\r\n self.__SlideCharacter = data[1]\r\n self.__SlideText = data[2]\r\n\r\n elif self.__SlideType == \"narration\":\r\n self.__SlideBackground = data[0]\r\n self.__SlideText = data[1]\r\n self.__SlidePos = data[2]\r\n\r\n else:\r\n self.__Game = data\r\n\r\n def getSlideBackground(self):\r\n return self.__SlideBackground\r\n\r\n def getSlideText(self):\r\n return self.__SlideText\r\n\r\n def getSlideType(self):\r\n return self.__SlideType\r\n\r\n def getSlidePos(self):\r\n return self.__SlidePos\r\n\r\n def getSlideCharacter(self):\r\n return self.__SlideCharacter\r\n\r\n def getGameFunction(self):\r\n return data\r\n\r\nclass Episode:\r\n def __init__(self, firstSlide, window, sizing):\r\n self.__SlidesReverse = Stack([])\r\n self.AddSlide(firstSlide, window, sizing)\r\n\r\n def AddSlide(self, SlideParameter, window, sizing):\r\n from Slide import Convo\r\n from Slide import Narration\r\n if SlideParameter.getSlideType() == \"narration\":\r\n slide = Narration(window, sizing, SlideParameter.getSlideBackground(), SlideParameter.getSlideText(), SlideParameter.getSlidePos())\r\n elif SlideParameter.getSlideType() == \"convo\":\r\n slide = Convo(window, sizing, SlideParameter.getSlideBackground(), SlideParameter.getSlideText(), SlideParameter.getSlideCharacter())\r\n else:\r\n chosenClass = SlideParameter.getGameFunction()\r\n slide = chosenClass()\r\n\r\n self.__SlidesReverse.Push(slide)\r\n\r\n def __ReverseOrder(self):\r\n self.__SlidesOrder = Stack([])\r\n self.__Reverse()\r\n\r\n def __Reverse(self):\r\n try:\r\n slide, index = self.__SlidesReverse.Peek()\r\n self.__SlidesOrder.Push(slide)\r\n self.__SlidesReverse.Pop()\r\n if index == 0:\r\n return\r\n else:\r\n self.__Reverse()\r\n except:\r\n return\r\n\r\n def StartEpisode(self):\r\n self.__ReverseOrder()\r\n slide1, index = self.__SlidesOrder.Peek()\r\n self.__ViewSlide(slide1)\r\n \r\n def NextSlide(self):\r\n slide, index = self.__SlidesOrder.Peek()\r\n #slide.SlideFrame.place_forget()\r\n self.__SlidesOrder.Pop()\r\n self.__SlidesReverse.Push(slide)\r\n slide, index = self.__SlidesOrder.Peek()\r\n self.__ViewSlide(slide)\r\n\r\n def PreviousSlide(self):\r\n slide, index = self.__SlidesOrder.Peek()\r\n slide.SlideFrame.place_forget()\r\n\r\n slide, index = self.__SlidesReverse.Peek()\r\n self.__SlidesReverse.Pop()\r\n self.__SlidesOrder.Push(slide)\r\n self.__ViewSlide(slide)\r\n\r\n def __ViewSlide(self, slide):\r\n slide.viewSlide()\r\n\r\n def GetCurrentSlide(self):\r\n currentSlide, index = self.__SlidesReverse.Peek()\r\n return currentSlide, index\r\n\r\n\r\nclass EpisodeInterface1:\r\n def __init__(self):\r\n self.__Theme = Theme()\r\n Fonts = [90, 80, 70, 60, 50, 40, 30, 20]\r\n self.FontFamily = [\"Microsoft YaHei UI Light\", \"Ebrima\"]\r\n self.interfaceDirectory = os.getcwd()[0: len(os.getcwd()) - len('\\\\Main')]\r\n os.chdir(self.interfaceDirectory)\r\n padding = 80\r\n 
self.DirectoryHandler = DirectoryHandler()\r\n\r\n self.InterfaceWindow = Tk()\r\n self.InterfaceWindow.overrideredirect(1)\r\n self.InterfaceWindow.attributes(\"-topmost\", True)\r\n self.InterfaceWindow.config(bg=\"black\")\r\n\r\n sizing = sizingAdjust(self.InterfaceWindow, Fonts, self.FontFamily, padding, self.interfaceDirectory)\r\n self.sizing = sizing\r\n self.padding = sizing.padding\r\n self.screenWidth = sizing.width\r\n self.screenHeight = sizing.height\r\n self.FontSize = sizing.FontSize\r\n\r\n self.createEpisode()\r\n\r\n geometry = str(sizing.width1) + \"x\" + str(sizing.height1) + \"+0+0\"\r\n self.InterfaceWindow.geometry(geometry)\r\n\r\n self.ScreenCanvas = Frame(self.InterfaceWindow)\r\n self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,\r\n height=self.screenHeight)\r\n\r\n self.createEpisode()\r\n\r\n threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()\r\n\r\n def createEpisode(self):\r\n episodeText1 = \"You are in the city of Shenmi. You have been sent on an errand by your father to buy some food from a nearby shop in a busy street.\"\r\n episodeText2 = \"As you approach the market, you notice a man that steals some apples and run off.\"\r\n episodeText3 = \"Stop right There\"\r\n episodeText4 = \"After him!\"\r\n episodeText5 = \"The man runs out the shop with the guards.\"\r\n episodeText6 = \"You leave the shop. The street is empty.\"\r\n episodeText7 = \"You then look around to find a dead man on the floor. A door to a shop on the opposite side of the road opens.\"\r\n episodeText8 = \"HEY! YOU THERE! WHO DO YOU THINK YOU ARE LOOKING AT?!\"\r\n episodeText9 = \"You run.\"\r\n\r\n episodeText11 = \"You are stopped by another man on the other corner\"\r\n episodeText12 = \"Going somewhere?\"\r\n episodeText13 = \"Let’s see how tough you are by the time I’m finished with…\"\r\n\r\n Slide1 = SlideData(\"narration\", [\"market.png\", episodeText1, \"TC\"])\r\n Slide2 = SlideData(\"narration\", [\"market.png\", episodeText2, \"TC\"])\r\n Slide3 = SlideData(\"convo\", [\"market.png\", \"guard1\", episodeText3])\r\n Slide4 = SlideData(\"convo\", [\"market.png\", \"guard2\", episodeText4])\r\n Slide5 = SlideData(\"narration\", [\"street1.png\", episodeText5, \"TC\"])\r\n Slide6 = SlideData(\"narration\", [\"street1.png\", episodeText6, \"TC\"])\r\n Slide7 = SlideData(\"narration\", [\"street1.png\", episodeText7, \"TC\"])\r\n Slide8 = SlideData(\"convo\", [\"street1.png\", \"villain1\", episodeText8])\r\n Slide9 = SlideData(\"narration\", [\"street1.png\", episodeText9, \"TC\"])\r\n\r\n slideArray = [Slide2, Slide3, Slide4, Slide5, Slide6, Slide7, Slide8]\r\n\r\n self.Episode = Episode(Slide1, self.InterfaceWindow, self.sizing)\r\n\r\n for slide in slideArray:\r\n self.Episode.AddSlide(slide, self.InterfaceWindow, self.sizing)\r\n\r\n self.Episode.StartEpisode()\r\n\r\n #self.InterfaceWindow.bind(\"\", lambda event: self.moveToPreviousSlide(event))\r\n self.InterfaceWindow.bind(\"\", lambda event: self.moveToNextSlide(event))\r\n\r\n\r\n\r\nif __name__ in '__main__':\r\n Main = EpisodeInterface1()","repo_name":"dakinfemiwa/NEAFinalWork","sub_path":"Main/Tools/story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6690035954","text":"pl = 0\nii = 0\nim = 0\nwhile pl < 10:\n i = int(input(\"Digite a idade do membro\"))\n pl = pl + 1\n if i < 30:\n ii = ii + 1\n im = i/10\nprint(ii,\"membro(s) tem a idade inferior a 30 anos\")\n\n \n \n","repo_name":"kjuao9/coisas-de_python","sub_path":"Python/2° Tri/Atividades/Atividades While/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7306392961","text":"\"\"\"\r\n Copyright (C) 2015 Quinn D Granfor \r\n\r\n This program is free software; you can redistribute it and/or\r\n modify it under the terms of the GNU General Public License\r\n version 2, as published by the Free Software Foundation.\r\n\r\n This program is distributed in the hope that it will be useful, but\r\n WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\r\n General Public License version 2 for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n version 2 along with this program; if not, write to the Free\r\n Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\r\n MA 02110-1301, USA.\r\n\"\"\"\r\n\r\nimport datetime\r\nimport uuid\r\n\r\n\r\ndef db_cron_insert(self, cron_name, cron_desc, cron_enabled, cron_schedule, cron_last_run,\r\n cron_json):\r\n \"\"\"\r\n insert cron job\r\n \"\"\"\r\n new_cron_id = uuid.uuid4()\r\n self.db_cursor.execute('insert into mm_cron (mm_cron_guid,'\r\n ' mm_cron_name,'\r\n ' mm_cron_description,'\r\n ' mm_cron_enabled,'\r\n ' mm_cron_schedule,'\r\n ' mm_cron_last_run, mm_cron_json)'\r\n ' values (%s,%s,%s,%s,%s,%s,%s)',\r\n (new_cron_id, cron_name, cron_desc, cron_enabled, cron_schedule,\r\n cron_last_run, cron_json))\r\n return new_cron_id\r\n\r\n\r\ndef db_cron_list_count(self, enabled_only=False):\r\n \"\"\"\r\n Return number of cron jobs\r\n \"\"\"\r\n if not enabled_only:\r\n self.db_cursor.execute('select count(*) from mm_cron')\r\n else:\r\n self.db_cursor.execute(\r\n 'select count(*) from mm_cron'\r\n ' where mm_cron_enabled = true')\r\n return self.db_cursor.fetchone()[0]\r\n\r\n\r\ndef db_cron_list(self, enabled_only=False, offset=0, records=None):\r\n \"\"\"\r\n Return cron list\r\n \"\"\"\r\n if not enabled_only:\r\n self.db_cursor.execute('select mm_cron_guid,'\r\n ' mm_cron_name,'\r\n ' mm_cron_description,'\r\n ' mm_cron_enabled,'\r\n ' mm_cron_schedule,'\r\n ' mm_cron_last_run,'\r\n ' mm_cron_json'\r\n ' from mm_cron where mm_cron_guid'\r\n ' in (select mm_cron_guid from mm_cron'\r\n ' order by mm_cron_name offset %s limit %s)'\r\n ' order by mm_cron_name', (offset, records))\r\n else:\r\n self.db_cursor.execute('select mm_cron_guid,'\r\n ' mm_cron_name,'\r\n ' mm_cron_description,'\r\n ' mm_cron_enabled,'\r\n ' mm_cron_schedule,'\r\n ' mm_cron_last_run,'\r\n ' mm_cron_json'\r\n ' from mm_cron where mm_cron_guid'\r\n ' in (select mm_cron_guid from mm_cron'\r\n ' where mm_cron_enabled = true'\r\n ' order by mm_cron_name offset %s limit %s)'\r\n ' order by mm_cron_name', (offset, records))\r\n return self.db_cursor.fetchall()\r\n\r\n\r\ndef db_cron_time_update(self, cron_type):\r\n \"\"\"\r\n Update the datetime in which a cron job was run\r\n \"\"\"\r\n self.db_cursor.execute('update mm_cron set mm_cron_last_run = %s'\r\n ' where mm_cron_name = %s',\r\n (datetime.datetime.now(), cron_type))\r\n\r\n\r\ndef db_cron_delete(self, cron_uuid):\r\n \"\"\"\r\n Delete cron job\r\n \"\"\"\r\n self.db_cursor.execute('delete from mm_cron'\r\n ' where mm_cron_guid = %s',\r\n (cron_uuid,))\r\n\r\n\r\ndef db_cron_info(self, cron_uuid):\r\n \"\"\"\r\n Cron job info\r\n \"\"\"\r\n self.db_cursor.execute('select mm_cron_guid,'\r\n ' mm_cron_name,'\r\n ' mm_cron_description,'\r\n ' mm_cron_enabled,'\r\n ' mm_cron_schedule,'\r\n ' mm_cron_last_run,'\r\n ' mm_cron_json'\r\n ' from mm_cron'\r\n ' where mm_cron_guid = %s', (cron_uuid,))\r\n return 
self.db_cursor.fetchone()\r\n","repo_name":"MediaKraken/MediaKraken_Deployment","sub_path":"source/database/db_base_cron.py","file_name":"db_base_cron.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"}
+{"seq_id":"11308305958","text":"import pygatt\nimport csv\nimport datetime\n\n# Connect to the device\nconnect_key = bytearray.fromhex(\"2101020304000000000000\") # Update this with correct connection key you obtained in part 2\nenable_notifications = bytearray.fromhex(\"0b0100000000\") # This value is correct, no need to update\nbbq_mac = \"ff:ff:ff:ff:ff:ff\" # Update this with your BBQ device's MAC address\n\nadapter = pygatt.GATTToolBackend()\n\ndef fahrenheit(celcius):\n return int(round(celcius * (9/5.0) + 32))\n\n# Process and save the realtime data\ndef handle_notification(handle, value):\n \"\"\"\n handle -- integer, characteristic read handle the data was received on\n value -- bytearray, the data returned in the notification\n \"\"\"\n temps = {\"timestamp\": str(datetime.datetime.now())}\n for i in range(0,8,2):\n celcius = int(int.from_bytes(value[i:i+2], \"little\") / 10)\n f_degrees = fahrenheit(celcius)\n temps[f\"Probe-{int(i/2)+1}\"] = f_degrees\n with open(\"temperature_log.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([temps[field] for field in temps])\n \n \ntry:\n adapter.start()\n\n try:\n device = adapter.connect(bbq_mac,timeout=20)\n except:\n print(\"Couldn't connect to the device, retrying...\")\n device = adapter.connect(bbq_mac,timeout=20)\n\n # Send the connection key to the 0x29\n print(\"Pairing with the device...\")\n device.char_write_handle(0x0029, connect_key)\n # Enable notifications by writing to 0x34\n device.char_write_handle(0x0034, enable_notifications)\n print(\"Connected with the device.\")\n \n with open('temperature_log.csv', 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([\"Timestamp\", \"Probe 1\", \"Probe 2\", \"Probe 3\", \"Probe 4\"])\n # Subscribe and listen for notifications of the realtime data\n try:\n device.subscribe(\"0000fff4-0000-1000-8000-00805f9b34fb\", callback=handle_notification)\n except Exception as e:\n try:\n device.subscribe(\"0000fff4-0000-1000-8000-00805f9b34fb\", callback=handle_notification)\n except:\n pass\n \n input(\"Enter any key to quit....\")\n \n\nfinally:\n adapter.stop()\n\n\n\n","repo_name":"imperfectpython/bbq-hacking-part-3","sub_path":"bbq_app.py","file_name":"bbq_app.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"44796901602","text":"alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef caesar(text, shift, direction):\n    if direction == 'encode':\n        encoded = \"\"\n        for letter in text:\n            if letter in alphabet: # In case user types numbers or symbols\n                if alphabet.index(letter) + shift >= 26:\n                    shift = shift % 26\n                    encoded += alphabet[alphabet.index(letter) - 26 + shift]\n                else:\n                    encoded += alphabet[alphabet.index(letter) + shift]\n        print(f\"The {direction}d message is: {encoded}\")\n    else:\n        decoded = \"\"\n        for letter in text:\n            if letter in alphabet:\n                if alphabet.index(letter) - shift < 0:\n                    shift = shift % 26\n                    decoded += alphabet[(alphabet.index(letter) + 26 - shift) % 26]\n                else:\n                    decoded += alphabet[alphabet.index(letter) - shift]\n        print(f\"The {direction}d message is: {decoded}\")\n    \nrun = True\nwhile run: \n    direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\n    text = input(\"Type your message:\\n\").lower()\n    shift = int(input(\"Type the shift number:\\n\")) \n    \n    caesar(text, shift, direction)\n    \n    answer = input(\"Would you like to go again?\\n\").lower()\n    if answer == 'no':\n        run = False\n        print(\"Goodbye\") ","repo_name":"morsalsadat/CaesarCipher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25997626617","text":"from django.shortcuts import render, redirect, reverse\nfrom .models import Comment\nfrom django.contrib.contenttypes.models import ContentType\nfrom .forms import CommentForm\nfrom django.http import JsonResponse\nfrom django.utils import timezone\n\n\ndef update_comment(request):\n referer = request.META.get('HTTP_REFERER', reverse('home'))\n comment_form = CommentForm(request.POST, user=request.user)\n\n # 返回给ajax的状态数据,是否完成评论\n data = {}\n\n if comment_form.is_valid():\n comment = Comment()\n comment.user = request.user\n comment.text = comment_form.cleaned_data['text']\n comment.content_object = comment_form.cleaned_data['content_object']\n\n parent = comment_form.cleaned_data['parent']\n if not parent is None: # 这一条是回复\n if parent.root is None: # 被这条回复的内容是一条评论不是一条回复\n comment.root = parent\n else: # 被这条回复的内容是一条回复,所以这条回复的root的parent的root,以此类推,直到最后一个是评论\n comment.root = parent.root\n comment.parent = parent\n comment.reply_to = parent.user # models设置该parent的时候,外键关联User,可以可以通过parent找到user\n data['reply_to'] = comment.reply_to.get_nickname_or_username()\n else:\n data['reply_to'] = ''\n\n comment.save()\n\n data['pk'] = comment.pk\n # 这是一条评论的话\n if comment.root is None:\n data['root_pk'] = ''\n else:\n data['root_pk'] = comment.root.pk\n # 构建返回给前端ajax的数据\n data['status'] = 'SUCCESS'\n data['content_type'] = ContentType.objects.get_for_model(comment).model\n data['username'] = comment.user.get_nickname_or_username()\n data['avatar_url'] = comment.user.get_avatar_url()\n data['comment_time'] = timezone.localtime(comment.comment_time).strftime('%Y-%m-%d %H:%M:%S')\n data['text'] = comment.text\n # return render(request, 'login_logout_error.html', {'message': '评论成功!', 'redirect_to': referer})\n else:\n data['status'] = 'ERROR'\n # 返回具体的错误信息\n data['message'] = list(comment_form.errors.values())[0][0]\n return JsonResponse(data)\n\n # return render('login_logout_error.html', request, {'message': comment_form.errors, 'redirect_to': referer})\n # # 返回原来的博客页面\n # referer = request.META.get('HTTP_REFERER', reverse('home'))\n #\n # # 获取前端页面传递进来的数据\n # user = request.user\n # # 数据检查\n # if not user.is_authenticated: # 是否真的登录了,未登录的话留下一个返回原来博客的链接\n # return render(request, 'login_logout_error.html', {'message': '用户未登录!', 'redirect_to':referer})\n # text = request.POST.get('text', '')\n # if text.strip() == '':\n # return render(request, 'login_logout_error.html', {'message': '提交内容为空!', 'redirect_to':referer})\n # try:\n # object_id = int(request.POST.get('object_id', ''))\n # # 这里传进来的时候字符串,不是博客的类\n # content_type = request.POST.get('content_type', '')\n #\n # # 下面是获取comment models对象中的content_object,相当于Blog.objects.get(pk=object_id)\n # # 下面的这种写法可以让评论变得更加灵活,不单单是评论博客\n # # 根据前端传来的blog_detail获取评论的对象content_type是Blog\n # model_class = ContentType.objects.get(model=content_type).model_class() # 获取所有的blog的ContentType\n # model_object = model_class.objects.get(pk=object_id) # 根据blog的pk确定对应的blog\n # # 实例化一个Comment对象,数据检查通过,要完整的填好这个实例化的comment需要这些数据,具体数据可以看comment的models\n # comment = Comment()\n # comment.content_object = model_object\n # comment.user = user\n # comment.text = text\n # comment.save()\n # except Exception as e:\n # return render(request, 'login_logout_error.html', {'message':e, 'redirect_to':referer})\n #\n # return render(request, 'login_logout_error.html', {'message': '评论成功!', 
'redirect_to':referer})\n","repo_name":"zhangyongming13/mysite","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"22788034287","text":"from pathlib import Path\nfrom typing import Optional\n\nimport pandas as pd\nimport pytest\nfrom hyperstyle.src.python.review.common.language import Language\nfrom analysis.src.python.evaluation.issues_statistics.get_raw_issues_statistics import (\n _convert_language_code_to_language,\n _get_output_folder,\n DEFAULT_OUTPUT_FOLDER_NAME,\n inspect_raw_issues,\n)\nfrom analysis.src.python.utils.df_utils import equal_df, read_df\nfrom analysis.test.python.evaluation.issues_statistics import (\n GET_RAW_ISSUES_STATISTICS_TARGET_FILES_FOLDER,\n GET_RAW_ISSUES_STATISTICS_TEST_FILES_FOLDER,\n)\n\nDF_PARENT_FOLDER_NAME = 'parent_folder'\nDF_NAME = 'input_df'\nDF_PATH = Path(DF_PARENT_FOLDER_NAME) / DF_NAME\nDEFAULT_OUTPUT_PATH = Path(DF_PARENT_FOLDER_NAME) / DEFAULT_OUTPUT_FOLDER_NAME\n\nNEW_FOLDER = 'new_folder'\n\nGET_OUTPUT_FOLDER_PATH_TEST_DATA = [\n (DF_PATH, None, DEFAULT_OUTPUT_PATH),\n (DF_PATH, Path(NEW_FOLDER), Path(NEW_FOLDER)),\n]\n\n\n@pytest.mark.parametrize(\n ('solutions_file_path', 'output_folder', 'expected_output_folder'),\n GET_OUTPUT_FOLDER_PATH_TEST_DATA,\n)\ndef test_get_output_folder(solutions_file_path: Path, output_folder: Optional[Path], expected_output_folder: Path):\n actual_output_folder = _get_output_folder(solutions_file_path, output_folder)\n assert actual_output_folder == expected_output_folder\n\n\nCONVERT_LANGUAGE_CODE_TO_LANGUAGE_TEST_DATA = [\n ('java7', 'JAVA'),\n ('java8', 'JAVA'),\n ('java9', 'JAVA'),\n ('java11', 'JAVA'),\n ('java15', 'JAVA'),\n ('python3', 'PYTHON'),\n ('kotlin', 'KOTLIN'),\n ('javascript', 'JAVASCRIPT'),\n ('some_weird_lang', 'some_weird_lang'),\n]\n\n\n@pytest.mark.parametrize(('language_code', 'expected_language'), CONVERT_LANGUAGE_CODE_TO_LANGUAGE_TEST_DATA)\ndef test_convert_language_code_to_language(language_code: str, expected_language: str):\n actual_language = _convert_language_code_to_language(fragment_id='0', language_code=language_code)\n assert actual_language == expected_language\n\n\nINSPECT_SOLUTIONS_TEST_DATA = [\n (\n 'test_df_with_null.csv',\n 'target_df_with_null_python.csv',\n Language.PYTHON.value,\n ),\n (\n 'test_df_with_null.csv',\n 'target_df_with_null_unknown.csv',\n '',\n ),\n (\n 'test_df_with_empty_raw_issues.csv',\n 'target_df_with_empty_raw_issues.csv',\n Language.KOTLIN.value,\n ),\n (\n 'test_df_with_incorrect_language.csv',\n 'target_df_with_incorrect_language.csv',\n 'some_weird_lang',\n ),\n (\n 'test_df_single_lang.csv',\n 'target_df_single_lang.csv',\n Language.JAVA.value,\n ),\n (\n 'test_df_multi_lang.csv',\n 'target_df_multi_lang_java.csv',\n Language.JAVA.value,\n ),\n (\n 'test_df_multi_lang.csv',\n 'target_df_multi_lang_js.csv',\n Language.JS.value,\n ),\n (\n 'test_df_multi_lang.csv',\n 'target_df_multi_lang_python.csv',\n Language.PYTHON.value,\n ),\n]\n\n\n@pytest.mark.parametrize(('test_file', 'target_file', 'lang'), INSPECT_SOLUTIONS_TEST_DATA)\ndef test_inspect_solutions(test_file: str, target_file: str, lang: str):\n test_df = read_df(GET_RAW_ISSUES_STATISTICS_TEST_FILES_FOLDER / test_file)\n stats = inspect_raw_issues(test_df)\n\n freq_stats = pd.read_csv(GET_RAW_ISSUES_STATISTICS_TARGET_FILES_FOLDER / target_file)\n\n assert equal_df(stats[lang], 
freq_stats)\n","repo_name":"nbirillo/hyperstyle-analyze","sub_path":"analysis/test/python/evaluation/issues_statistics/test_get_raw_issues_statistics.py","file_name":"test_get_raw_issues_statistics.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"31562774100","text":"from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty\nfrom pypy.interpreter.astcompiler import ast, consts\nfrom pypy.interpreter.pyparser.error import SyntaxError\n\n\n### Parsing utilites #################################################\ndef parse_except_clause(tokens):\n \"\"\"parses 'except' [test [',' test]] ':' suite\n and returns a 4-tuple : (tokens_read, expr1, expr2, except_body)\n \"\"\"\n lineno = tokens[0].lineno\n clause_length = 1\n # Read until end of except clause (bound by following 'else',\n # or 'except' or end of tokens)\n while clause_length < len(tokens):\n token = tokens[clause_length]\n if isinstance(token, TokenObject) and \\\n (token.get_value() == 'except' or token.get_value() == 'else'):\n break\n clause_length += 1\n if clause_length == 3:\n # case 'except: body'\n return (3, None, None, tokens[2])\n elif clause_length == 4:\n # case 'except Exception: body':\n return (4, tokens[1], None, tokens[3])\n else:\n # case 'except Exception, exc: body'\n return (6, tokens[1], to_lvalue(tokens[3], consts.OP_ASSIGN), tokens[5])\n\n\ndef parse_dotted_names(tokens, builder):\n \"\"\"parses NAME('.' NAME)* and returns full dotted name\n\n this function doesn't assume that the list ends after the\n last 'NAME' element\n \"\"\"\n first = tokens[0]\n assert isinstance(first, TokenObject)\n name = first.get_value()\n l = len(tokens)\n index = 1\n for index in range(1, l, 2):\n token = tokens[index]\n assert isinstance(token, TokenObject)\n if token.name != builder.parser.tokens['DOT']:\n break\n token = tokens[index+1]\n assert isinstance(token, TokenObject)\n name += '.'\n value = token.get_value()\n name += value\n return (index, name)\n\ndef parse_argument(tokens, builder):\n \"\"\"parses function call arguments\"\"\"\n l = len(tokens)\n index = 0\n arguments = []\n last_token = None\n building_kw = False\n kw_built = False\n stararg_token = None\n dstararg_token = None\n while index < l:\n cur_token = tokens[index]\n if not isinstance(cur_token, TokenObject):\n index += 1\n if not building_kw:\n arguments.append(cur_token)\n else:\n last_token = arguments.pop()\n assert isinstance(last_token, ast.Name) # used by rtyper\n arguments.append(ast.Keyword(last_token.varname, cur_token, last_token.lineno))\n building_kw = False\n kw_built = True\n continue\n elif cur_token.name == builder.parser.tokens['COMMA']:\n index += 1\n continue\n elif cur_token.name == builder.parser.tokens['EQUAL']:\n index += 1\n building_kw = True\n continue\n elif cur_token.name == builder.parser.tokens['STAR'] or cur_token.name == builder.parser.tokens['DOUBLESTAR']:\n index += 1\n if cur_token.name == builder.parser.tokens['STAR']:\n stararg_token = tokens[index]\n index += 1\n if index >= l:\n break\n index += 2 # Skip COMMA and DOUBLESTAR\n dstararg_token = tokens[index]\n break\n elif cur_token.get_value() == 'for':\n if len(arguments) != 1:\n raise SyntaxError(\"invalid syntax\", cur_token.lineno,\n cur_token.col)\n expr = arguments[0]\n genexpr_for = parse_genexpr_for(tokens[index:])\n genexpr_for[0].is_outmost = True\n gexp = ast.GenExpr(ast.GenExprInner(expr, genexpr_for, expr.lineno), expr.lineno)\n arguments[0] = gexp\n break\n return arguments, stararg_token, dstararg_token\n\n\ndef parse_fpdef(tokens, index, builder):\n \"\"\"fpdef: fpdef: NAME | '(' fplist ')'\n fplist: fpdef (',' fpdef)* [',']\n\n This intend to be a RPYTHON compliant implementation of _parse_fpdef,\n but it can't work with the default compiler.\n We 
switched to use astcompiler module now\n \"\"\"\n nodes = []\n comma = False\n while True:\n token = tokens[index]\n index += 1\n assert isinstance(token, TokenObject)\n if token.name == builder.parser.tokens['LPAR']: # nested item\n index, node = parse_fpdef(tokens, index, builder)\n elif token.name == builder.parser.tokens['RPAR']: # end of current nesting\n break\n else: # name\n val = token.get_value()\n node = ast.AssName(val, consts.OP_ASSIGN, token.lineno)\n nodes.append(node)\n\n token = tokens[index]\n index += 1\n assert isinstance(token, TokenObject)\n if token.name == builder.parser.tokens['COMMA']:\n comma = True\n else:\n assert token.name == builder.parser.tokens['RPAR']\n break\n if len(nodes) == 1 and not comma:\n node = nodes[0]\n else:\n node = ast.AssTuple(nodes, token.lineno)\n return index, node\n\ndef parse_arglist(tokens, builder):\n \"\"\"returns names, defaults, flags\"\"\"\n l = len(tokens)\n index = 0\n defaults = []\n names = []\n flags = 0\n first_with_default = -1\n while index < l:\n cur_token = tokens[index]\n index += 1\n if not isinstance(cur_token, TokenObject):\n # XXX: think of another way to write this test\n defaults.append(cur_token)\n if first_with_default == -1:\n first_with_default = len(names) - 1\n elif cur_token.name == builder.parser.tokens['COMMA']:\n # We could skip test COMMA by incrementing index cleverly\n # but we might do some experiment on the grammar at some point\n continue\n elif cur_token.name == builder.parser.tokens['LPAR']:\n index, node = parse_fpdef(tokens, index, builder)\n names.append(node)\n elif cur_token.name == builder.parser.tokens['STAR'] or cur_token.name == builder.parser.tokens['DOUBLESTAR']:\n if cur_token.name == builder.parser.tokens['STAR']:\n cur_token = tokens[index]\n assert isinstance(cur_token, TokenObject)\n index += 1\n if cur_token.name == builder.parser.tokens['NAME']:\n val = cur_token.get_value()\n names.append( ast.AssName( val, consts.OP_ASSIGN ) )\n flags |= consts.CO_VARARGS\n index += 1\n if index >= l:\n break\n else:\n # still more tokens to read\n cur_token = tokens[index]\n index += 1\n else:\n raise SyntaxError(\"incomplete varags\", cur_token.lineno,\n cur_token.col)\n assert isinstance(cur_token, TokenObject)\n if cur_token.name != builder.parser.tokens['DOUBLESTAR']:\n raise SyntaxError(\"Unexpected token\", cur_token.lineno,\n cur_token.col)\n cur_token = tokens[index]\n index += 1\n assert isinstance(cur_token, TokenObject)\n if cur_token.name == builder.parser.tokens['NAME']:\n val = cur_token.get_value()\n names.append( ast.AssName( val, consts.OP_ASSIGN ) )\n flags |= consts.CO_VARKEYWORDS\n index += 1\n else:\n raise SyntaxError(\"incomplete varags\", cur_token.lineno,\n cur_token.col)\n if index < l:\n token = tokens[index]\n raise SyntaxError(\"unexpected token\" , token.lineno,\n token.col)\n elif cur_token.name == builder.parser.tokens['NAME']:\n val = cur_token.get_value()\n names.append( ast.AssName( val, consts.OP_ASSIGN ) )\n\n if first_with_default != -1:\n num_expected_with_default = len(names) - first_with_default\n if flags & consts.CO_VARKEYWORDS:\n num_expected_with_default -= 1\n if flags & consts.CO_VARARGS:\n num_expected_with_default -= 1\n if len(defaults) != num_expected_with_default:\n raise SyntaxError('non-default argument follows default argument',\n tokens[0].lineno, tokens[0].col)\n return names, defaults, flags\n\n\ndef parse_listcomp(tokens, builder):\n \"\"\"parses 'for j in k for i in j if i %2 == 0' and returns\n a GenExprFor instance\n XXX: refactor 
with listmaker ?\n \"\"\"\n list_fors = []\n ifs = []\n index = 0\n if tokens:\n lineno = tokens[0].lineno\n else:\n lineno = -1\n while index < len(tokens):\n token = tokens[index]\n assert isinstance(token, TokenObject) # rtyper info + check\n if token.get_value() == 'for':\n index += 1 # skip 'for'\n ass_node = to_lvalue(tokens[index], consts.OP_ASSIGN)\n index += 2 # skip 'in'\n iterables = [tokens[index]]\n index += 1\n while index < len(tokens):\n tok2 = tokens[index]\n if not isinstance(tok2, TokenObject):\n break\n if tok2.name != builder.parser.tokens['COMMA']:\n break\n iterables.append(tokens[index+1])\n index += 2\n if len(iterables) == 1:\n iterable = iterables[0]\n else:\n iterable = ast.Tuple(iterables, token.lineno)\n while index < len(tokens):\n token = tokens[index]\n assert isinstance(token, TokenObject) # rtyper info\n if token.get_value() == 'if':\n ifs.append(ast.ListCompIf(tokens[index+1], token.lineno))\n index += 2\n else:\n break\n list_fors.append(ast.ListCompFor(ass_node, iterable, ifs, lineno))\n ifs = []\n else:\n assert False, 'Unexpected token: expecting for in listcomp'\n #\n # Original implementation:\n #\n # if tokens[index].get_value() == 'for':\n # index += 1 # skip 'for'\n # ass_node = to_lvalue(tokens[index], consts.OP_ASSIGN)\n # index += 2 # skip 'in'\n # iterable = tokens[index]\n # index += 1\n # while index < len(tokens) and tokens[index].get_value() == 'if':\n # ifs.append(ast.ListCompIf(tokens[index+1]))\n # index += 2\n # list_fors.append(ast.ListCompFor(ass_node, iterable, ifs))\n # ifs = []\n # else:\n # raise ValueError('Unexpected token: %s' % tokens[index])\n return list_fors\n\n\ndef parse_genexpr_for(tokens):\n \"\"\"parses 'for j in k for i in j if i %2 == 0' and returns\n a GenExprFor instance\n XXX: if RPYTHON supports to pass a class object to a function,\n we could refactor parse_listcomp and parse_genexpr_for,\n and call :\n - parse_listcomp(tokens, forclass=ast.GenExprFor, ifclass=...)\n or:\n - parse_listcomp(tokens, forclass=ast.ListCompFor, ifclass=...)\n \"\"\"\n genexpr_fors = []\n ifs = []\n index = 0\n if tokens:\n lineno = tokens[0].lineno\n else:\n lineno = -1\n while index < len(tokens):\n token = tokens[index]\n assert isinstance(token, TokenObject) # rtyper info + check\n if token.get_value() == 'for':\n index += 1 # skip 'for'\n ass_node = to_lvalue(tokens[index], consts.OP_ASSIGN)\n index += 2 # skip 'in'\n iterable = tokens[index]\n index += 1\n while index < len(tokens):\n token = tokens[index]\n assert isinstance(token, TokenObject) # rtyper info\n if token.get_value() == 'if':\n ifs.append(ast.GenExprIf(tokens[index+1], token.lineno))\n index += 2\n else:\n break\n genexpr_fors.append(ast.GenExprFor(ass_node, iterable, ifs, lineno))\n ifs = []\n else:\n raise SyntaxError('invalid syntax',\n token.lineno, token.col)\n return genexpr_fors\n\ndef get_docstring(builder,stmt):\n \"\"\"parses a Stmt node.\n\n If a docstring if found, the Discard node is **removed**\n from and the docstring is returned.\n\n If no docstring is found, is left unchanged\n and None is returned\n \"\"\"\n if not isinstance(stmt, ast.Stmt):\n return None\n doc = builder.wrap_none()\n if len(stmt.nodes):\n first_child = stmt.nodes[0]\n if isinstance(first_child, ast.Discard):\n expr = first_child.expr\n if builder.is_basestring_const(expr):\n # This *is* a docstring, remove it from stmt list\n assert isinstance(expr, ast.Const)\n del stmt.nodes[0]\n doc = expr.value\n return doc\n\n\ndef to_lvalue(ast_node, flags):\n lineno = 
ast_node.lineno\n if isinstance( ast_node, ast.Name ):\n return ast.AssName(ast_node.varname, flags, lineno)\n # return ast.AssName(ast_node.name, flags)\n elif isinstance(ast_node, ast.Tuple):\n nodes = []\n # FIXME: should ast_node.getChildren() but it's not annotable\n # because of flatten()\n for node in ast_node.nodes:\n nodes.append(to_lvalue(node, flags))\n return ast.AssTuple(nodes, lineno)\n elif isinstance(ast_node, ast.List):\n nodes = []\n # FIXME: should ast_node.getChildren() but it's not annotable\n # because of flatten()\n for node in ast_node.nodes:\n nodes.append(to_lvalue(node, flags))\n return ast.AssList(nodes, lineno)\n elif isinstance(ast_node, ast.Getattr):\n expr = ast_node.expr\n assert isinstance(ast_node, ast.Getattr)\n attrname = ast_node.attrname\n return ast.AssAttr(expr, attrname, flags, lineno)\n elif isinstance(ast_node, ast.Subscript):\n ast_node.flags = flags\n return ast_node\n elif isinstance(ast_node, ast.Slice):\n ast_node.flags = flags\n return ast_node\n else:\n if isinstance(ast_node, ast.GenExpr):\n raise SyntaxError(\"assign to generator expression not possible\",\n lineno, 0, '')\n elif isinstance(ast_node, ast.ListComp):\n raise SyntaxError(\"can't assign to list comprehension\",\n lineno, 0, '')\n elif isinstance(ast_node, ast.CallFunc):\n if flags == consts.OP_DELETE:\n raise SyntaxError(\"can't delete function call\",\n lineno, 0, '')\n else:\n raise SyntaxError(\"can't assign to function call\",\n lineno, 0, '')\n else:\n raise SyntaxError(\"can't assign to non-lvalue\",\n lineno, 0, '')\n\ndef is_augassign( ast_node ):\n if ( isinstance( ast_node, ast.Name ) or\n isinstance( ast_node, ast.Slice ) or\n isinstance( ast_node, ast.Subscript ) or\n isinstance( ast_node, ast.Getattr ) ):\n return True\n return False\n\ndef get_atoms(builder, nb):\n atoms = []\n i = nb\n while i>0:\n obj = builder.pop()\n if isinstance(obj, BaseRuleObject):\n i += obj.count\n else:\n atoms.append( obj )\n i -= 1\n atoms.reverse()\n return atoms\n\n\ndef peek_atoms(builder, nb):\n atoms = []\n\n i = nb\n current = len(builder.rule_stack) - 1\n while i > 0:\n assert current >= 0\n obj = builder.rule_stack[current]\n if isinstance(obj, BaseRuleObject):\n i += obj.count\n else:\n atoms.append( obj )\n i -= 1\n current -= 1\n\n atoms.reverse()\n return atoms\n\n\n#def eval_string(value):\n# \"\"\"temporary implementation\n#\n# FIXME: need to be finished (check compile.c (parsestr) and\n# stringobject.c (PyString_DecodeEscape()) for complete implementation)\n# \"\"\"\n# # return eval(value)\n# if len(value) == 2:\n# return ''\n# result = ''\n# length = len(value)\n# quotetype = value[0]\n# index = 1\n# while index < length and value[index] == quotetype:\n# index += 1\n# if index == 6:\n# # empty strings like \"\"\"\"\"\" or ''''''\n# return ''\n# # XXX: is it RPYTHON to do this value[index:-index]\n# chars = [char for char in value[index:len(value)-index]]\n# result = ''.join(chars)\n# result = result.replace('\\\\\\\\', '\\\\')\n# d = {'\\\\b' : '\\b', '\\\\f' : '\\f', '\\\\t' : '\\t', '\\\\n' : '\\n',\n# '\\\\r' : '\\r', '\\\\v' : '\\v', '\\\\a' : '\\a',\n# }\n# for escaped, value in d.items():\n# result = result.replace(escaped, value)\n# return result\n\n\n## misc utilities, especially for power: rule\ndef reduce_callfunc(obj, arglist):\n \"\"\"generic factory for CallFunc nodes\"\"\"\n assert isinstance(arglist, ArglistObject)\n return ast.CallFunc(obj, arglist.arguments,\n arglist.stararg, arglist.dstararg, arglist.lineno)\n\ndef reduce_subscript(obj, 
subscript):\n \"\"\"generic factory for Subscript nodes\"\"\"\n assert isinstance(subscript, SubscriptObject)\n return ast.Subscript(obj, consts.OP_APPLY, subscript.value, subscript.lineno)\n\ndef reduce_slice(obj, sliceobj):\n \"\"\"generic factory for Slice nodes\"\"\"\n assert isinstance(sliceobj, SlicelistObject)\n if sliceobj.fake_rulename == 'slice':\n start = sliceobj.value[0]\n end = sliceobj.value[1]\n return ast.Slice(obj, consts.OP_APPLY, start, end, sliceobj.lineno)\n else:\n return ast.Subscript(obj, consts.OP_APPLY, ast.Sliceobj(sliceobj.value,\n sliceobj.lineno), sliceobj.lineno)\n\ndef parse_attraccess(tokens, builder):\n \"\"\"parses token list like ['a', '.', 'b', '.', 'c', ...]\n\n and returns an ast node : ast.Getattr(Getattr(Name('a'), 'b'), 'c' ...)\n \"\"\"\n token = tokens[0]\n # XXX HACK for when parse_attraccess is called from build_decorator\n if isinstance(token, TokenObject):\n val = token.get_value()\n result = ast.Name(val, token.lineno)\n else:\n result = token\n index = 1\n while index < len(tokens):\n token = tokens[index]\n if isinstance(token, TokenObject) and token.name == builder.parser.tokens['DOT']:\n index += 1\n token = tokens[index]\n assert isinstance(token, TokenObject)\n result = ast.Getattr(result, token.get_value(), token.lineno)\n elif isinstance(token, ArglistObject):\n result = reduce_callfunc(result, token)\n elif isinstance(token, SubscriptObject):\n result = reduce_subscript(result, token)\n elif isinstance(token, SlicelistObject):\n result = reduce_slice(result, token)\n else:\n assert False, \"Don't know how to handle index %s of %s\" % (index, len(tokens))\n index += 1\n return result\n\n\n\n## Stack elements definitions ###################################\n\nclass BaseRuleObject(ast.Node):\n \"\"\"Base class for unnamed rules\"\"\"\n def __init__(self, count, lineno):\n self.count = count\n self.lineno = lineno # src.getline()\n self.col = 0 # src.getcol()\n\n\nclass RuleObject(BaseRuleObject):\n \"\"\"A simple object used to wrap a rule or token\"\"\"\n def __init__(self, name, count, lineno, parser):\n BaseRuleObject.__init__(self, count, lineno)\n self.rulename = name\n self.parser = parser\n\n def __str__(self):\n return \"\" % ( self.parser.symbol_repr(self.rulename), self.count)\n\n def __repr__(self):\n return \"\" % ( self.parser.symbol_repr(self.rulename), self.count)\n\n\nclass TempRuleObject(BaseRuleObject):\n \"\"\"used to keep track of how many items get_atom() should pop\"\"\"\n def __init__(self, name, count, lineno):\n BaseRuleObject.__init__(self, count, lineno)\n self.temp_rulename = name\n\n def __str__(self):\n return \"\" % (self.temp_rulename, self.count)\n\n def __repr__(self):\n return \"\" % (self.temp_rulename, self.count)\n\n\nclass TokenObject(ast.Node):\n \"\"\"A simple object used to wrap a rule or token\"\"\"\n\n def __init__(self, name, value, lineno, parser):\n self.name = name\n self.value = value\n self.count = 0\n # self.line = 0 # src.getline()\n self.col = 0 # src.getcol()\n self.lineno = lineno\n self.parser = parser\n\n def get_name(self):\n tokname = self.parser.tok_name.get(self.name, str(self.name))\n return self.parser.tok_rvalues.get(self.name, tokname)\n\n def get_value(self):\n value = self.value\n if value is None:\n value = ''\n return value\n\n def descr_fget_value(space, self):\n value = self.get_value()\n return space.wrap(value)\n\n def __str__(self):\n return \"\" % (self.get_name(), self.value)\n\n def __repr__(self):\n return \"\" % (self.get_name(), 
self.value)\n\nTokenObject.typedef = TypeDef('BuildToken',\n name=interp_attrproperty('name', cls=TokenObject),\n lineno=interp_attrproperty('lineno', cls=TokenObject),\n value=GetSetProperty(TokenObject.descr_fget_value))\n\nclass ObjectAccessor(ast.Node):\n \"\"\"base class for ArglistObject, SubscriptObject and SlicelistObject\n\n FIXME: think about a more appropriate name\n \"\"\"\n\nclass ArglistObject(ObjectAccessor):\n \"\"\"helper class to build function's arg list\n \"\"\"\n def __init__(self, arguments, stararg, dstararg, lineno):\n self.fake_rulename = 'arglist'\n self.arguments = arguments\n self.stararg = stararg\n self.dstararg = dstararg\n self.lineno = lineno\n\n def __str__(self):\n return \"\" % self.value\n\n def __repr__(self):\n return \"\" % self.value\n\nclass SubscriptObject(ObjectAccessor):\n \"\"\"helper class to build subscript list\n\n self.value represents the __getitem__ argument\n \"\"\"\n def __init__(self, name, value, lineno):\n self.fake_rulename = name\n self.value = value\n self.lineno = lineno\n\n def __str__(self):\n return \"\" % self.value\n\n def __repr__(self):\n return \"\" % self.value\n\nclass SlicelistObject(ObjectAccessor):\n \"\"\"helper class to build slice objects\n\n self.value is a list [start, end, step]\n self.fake_rulename can either be 'slice' or 'sliceobj' depending\n on if a step is specfied or not (see Python's AST\n for more information on that)\n \"\"\"\n def __init__(self, name, value, lineno):\n self.fake_rulename = name\n self.value = value\n self.lineno = lineno\n\n def __str__(self):\n return \"\" % self.value\n\n def __repr__(self):\n return \"\" % self.value\n\n","repo_name":"camillobruni/pygirl","sub_path":"pypy/interpreter/pyparser/asthelper.py","file_name":"asthelper.py","file_ext":"py","file_size_in_byte":23377,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"}
+{"seq_id":"71065653523","text":"#!/usr/bin/python3\n\"\"\"Write the first class Base\"\"\"\nfrom json import dumps, loads\nimport csv\n\n\nclass Base:\n \"\"\"Class Base\"\"\"\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\"Constructor\"\"\"\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"\"to json string definition\"\"\"\n if list_dictionaries is None or not list_dictionaries:\n return \"[]\"\n else:\n return dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"save to file definition\"\"\"\n if list_objs is not None:\n list_objs = [o.to_dictionary() for o in list_objs]\n with open(\"{}.json\".format(cls.__name__), \"w\", encoding=\"utf-8\") as f:\n f.write(cls.to_json_string(list_objs))\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"from json string definition\"\"\"\n if json_string is None or not json_string:\n return []\n return loads(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"create definition\"\"\"\n from models.rectangle import Rectangle\n from models.square import Square\n if cls is Rectangle:\n new = Rectangle(1, 1)\n elif cls is Square:\n new = Square(1)\n else:\n new = None\n new.update(**dictionary)\n return new\n\n @classmethod\n def load_from_file(cls):\n \"\"\"load from file definition\"\"\"\n from os import path\n file = \"{}.json\".format(cls.__name__)\n if not path.isfile(file):\n return []\n with open(file, \"r\", encoding=\"utf-8\") as f:\n return [cls.create(**d) for d in cls.from_json_string(f.read())]\n\n @classmethod\n def save_to_file_csv(cls, list_objs):\n \"\"\"Save to csv file definition\"\"\"\n from models.rectangle import Rectangle\n from models.square import Square\n if list_objs is not None:\n if cls is Rectangle:\n list_objs = [[o.id, o.width, o.height, o.x, o.y]\n for o in list_objs]\n else:\n list_objs = [[o.id, o.size, o.x, o.y]\n for o in list_objs]\n with open('{}.csv'.format(cls.__name__), 'w', newline='',\n encoding='utf-8') as file:\n writer = csv.writer(file)\n writer.writerows(list_objs)\n\n @classmethod\n def load_from_file_csv(cls):\n \"\"\"load from csv definition\"\"\"\n from models.rectangle import Rectangle\n from models.square import Square\n result = []\n with open('{}.csv'.format(cls.__name__), 'r', newline='',\n encoding='utf-8') as file:\n reader = csv.reader(file)\n for row in reader:\n row = [int(r) for r in row]\n if cls is Rectangle:\n d = {\"id\": row[0], \"width\": row[1], \"height\": row[2],\n \"x\": row[3], \"y\": row[4]}\n else:\n d = {\"id\": row[0], \"size\": row[1],\n \"x\": row[2], \"y\": row[3]}\n result.append(cls.create(**d))\n return result\n\n @staticmethod\n def draw(list_rectangles, list_squares):\n \"\"\"Let draw it definition\"\"\"\n\n import turtle\n import time\n from random import randrange\n turtle.Screen().colormode(255)\n for i in list_rectangles + list_squares:\n t = turtle.Turtle()\n t.color((randrange(255), randrange(255), randrange(255)))\n t.pensize(1)\n t.penup()\n t.pendown()\n t.setpos((i.x + t.pos()[0], i.y - t.pos()[1]))\n t.pensize(10)\n t.forward(i.width)\n t.left(90)\n t.forward(i.height)\n t.left(90)\n t.forward(i.width)\n t.left(90)\n t.forward(i.height)\n t.left(90)\n t.end_fill()\n\n 
time.sleep(5)\n","repo_name":"KennyChukwuebuka/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37305525816","text":"from email.headerregistry import Address\nfrom brownie import accounts, config,chain, DataSetFactory,DataSet, DHN\nimport scripts.deploy as deployer\n\n#Test the subscription to two DataSets\n#See the changes in:\n # variables (subCount, contract balance, subBalance), \n # mappings(addressToSub), \n # arrays of structs (deposits, subscribres)\n\ndef testCreateDS():\n dec_fit = 10**18\n\n #Get the DataSetFactory.sol instance after deployment and the account used\n (DSF,DHN)=deployer.deploy()\n\n #Define accounts\n dohrnii_account = accounts[0] #mints the DHN tokens\n ds_creator_account1 = accounts[1] #creates a nem Data Set callled \"Tetris\"\n ds_creator_account2 = accounts[2] #creates a nem Data Set callled \"Desserts\"\n ds_subscriber_account1 = accounts[3] #will subscribe to the \"Tetris\" dataset with a 1s sub time\n ds_subscriber_account2 = accounts[4] #will also subscribe to the \"Tetris\" dataset with a 1s sub time\n ds_subscriber_account3 = accounts[5] #will subscribe to the \"Desserts\" dataset with a 1day sub time\n ds_subscriber_account4 = accounts[6] #will also subscribe to the \"Desserts\" dataset with a 30 days sub time\n\n #Fund accounts\n DHN.transfer(ds_creator_account1, 30*dec_fit, {\"from\": dohrnii_account}) #fund the creator\n DHN.transfer(ds_creator_account2, 30*dec_fit, {\"from\": dohrnii_account}) #fund the creator\n DHN.transfer(ds_subscriber_account1, 30*dec_fit, {\"from\": dohrnii_account}) #fund sub1\n DHN.transfer(ds_subscriber_account2, 30*dec_fit, {\"from\": dohrnii_account}) #fund sub2\n DHN.transfer(ds_subscriber_account3, 30*dec_fit, {\"from\": dohrnii_account}) #fund sub3\n DHN.transfer(ds_subscriber_account4, 30*dec_fit, {\"from\": dohrnii_account}) #fund sub4 \n\n #Create a DS and instantiate it\n deployer.createDS(dec_fit, DHN, DSF, ds_creator_account1,\"Tetris\", \"https://ipfs.io/ipfs/Qme7ss3ARVgxv6rXqVPiikMJ8u2NLgmgszg13pYrDKEoiu\",\n \"Games\",\"Tetris statistics and data\", 10*dec_fit, 3600, 2*dec_fit)\n\n DS_instance1 = deployer.getDSbyName(dec_fit, DSF, ds_subscriber_account1, \"Tetris\")\n \n #Create a DS and instantiate it\n deployer.createDS(dec_fit, DHN, DSF, ds_creator_account2,\"Desserts\", \"https://ipfs.io/ipfs/Qme7ss3ARVgxv6rXqVPiikMJ8u2NLgmgszg13pYrDKEoiu\",\n \"Food\",\"Some dessert recipes\", 5*dec_fit, 3600, 2*dec_fit)\n \n DS_instance2 = deployer.getDSbyName(dec_fit, DSF, ds_subscriber_account2, \"Desserts\")\n\n #Sub1\n deployer.subToDS(dec_fit, DHN, DSF, ds_subscriber_account1, \"Tetris\", 0) \n #Sub2\n deployer.subToDS(dec_fit, DHN, DSF, ds_subscriber_account2, \"Tetris\", 0)\n #Sub3\n deployer.subToDS(dec_fit, DHN, DSF, ds_subscriber_account3, \"Desserts\", 1) \n #Sub4\n deployer.subToDS(dec_fit, DHN, DSF, ds_subscriber_account4, \"Desserts\", 2)\n\n#Assertion: DS creation alterations\n \n assert ((20+10*2)*dec_fit, 2) == (DHN.balanceOf(DS_instance1),#contract balance changes because of 2 subs\n DS_instance1.subCount())#subcount increases by 2\n \n assert ((20+2*5)*dec_fit, 2) == (DHN.balanceOf(DS_instance2),#contract balance changes because of 2 subs\n DS_instance2.subCount())#subcount increases by 2\n \n#Assertion: Mapping and Subscriber struct\n\n #Subscribed to \"Tetris\"\n info1 = DS_instance1.addressToSub(ds_subscriber_account1)\n #Price paid, subscription time, Is this person currently subbed?\n assert (10*dec_fit, 1, True) == (info1[0], info1[1], info1[3])\n\n #Subscribed to \"Tetris\"\n info2 = DS_instance1.addressToSub(ds_subscriber_account2)\n #Price paid, subscription time, Is this 
person currently subbed?\n assert (10*dec_fit, 1, True) == (info2[0], info2[1], info2[3])\n\n #Not subscribed to \"Tetris\"\n info3 = DS_instance1.addressToSub(ds_subscriber_account3)\n #Price paid, subscription time, Is this person currently subbed?\n assert (0, 0, False) == (info3[0], info3[1], info3[3])\n\n #Subscribed to \"Desserts\"\n info4 = DS_instance2.addressToSub(ds_subscriber_account3)\n #Price paid, subscription time, Is this person currently subbed?\n assert (5*dec_fit, 24*3600, True) == (info4[0], info4[1], info4[3])\n\n #Subscribed to \"Desserts\"\n info5 = DS_instance2.addressToSub(ds_subscriber_account4)\n #Price paid, subscription time, Is this person currently subbed?\n assert (5*dec_fit, 30*24*3600, True) == (info5[0], info5[1], info5[3])\n\n #Not subscribed to \"Desserts\"\n info6 = DS_instance2.addressToSub(ds_subscriber_account1)\n #Price paid, subscription time, Is this person currently subbed?\n assert (0, 0, False) == (info6[0], info6[1], info6[3])\n\n#Assertion: \"deposits\" struct array\n\n #1st Subscriber deposit to \"Tetris\"\n info7 = DS_instance1.deposits(0)\n #Price paid, subscription time, Is this person currently subbed?\n assert (ds_subscriber_account1, 10*dec_fit) == (info7[0], info7[1])\n\n #2nd Subscriber deposit to \"Desserts\"\n info8 = DS_instance2.deposits(1)\n #Price paid, subscription time, Is this person currently subbed?\n assert (ds_subscriber_account4, 5*dec_fit) == (info8[0], info8[1])\n\n#Assertion: \"subscribers\" address array\n\n #1st Subscriber deposit to \"Tetris\"\n info9 = DS_instance1.subscribers(0)\n #Price paid, subscription time, Is this person currently subbed?\n assert (ds_subscriber_account1) == (info9)\n\n #2nd Subscriber deposit to \"Desserts\"\n info10 = DS_instance2.subscribers(1)\n #Price paid, subscription time, Is this person currently subbed?\n assert (ds_subscriber_account4) == (info10)\n\n\n","repo_name":"SayNode/Subscripiton-contract","sub_path":"tests/test_SubToDS.py","file_name":"test_SubToDS.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34668814722","text":"#test case1: -73.59 45.49 to -73.55 45.49 threshold = 0.5\n#test case2: -73.59 45.49 to -73.55 45.53 threshold = 0.5\n#test case3: -73.568 45.508 to -73.55 45.53 threshold = 0.5 (Supposed no path)\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport shapefile\nimport numpy as np\nfrom collections import defaultdict\nimport math\n\ngrid_size = 0.002\nx1 = np.arange(-73.590,-73.550,grid_size)\ny1 = np.arange(45.490,45.5301,grid_size)\nshape = shapefile.Reader(\"Shape/crime_dt.shp\",encoding='ISO-8859-1')\nshapeRecords = shape.shapeRecords()\nnum_seq = []\ngrid_map= []\n\nfor i in range(len(x1)):\n col = []\n for j in range(len(y1)):\n col.append(0)\n grid_map.append(col)\n\nx_coordinates=[]\ny_coordinates=[]\n\n#counting the density(crime rate) in each grid.\nfor k in range(len(shapeRecords)):\n x = float(shapeRecords[k].shape.__geo_interface__[\"coordinates\"][0])\n y = float(shapeRecords[k].shape.__geo_interface__[\"coordinates\"][1])\n x_coordinates.append(x)\n y_coordinates.append(y)\n x = int((x - (-73.590)) / grid_size)\n y = int((y - (45.490)) / grid_size)\n grid_map[y][x] +=1\n#grid_map.reverse()\n#display the grid number\n# for row in grid_map:\n# print(row)\n\n#storing all rate numbers in a list from the grid_map, and sort the list in descending order.\nfor i in range(len(grid_map)-1):\n for j in range(len(grid_map[i])-1):\n num_seq.append(grid_map[i][j])\nnum_seq = sorted(num_seq,reverse=True)\n\n# promote user to input the threshold, and also verify if the threshold is valid.\nvalid_threshold = False\nwhile (not valid_threshold):\n threshold = input(\"Please enter a threshold:\")\n if (float(threshold) <= 1) and (float(threshold) >= 0):\n valid_threshold = True\n\n# define the high crime block based on the top (1-threshold) crime rate numbers.\n\nindex = len(num_seq) - int(float(threshold) * len(num_seq)) - 1\nhigh_num = num_seq [index]\n\n# plot the grid using only 2 colors.\nif threshold == 0.0 :\n plt.hist2d(x_coordinates, y_coordinates, bins=[x1, y1], cmap=ListedColormap(['yellow']))\nelif threshold == 1.0:\n plt.hist2d(x_coordinates, y_coordinates, bins=[x1, y1], cmap=ListedColormap(['purple']))\nelse:\n plt.hist2d(x_coordinates, y_coordinates, bins=[x1, y1], cmap=ListedColormap(['purple', 'yellow']), vmin=0,vmax=2 * high_num)\n\n#total number of crimes in each grid;\nprint(\"Total crimes in each grid:\");\n\nfor i in range(len(grid_map)-1):\n print(grid_map[i])\n\n\n\n#display statics\nprint(\"Total number of crime:\",sum(num_seq))\nprint(\"Average:\",\"{:.3f}\".format(np.average(num_seq)))\nprint(\"Standard deviation:\",\"{:.3f}\".format(np.std(num_seq)))\nprint(\"High crime rate:\",high_num)\n#plt.show()\n\n\nfor i in range(len(grid_map)):\n for j in range(len(grid_map[i])):\n if grid_map[i][j] >= high_num:\n grid_map[i][j] = 1\n else:\n grid_map[i][j] = 0\n\n\ndef getNeighbours(point):\n l = []\n point_x = point[0]\n point_y = point[1]\n\n # up\n if point_y+1 <= len(grid_map)-1:\n if point_x == 0 or point_x == len(grid_map[0])-1:\n l.append((point_x,point_y+1))\n else:\n if not(grid_map[point_y][point_x] ==1 and grid_map[point_y][point_x-1] ==1):\n l.append((point_x,point_y+1))\n #down\n if point_y-1 >= 0:\n if point_x == 0 or point_x == len(grid_map[0])-1:\n l.append((point_x, point_y - 1))\n else:\n if not(grid_map[point_y-1][point_x] ==1 and grid_map[point_y-1][point_x-1] ==1):\n l.append((point_x,point_y-1))\n #left\n if point_x-1 >=0:\n if point_y == 0 or point_y == len(grid_map)-1:\n 
l.append((point_x-1,point_y))\n else:\n if not (grid_map[point_y][point_x-1] == 1 and grid_map[point_y-1][point_x-1] == 1):\n l.append((point_x-1,point_y))\n #right\n if point_x+1 <= len(grid_map[0])-1:\n if point_y == 0 or point_y == len(grid_map)-1:\n l.append((point_x+1,point_y))\n else:\n if not (grid_map[point_y][point_x] == 1 and grid_map[point_y-1][point_x] == 1):\n l.append((point_x + 1, point_y))\n #left-upper diagonal\n if point_x - 1 >=0 and point_y + 1 <=len(grid_map)-1:\n if grid_map[point_y][point_x-1] == 0:\n l.append((point_x-1,point_y+1))\n #right-upper diagonal\n if point_x + 1 <=len(grid_map[0])-1 and point_y+1<=len(grid_map)-1:\n if grid_map[point_y][point_x] == 0:\n l.append((point_x+1,point_y+1))\n #left-lower diagonal\n if point_x-1>=0 and point_y-1>=0:\n if grid_map[point_y-1][point_x-1] == 0:\n l.append((point_x-1,point_y-1))\n #right-lower diagonal\n if point_x+1 <= len(grid_map[0])-1 and point_y-1>=0:\n if grid_map[point_y-1][point_x] == 0:\n l.append((point_x+1,point_y-1))\n return l\n\n#print(getNeighbours((4,1)))\n\ndef getCost(p1,p2):\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n\n if x1 == x2 and y2-y1 ==1:\n if grid_map[y1][x1] ==1:\n return 1.3\n elif x1-1>=0 :\n if grid_map[y1][x1-1] ==1:\n return 1.3\n else:\n return 1\n else:\n return 1\n\n if x1 == x2 and y1-y2 == 1:\n if grid_map[y2][x2] ==1:\n return 1.3\n elif x1-1 >= 0 and y1-1 >=0:\n if grid_map[y1-1][x1-1] ==1:\n return 1.3\n else:\n return 1\n else:\n return 1\n if y1 == y2 and x1-x2 == 1:\n if grid_map[y2][x2] ==1:\n return 1.3\n elif y2-1>=0:\n if grid_map[y2-1][x2] ==1:\n return 1.3\n else:\n return 1\n else:\n return 1\n if y1 == y2 and x2-x1 == 1:\n if grid_map[y1][x1] == 1:\n return 1.3\n elif y1-1 >=0:\n if grid_map[y1-1][x1] ==1:\n return 1.3\n else:\n return 1\n else:\n return 1\n if abs(x1 - x2) ==1 and abs(y1 -y2) == 1:\n return 1.5\n\ndef construct_path(cameFrom,current):\n total_path = [current]\n while current in cameFrom.keys():\n current = cameFrom[current]\n total_path.insert(0,current)\n return total_path\n\ndef A_start(start, goal):\n # simple heuristic function estimate the cost from n to goal.\n def h(n):\n return abs(goal[0] - n[0]) + abs(goal[1] - n[1])\n\n # open set records the to be visited points\n openSet = [start]\n\n # a map records the nodes' parent node\n cameFrom = {}\n\n #g function records the cost from initial point to n.\n gScore = defaultdict(lambda: float(\"inf\"))\n gScore[start] = 0\n\n # f function records the cost from start to n plus from n to the goal.\n fScore = defaultdict(lambda: float(\"inf\"))\n fScore[start] = h(start)\n\n #visit each node in open set\n while openSet:\n lowest_fscore = math.inf\n current = None\n for point in openSet:\n if fScore[point] < lowest_fscore:\n lowest_fscore = fScore[point]\n current = point\n\n #if the current node is the goal, build the path\n if current == goal:\n return construct_path(cameFrom,current)\n\n #remove the visited node\n openSet.remove(current)\n neighbors = getNeighbours(current)\n for neighbor in neighbors:\n temp_gScore = gScore[current] + getCost(current,neighbor)\n if temp_gScore < gScore[neighbor]:\n\n cameFrom[neighbor] = current\n gScore[neighbor] = temp_gScore\n fScore[neighbor] = gScore[neighbor] + h(neighbor)\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n return None\n\n#promote user input for initial point and goal point\npoint_valid = False\nwhile not point_valid:\n initial_x,initial_y = input(\"Please enter the start point:\").split()\n goal_x,goal_y = input(\"Please 
enter the end point:\").split()\n initial_x,initial_y = float(initial_x),float(initial_y)\n goal_x,goal_y = float(goal_x),float(goal_y)\n if (initial_x>=-73.590 and initial_x<=-73.550) and (initial_y>=45.490 and initial_y<=45.530) and (goal_x>=-73.590 and goal_x<=-73.550) and (goal_y>=45.490 and goal_y<=45.530):\n point_valid = True\n else:\n print(\"Invalid points, please enter another one.\")\n\n#covert the input points to integer so it can fit into my algorithm.\ninitial_x = int(((initial_x - (-73.59)) / grid_size) + 0.01)\ninitial_y = int(((initial_y - (45.49)) / grid_size) +0.01)\ngoal_x = int(((goal_x - (-73.59)) / grid_size)+0.01)\ngoal_y = int(((goal_y - (45.49)) / grid_size)+0.01)\n\n#invoke A* algorithm to generate the optimal path\nfinal_path = A_start((initial_x,initial_y),(goal_x,goal_y))\n\nif final_path != None :\n # calculate the total cost\n total_cost = 0\n for i in range(len(final_path) - 1):\n total_cost += getCost(final_path[i], final_path[i + 1])\n\n # print(final_path)\n\n # convert the coordinates to original coordinates.\n real_path = []\n path_x = []\n path_y = []\n for point in final_path:\n real_path.append((round(point[0] * grid_size + (-73.59), 3), round(point[1] * grid_size + 45.49, 3)))\n path_x.append(round(point[0] * grid_size + (-73.59), 3))\n path_y.append(round(point[1] * grid_size + 45.49, 3))\n\n print(\"Path:\",real_path)\n print(\"path cost:\", \"{:.2f}\".format(total_cost))\n\n plt.plot(path_x, path_y, color=\"red\", linewidth=6)\nelse:\n print(\"Due to blocks, no path is found. Please change the map and try again\")\n\n#show the plot\nplt.xticks(x1, rotation=90)\nplt.yticks(y1)\nplt.show()\nprint(\"Program terminated.\")\n","repo_name":"m441249833/MontrealCrimeAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43264257876","text":"# Escrever um programa que coleta a senha do Usuário(previamente ajustada), armazena a senha digitada em uma lista e\n# retorna a quantidade de vezes que o usuário precisou para digitar a senha correta\n\nsenha = ''\nvezes = 0\n\nwhile senha != 'fofinho':\n senha = input('Digite a sua senha de usuário: ')\n vezes += 1\nprint(f'Você precisou de {vezes} tentativas para acertar a senha')\n","repo_name":"Yuri-Santiago/sor-python-ifce-p7","sub_path":"Aula 02/atv06.py","file_name":"atv06.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31945778995","text":"import tkinter as tk\nimport socket\nfrom threading import Thread\nfrom errno import EAGAIN, EWOULDBLOCK\nfrom sys import exit\n\nHEADER = 10\n\nIP = socket.gethostbyname(socket.gethostname())\nPORT = 5050\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nmainwindow = tk.Tk()\ntext_box = tk.Text()\n\n\n# Build GUI\ndef build_Gui():\n mainwindow.title(\"Smart home app\")\n\n mainframe = tk.Frame(mainwindow, bg=\"#1e1e1e\")\n mainframe.pack(fill=\"both\", expand=True)\n\n label = tk.Label(mainframe, bg=\"#1e1e1e\", fg=\"white\", padx=5, pady=5)\n label.config(font=(\"Arial\", 18))\n label.pack(fill=\"x\")\n\n verticalFrame = tk.Frame(mainframe, bg=\"#1e1e1e\")\n label = tk.Label(verticalFrame, text=\"Smarta hemmet\",\n bg=\"#1e1e1e\", fg=\"white\", padx=10, pady=10,\n font=(\"Arial\", 20))\n label.pack(fill=\"x\", padx=10, pady=10)\n verticalFrame.pack(fill=\"x\")\n\n label = tk.Label(mainframe, text=\"Anslut till server och be om data \"\n \"från hemmets sensorer\", bg=\"#1e1e1e\", fg=\"white\",\n padx=5, pady=5, font=(\"Arial\", 14))\n label.pack(fill=\"x\")\n\n label = tk.Label(mainframe, text=\"Om ingen data visas vid förfrågan \"\n \"är sensorklient ej operativ\", bg=\"#1e1e1e\", fg=\"white\",\n padx=5, pady=5, font=(\"Arial\", 14))\n label.pack(fill=\"x\")\n\n horizontal_frame = tk.Frame(mainframe, bg=\"#1e1e1e\")\n\n button1 = tk.Button(horizontal_frame, text=\"Anslut till server\",\n command=connect_to_server,\n fg=\"#1e1e1e\", bg=\"white\", padx=10, pady=10)\n button1.grid(row=0, column=0, padx=10, pady=10, sticky=\"nsew\")\n button2 = tk.Button(horizontal_frame, text=\"Temperatur\",\n command=ask_temperature,\n fg=\"#1e1e1e\", bg=\"white\", padx=10, pady=10)\n button2.grid(row=0, column=1, padx=10, pady=10, sticky=\"nsew\")\n button3 = tk.Button(horizontal_frame, text=\"Luftfuktighet\",\n command=ask_humidity,\n fg=\"#1e1e1e\", bg=\"white\", padx=10, pady=10)\n button3.grid(row=0, column=2, padx=10, pady=10, sticky=\"nsew\")\n button4 = tk.Button(horizontal_frame, text=\"Stäng anslutning\",\n command=close_connection,\n fg=\"#1e1e1e\", bg=\"white\", padx=10, pady=10)\n button4.grid(row=0, column=3, padx=10, pady=10, sticky=\"nsew\")\n\n horizontal_frame.grid_columnconfigure(0, weight=1)\n horizontal_frame.grid_columnconfigure(1, weight=1)\n horizontal_frame.grid_columnconfigure(2, weight=1)\n horizontal_frame.grid_columnconfigure(3, weight=1)\n horizontal_frame.pack(fill=\"x\")\n\n verticalFrame = tk.Frame(mainframe, bg=\"#1e1e1e\")\n text_box.configure(fg=\"white\", bg=\"#1e1e1e\", font=(\"arial\", 12),\n state=tk.DISABLED)\n text_box.pack(fill=\"both\", padx=5, pady=5)\n verticalFrame.pack(fill=\"both\")\n\n\n# This function inserts data in GUI text box\ndef insert_text(msg):\n try:\n text_box.configure(state=tk.NORMAL)\n text_box.insert(tk.END, msg + \"\\n\")\n text_box.configure(state=tk.DISABLED)\n except Exception as e:\n print(e)\n pass\n\n\n# Connect to server and start a thread that listens for messages\n# Here I wanted to get a connection message back from server,\n# but I didn't get that to work.\n# If we fail to connect we hit the Exception so this should be fine as it is\ndef connect_to_server():\n try:\n client_socket.connect((IP, PORT))\n client_socket.setblocking(False)\n\n send(\"Client\")\n insert_text(\"Ansluten till server\")\n Thread(target=listen_for_message, daemon=True).start()\n except OSError as e:\n insert_text(str(e))\n print(e)\n pass\n\n\n# Continuously listens for messages and if there is one\n# 
send it to insert_text function\n# If we get disconnection message from server close program\ndef listen_for_message():\n while True:\n message = recieve()\n if message:\n insert_text(message)\n if message == \"Anslutning avbruten\":\n # If server sends our disconnection message back\n # we want to close app\n mainwindow.after(1000, mainwindow.destroy)\n\n\n# This function returns message from server IF there is one\ndef recieve():\n try:\n client_header = client_socket.recv(HEADER)\n\n if not len(client_header):\n print('Connection closed by the server')\n exit()\n\n client_length = int(client_header.decode('utf-8').strip())\n client = client_socket.recv(client_length).decode('utf-8')\n message_header = client_socket.recv(HEADER)\n message_length = int(message_header.decode('utf-8').strip())\n message = client_socket.recv(message_length).decode('utf-8')\n\n print(f'{client} > {message}')\n return message\n\n # When there are no incoming data, error is going to be raised\n # We are going to check for both, as they can depend on different os,\n # and only want to close if both errors hit\n # We expecte one error, meaning no incoming data, so continue as normal\n except IOError as e:\n if e != EAGAIN and e.errno != EWOULDBLOCK:\n print('Reading error: {}'.format(str(e)))\n exit()\n pass\n # Something else went wrong\n except Exception as e:\n print('General error: {}'.format(str(e)))\n exit()\n\n\n# This function converts message to bytes and sends it to server\ndef send(data):\n message = data.encode(\"utf-8\")\n message_header = f\"{len(message):<{HEADER}}\".encode('utf-8')\n client_socket.send(message_header + message)\n\n\n# Sends message to server asking sensor klient for temperature\ndef ask_temperature():\n try:\n send(\"?temperature\")\n except OSError as e:\n insert_text(\"[WinError 10057] Ej ansluten till server\")\n print(e)\n pass\n\n\n# Sends message to server asking sensor klient for humidity\ndef ask_humidity():\n try:\n send(\"?humidity\")\n except OSError as e:\n insert_text(\"[WinError 10057] Ej ansluten till server\")\n print(e)\n pass\n\n\n# This function sends disconnection message to server\ndef close_connection():\n try:\n send(\"Anslutning avbruten\")\n except OSError as e:\n insert_text(\"[WinError 10057] Ej ansluten till server\\n\"\n \"Stänger program...\")\n print(e)\n mainwindow.after(1500, mainwindow.destroy)\n\n\n# Starts GUI Client\ndef start_Gui():\n build_Gui()\n mainwindow.mainloop()\n\n\nif __name__ == \"__main__\":\n start_Gui()\n","repo_name":"MartinMachl/Nackademin","sub_path":"2_Programmering_Python/App/smart_home_client.py","file_name":"smart_home_client.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"38828286400","text":"'''This module samples some data from a saved checkpoint'''\n'''\n o==+--\n | |\\ \\\n | | \\ \\ ____________________\n | \\ \\ \\ | |\n | \\ \\ \\ | +------------+ |\n | \\ \\ \\ | | (__) | |\n | \\ \\ \\| | (oo) | |\n | \\ \\ | | o\\ .\\/. | |\n | \\ \\| | | \\/ \\ | |\n /---\\ \\ | +------------+ |\n / \\ \\| |\n | | | |\n \\ / | |\n \\---/ | |\n | |\n --------------------------\n ( )\n --------------------------\n'''\n\nimport argparse\nimport os\nimport tensorflow as tf\nfrom prepare_data import CharData\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm\n\n\ndef sample(sample_type, prediction, temperature=0.9):\n \n if sample_type==0:\n return np.argmax(prediction)\n else:\n sample_exp = np.exp(prediction) / temperature\n sample_reduce_mean = sample_exp / np.sum(sample_exp)\n prediction_real = np.random.choice(range(len(prediction)), 1, p=sample_reduce_mean)\n return prediction_real[0]\n\n\ndef get_meta_file_path(save_dir):\n '''Return the first .meta file in @param save_dit'''\n meta_file = ''\n for _f in os.listdir(save_dir):\n if _f[-5:] == '.meta':\n meta_file = os.path.join(save_dir, _f)\n break\n print(meta_file)\n return meta_file\n\n\ndef vectorize(text_to_vectorize, character_set):\n temp = np.zeros((1, len(text_to_vectorize), len(character_set)))\n for i, j in enumerate(text_to_vectorize):\n temp[0][i][character_set.index(j)] = 1\n return temp\n\n\ndef main():\n '''Run the script i guess'''\n #build the arguments parser\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--save-dir', type=str, default='saved-checkpoints',\n help='directory with the checkpoints to sample from')\n parser.add_argument('--n', type=int, default='100',\n help='number of character to sample')\n parser.add_argument('--timesteps', type=int, default=50,\n help='timesteps to unravel the graph')\n parser.add_argument('--sampling-type', type=int, default=1,\n help='sampling-type, 0-argmax, 1-exponential')\n parser.add_argument('--temperature', type=float, default=0.9,\n help='temperature for exponential sampling. 
between 0 & 1') \n args = vars(parser.parse_args())\n\n save_dir = args['save_dir']\n sample_size = args['n']\n timesteps = args['timesteps']\n sampling_type = args['sampling_type']\n temperature = args['temperature']\n\n checkpoint_file = tf.train.latest_checkpoint(save_dir)\n meta_file = get_meta_file_path(save_dir)\n seed_file = os.path.join(save_dir, 'seed.txt')\n seed_data = CharData(seed_file, 1, 10)\n character_set = seed_data.character_set\n all_text = seed_data.random_seed(timesteps)\n with tf.Session() as sess:\n saver = tf.train.import_meta_graph(meta_file)\n saver.restore(sess, checkpoint_file)\n input_ph = tf.get_default_graph().get_tensor_by_name('input_data:0')\n op_to_restore = tf.get_default_graph().get_tensor_by_name(\"output_layer:0\")\n # todo fix this weird progress bar\n with tqdm(total=sample_size) as pb:\n for i in range(sample_size):\n text_input = vectorize(all_text[-timesteps:], character_set)\n out_vec = sess.run(op_to_restore, feed_dict={input_ph:text_input})[0]\n sampled_output = sample(sampling_type, out_vec, temperature=0.9)\n all_text = all_text + character_set[sampled_output]\n pb.update(i)\n print(all_text)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"deeplaying/char_wgu","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28396770531","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n==================================================\n@project -> file :LHYML -> hw01_org.PY\n@author: Minovo\n@time : 2021/3/11 1:23 AM\n@IDE : PyCharm\n@site : \n@desc : \n==================================================\n\"\"\"\n\n'''load train data train.csv'''\n'''\n全局禁止警告:\nimport warnings\nimport numpy as np\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nprint('x' in np.arange(5)) #returns False, without Warning\n逐行抑制警告.\n\nimport warnings\nimport numpy as np\n\nwith warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n print('x' in np.arange(2)) #returns False, warning is suppressed\n\nprint('x' in np.arange(10)) #returns False, Throws FutureWarning\n'''\nimport sys\nimport pandas as pd\nimport numpy as np\nimport warnings\nimport math\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n# print('x' in np.arange(5)) #returns False, without Warning\n\ndata = pd.read_csv('./trainsimple.csv', encoding='big5')\n'''clear data '''\n\n'''取需要的數值部分,將 'RAINFALL' 欄位全部補 0。\n另外,如果要在 colab 重覆這段程式碼的執行,請從頭開始執行(把上面的都重新跑一次),以避免跑出不是自己要的結果(若自己寫程式不會遇到,但 colab 重複跑這段會一直往下取資料。意即第一次取原本資料的第三欄之後的資料,第二次取第一次取的資料掉三欄之後的資料,...)。'''\n\ndata = data.iloc[:, 3:]\ndata[data == 'NR'] = 0\nraw_data = data.to_numpy()\n\n'''Extract Features 將原始 5760 * 5 的資料依照每個月分重組成 12 個 18 (features) * 480 (hours) 的資料。'''\n'''Extract Features 3月3天4小时3特征將原始 36 * 3特征 的資料依照每個月分重組成 3 個 3 (features) * 12 (hours:3day*4hours) 的資料。'''\n\nmonth_data = {}\n'''3 month '''\nfor month in range(3):\n '''3feature * 3day * 4hours'''\n sample = np.empty([3, 12])\n '''3day'''\n for day in range(3):\n ''' day * 4hours, 3feature * (3day*month+day '''\n sample[:, day * 4: (day+1) * 4] = raw_data[3 * (3 * month + day): 3 * (3*month + day+1), :]\n month_data[month] = sample\n\n'''Extract Features2 3月3天4小时3特,每個月會有 12hrs,每 2 小時形成一個 data,每個月會有 12-2=10 個 data,故總資料數為 12-2=10 * 3month 筆,\n而每筆 data 有 2 * 3 的 features (一小時 3 個 features * 2 小時)。\n對應的 target 則有 (12-2) * 3 個(第 3 個小時的 PM2.5)\n'''\n''' 3month * 12hrs-2hrs, 3features * 2 hrs'''\nx = np.empty([3 * 10, 3 * 2], dtype=float)\n''' y 3* 1month * (total 12hrs-2hrs) '''\ny = np.empty([3 * 10, 1], dtype=float)\nfor month in range(3):\n for day in range(3):\n for hour in range(4):\n if day == 2 and hour > 1:\n continue\n x[month * 10 + day * 4 +hour, :] = month_data[month][:,day * 4 +hour:day * 4 + hour + 2].reshape(1, -1)\n\n y[month * 10 + day * 4 + hour, 0] = month_data[month][2, day * 4 + hour + 2]\n\n'''Normalize (1)'''\n''' corr standard: X* = (X - E(X)) / (var ** 0.5) , cov(X*, Y*) = corr(X,Y)'''\nmean_x = np.mean(x, axis=0)\nstd_x = np.std(x, axis=0)\n\nprint('***')\n\nfor i in range(len(x)):\n for j in range(len(x[0])):\n if std_x[j] != 0:\n x[i][j] = (x[i][j] - mean_x[j]) / std_x[j]\n'''#**Split Training Data Into \"train_set\" and \"validation_set\"**'''\n'''這部分是針對作業中 report 的第二題、第三題做的簡單示範,以生成比較中用來訓練的 train_set 和不會被放入訓練、\n只是用來驗證的 validation_set。'''\nprint('****')\nx_train_set = x[: math.floor(len(x) * 0.8), :]\ny_train_set = y[: math.floor(len(y) * 0.8), :]\nx_validation = x[math.floor(len(x) * 0.8):, :]\ny_validation = y[math.floor(len(y) * 0.8):, :]\n\n# print(x_train_set)\n# print(y_train_set)\n# print(x_validation)\n# print(y_validation)\nprint(len(x_train_set))\nprint(len(y_train_set))\nprint(len(x_validation))\nprint(len(y_validation))\n\n\n''' 3 feature * 每2 days '''\ndim = 3 * 2 + 1\nw = np.zeros([dim, 1])\nprint('2*' * 50)\n''' 3month * 
12-2hrs '''\nx = np.concatenate((np.ones([3 * 10, 1]), x), axis=1).astype(float)\nlearning_rate = 100\niter_time = 5\nadagrad = np.zeros([dim, 1])\neps = 0.0000000001\nfor t in range(iter_time):\n loss = np.sqrt(np.sum(np.power(np.dot(x, w) - y, 2)) / 10 / 3) # rmse\n # if t % 100 == 0:\n # print(str(t) + \":\" + str(loss))\n print('1000000000*' * 20)\n print(loss)\n gradient = 2 * np.dot(x.transpose(), np.dot(x, w) - y) # dim*1\n print('2000000000*' * 20)\n print(gradient)\n # print(y)\n # print(t)\n # print(gradient)\n adagrad += gradient ** 2\n print('3000000000*' * 20)\n print(adagrad)\n w = w - learning_rate * gradient / np.sqrt(adagrad + eps)\n print('4000000000*' * 20)\n print(w)\nnp.save('weightsimple.npy', w)\n\n\n","repo_name":"Minovoo/HYLML","sub_path":"NOVOHW01/hw01_simple.py","file_name":"hw01_simple.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23679817707","text":"import myPackage as mp\nimport numpy as np\n\n\nchain = mp.lammps.data()\nloop_datafile = '/Users/Shi/Downloads/GSE63525_GM12878_primary+replicate_HiCCUPS_looplist_with_motifs.txt'\nepigenetic_state_datafile = '/Users/Shi/Desktop/LEM/GM12878/wgEncodeBroadHmmGm12878HMM.bed'\nchain.writetofile('LEM_chain','lem_chain_chr5.dat',bond_length=1.13,straight_line=False,\\\n\t loop_filename=loop_datafile, epigenetic_filename = epigenetic_state_datafile, chrom='Chr5',\\\n\t start=145870001,end=157870001,bin_size=1200, data_source = 'Encode_Chrom_State')\n\n\n'''\nchain2 = mp.lammps.data()\nloop_base = [[201,400],[601,800],[1001,1200],[1401,1600]]\nstate1 = np.concatenate((np.arange(1,201), np.arange(601,1001)))\nstate1 = np.concatenate((state1, np.arange(1401,1601)))\nstate2 = np.concatenate((np.arange(201,601), np.arange(1001,1401)))\nstate2 = np.concatenate((state2, np.arange(1601,1801)))\nepigenetic_state = {'state1':state1,'state2':state2}\n\nchain2.writetofile('LEM_chain','lem_chain_manual.dat',bond_length=1.13,straight_line=False,binding_protein=[200,200],loop_base=loop_base,epigenetic_state=epigenetic_state)\n'''","repo_name":"anyuzx/myPackage","sub_path":"examples/make_lem_chain_data.py","file_name":"make_lem_chain_data.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74819728081","text":"import logging,os\nimport atexit\n\nclass FileSeparatorHandler(logging.FileHandler):\n \"\"\"Custom FileHandler that writes a separator line after each log record.\"\"\"\n\n def emit(self, record):\n \"\"\"Emit a log record and write a separator line.\"\"\"\n super().emit(record)\n\nclass LoggingConfig:\n \"\"\"Logging configuration utility for setting up a logger with FileHandler and StreamHandler.\"\"\"\n\n def __init__(self, config={}):\n \"\"\"\n Initialize the LoggingConfig.\n :param log_level: The desired log level for the logger.\n \"\"\"\n self.file_path=config.get('file_path')\n self.log_level = config.get(\"log_level\")\n self.formatter=config.get('log_formatter')\n\n def configure_logger(self):\n \"\"\"\n Configure the logger with FileHandler and StreamHandler.\n :return: The configured logger object.\n \"\"\"\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(self.log_level)\n\n # Create a formatter\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - Line %(lineno)d - %(message)s')\n\n # Create a FileHandler with custom FileSeparatorHandler\n file_handler = FileSeparatorHandler(self.file_path)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n\n # Create a StreamHandler to display logs on the console\n steam_handler = logging.StreamHandler()\n steam_handler.setLevel(logging.DEBUG)\n steam_handler.setFormatter(formatter)\n\n # Add the handlers to the logger\n self.logger.addHandler(file_handler)\n self.logger.addHandler(steam_handler)\n\n # Register atexit handler to write separator line on program exit\n atexit.register(self._write_separator_line)\n\n return self.logger\n\n def _write_separator_line(self):\n \"\"\"\n Write a separator line to the log file.\n This method is automatically called on program exit.\n \"\"\"\n separator = '-' * 150\n file_handler = next((handler for handler in self.logger.handlers if isinstance(handler, FileSeparatorHandler)),\n None)\n if file_handler:\n file_handler.stream.write(f\"{separator}\\n\")\n\n\nif __name__ == \"__main__\":\n # Create a LoggingConfig instance with log level INFO\n config = LoggingConfig(logging.INFO)\n\n # Configure the logger\n config.configure_logger()\n\n # Get the configured logger object\n logger = config.logger\n\n # Log some entries\n logger.info('Log entry 1')\n logger.info('Log entry 2')\n\n\n","repo_name":"pawan-salve-199/data_quality_tool_","sub_path":"resource/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6921903350","text":"\"\"\"\nStore standard root for user page navigation\n\"\"\"\n\nfrom flask import Blueprint, render_template, request\nfrom .models import Track, Score\n\n# define blueprint for flask application\nplaylist = Blueprint('playlist', __name__)\n\n@playlist.route('/create_playlist')\ndef home():\n # render our create_playlist.html in template\n return render_template(\"create_playlist.html\")\n\n@playlist.route('/create_playlist_top', methods=['GET'])\ndef create_playlist_top():\n top_num = request.args.get(\"top\")\n tracks = Track.query.order_by(Track.score.desc()).limit(top_num)\n return render_template(\"create_playlist.html\", tracks=tracks, selection=True)\n\n@playlist.route('/create_playlist_bottom', methods=['GET'])\ndef create_playlist_bottom():\n bottom_num = request.args.get(\"bottom\")\n tracks = Track.query.order_by(Track.score).limit(bottom_num)\n return render_template(\"create_playlist.html\", tracks=tracks, selection=True)\n\n\n\n\n","repo_name":"kluu22/music-catalog","sub_path":"website/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29335583061","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 22 21:25:41 2020\r\n\r\n@author: LW\r\n\"\"\"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 20 14:02:32 2020\r\n\r\n@author: LW\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 21 23:01:26 2020\r\n\r\n@author: LW\r\n\"\"\"\r\nimport imageio\r\nfrom osgeo import gdal,gdal_array,osr, ogr\r\nimport numpy as np\r\nimport glob\r\nimport os,sys \r\nfrom skimage import io\r\nimport cv2\r\nfrom numpy import nan as NaN\r\n\r\nvi='NDVI_MAX'\r\nlable_path=r'F:\\工作文件\\论文发表\\葡萄主产区优势对比\\样本数据\\标签数据\\标签Raster\\grape-lable-new.tif'\r\nfile_path=r'F:/工作文件/论文发表/葡萄主产区优势对比/测试数据/20190423_S2A.tif'#+vi+'_2019.tif'\r\nnon_lable_ds_ref_pngpath=r'F:/工作文件/论文发表/葡萄主产区优势对比/测试数据/20190423_S2A.jpg'\r\n\r\ndef getSplitImageAndImageByMutilBands():\r\n# lable=[]\r\n# image=[]\r\n \r\n ####获取非标签原始影像的属性信息\r\n non_lable_ds=gdal.Open(file_path)\r\n ###获取放射变换信息\r\n non_lable_transform = non_lable_ds.GetGeoTransform()\r\n non_lable_xOrigin = non_lable_transform[0]\r\n non_lable_yOrigin = non_lable_transform[3]\r\n non_lable_pixelWidth = non_lable_transform[1]\r\n non_lable_pixelHeight = non_lable_transform[5]\r\n non_lable_cols=non_lable_ds.RasterXSize\r\n non_lable_rows=non_lable_ds.RasterYSize\r\n \r\n non_lable_ds_ref=io.imread(file_path)\r\n print(non_lable_ds_ref.shape)\r\n# non_lable_ds_ref_B234=np.array(non_lable_ds_ref[:,:,0:3],dtype=int)\r\n# imageio.imwrite(non_lable_ds_ref_pngpath, non_lable_ds_ref_B234)\r\n# cv2.imwrite('20190423_S2A.jpg', non_lable_ds_ref_B234)\r\n\r\n non_lable_ds_ref_B2=np.array(non_lable_ds_ref[:,:,0],dtype=float)\r\n non_lable_ds_ref_B2[non_lable_ds_ref_B2==65536]=NaN\r\n# non_lable_ds_ref_B2[non_lable_ds_ref_B2==1]=NaN\r\n non_lable_Max_B2=non_lable_ds_ref_B2[~np.isnan(non_lable_ds_ref_B2)].max()\r\n non_lable_Min_B2=non_lable_ds_ref_B2[~np.isnan(non_lable_ds_ref_B2)].min()\r\n \r\n non_lable_ds_ref_B3=np.array(non_lable_ds_ref[:,:,1],dtype=float)\r\n non_lable_ds_ref_B3[non_lable_ds_ref_B3==65536]=NaN\r\n non_lable_Max_B3=non_lable_ds_ref_B3[~np.isnan(non_lable_ds_ref_B3)].max()\r\n non_lable_Min_B3=non_lable_ds_ref_B3[~np.isnan(non_lable_ds_ref_B3)].min()\r\n \r\n non_lable_ds_ref_B4=np.array(non_lable_ds_ref[:,:,2],dtype=float)\r\n non_lable_ds_ref_B4[non_lable_ds_ref_B4==65536]=NaN\r\n non_lable_Max_B4=non_lable_ds_ref_B4[~np.isnan(non_lable_ds_ref_B4)].max()\r\n non_lable_Min_B4=non_lable_ds_ref_B4[~np.isnan(non_lable_ds_ref_B4)].min()\r\n \r\n non_lable_ds_ref_B8=np.array(non_lable_ds_ref[:,:,3],dtype=float)\r\n non_lable_ds_ref_B8[non_lable_ds_ref_B8==65536]=NaN\r\n non_lable_Max_B8=non_lable_ds_ref_B8[~np.isnan(non_lable_ds_ref_B8)].max()\r\n non_lable_Min_B8=non_lable_ds_ref_B8[~np.isnan(non_lable_ds_ref_B8)].min()\r\n \r\n \r\n del non_lable_ds_ref, non_lable_ds_ref_B2,non_lable_ds_ref_B3,non_lable_ds_ref_B4\r\n ####获取标签影像的属性信息\r\n \r\n lable_ds=gdal.Open(lable_path)\r\n ###获取放射变换信息\r\n lable_transform = lable_ds.GetGeoTransform()\r\n lable_xOrigin = lable_transform[0]\r\n lable_yOrigin = lable_transform[3]\r\n lable_pixelWidth = lable_transform[1]\r\n lable_pixelHeight = lable_transform[5]\r\n lable_cols=lable_ds.RasterXSize\r\n lable_rows=lable_ds.RasterYSize\r\n \r\n# lableimgs=imageio.imread(lable_path)\r\n \r\n ###循环标签TIF的行列进行寻找256*256的标签PNG\r\n labe_counts=0\r\n non_grape_traincountes=0\r\n grape_traincounts=0\r\n non_grapecunts=0 #对非葡萄地块的相片进行计数,10选1\r\n jpgwidth=224\r\n \r\n \r\n rowImageCount=int(non_lable_rows/jpgwidth)\r\n 
colImageCount=int(non_lable_cols/jpgwidth)\r\n print('行照片数',rowImageCount,'列照片数',colImageCount)\r\n \r\n for r in range(rowImageCount):\r\n for c in range(colImageCount):\r\n print('导出第',r,'行',c,'列')\r\n ###获取非标签位置\r\n non_lable_xOffset = c*jpgwidth\r\n non_lable_yOffset = r*jpgwidth\r\n \r\n \r\n non_lableArray_B2=non_lable_ds.GetRasterBand(1).ReadAsArray(non_lable_xOffset,non_lable_yOffset,jpgwidth,jpgwidth)\r\n non_lableArray_B2 = (non_lableArray_B2-non_lable_Min_B2)*255/(non_lable_Max_B2-non_lable_Min_B2) # (矩阵元素-最小值)/(最大值-最小值) \r\n non_lableArray_B2[non_lableArray_B2>255]=255\r\n \r\n non_lableArray_B3=non_lable_ds.GetRasterBand(2).ReadAsArray(non_lable_xOffset,non_lable_yOffset,jpgwidth,jpgwidth)\r\n non_lableArray_B3 = (non_lableArray_B3-non_lable_Min_B3)*255/(non_lable_Max_B3-non_lable_Min_B3) # (矩阵元素-最小值)/(最大值-最小值) \r\n non_lableArray_B3[non_lableArray_B3>255]=255\r\n \r\n non_lableArray_B4=non_lable_ds.GetRasterBand(3).ReadAsArray(non_lable_xOffset,non_lable_yOffset,jpgwidth,jpgwidth)\r\n non_lableArray_B4 = (non_lableArray_B4-non_lable_Min_B4)*255/(non_lable_Max_B4-non_lable_Min_B4) # (矩阵元素-最小值)/(最大值-最小值) \r\n non_lableArray_B4[non_lableArray_B4>255]=255\r\n \r\n non_lableArray_B8=non_lable_ds.GetRasterBand(3).ReadAsArray(non_lable_xOffset,non_lable_yOffset,jpgwidth,jpgwidth)\r\n non_lableArray_B8 = (non_lableArray_B8-non_lable_Min_B8)*255/(non_lable_Max_B8-non_lable_Min_B8) # (矩阵元素-最小值)/(最大值-最小值) \r\n non_lableArray_B8[non_lableArray_B8>255]=255\r\n# non_lableArray_B8=non_lable_ds.GetRasterBand(4).ReadAsArray(non_lable_xOffset,non_lable_yOffset,512,512)\r\n# non_lableArray = (non_lableArray-non_lable_Min)*255/(non_lable_Max-non_lable_Min) # (矩阵元素-最小值)/(最大值-最小值) \r\n# \r\n non_lableArray = np.zeros((jpgwidth,jpgwidth,3))\r\n# ####导入R,G,B三个波段\r\n# non_lableArray[:,:,0]=non_lableArray_B4\r\n# non_lableArray[:,:,1]=non_lableArray_B3\r\n# non_lableArray[:,:,2]=non_lableArray_B2\r\n \r\n# ####导入NIR,R,G三个波段\r\n# non_lableArray[:,:,0]=non_lableArray_B8\r\n# non_lableArray[:,:,1]=non_lableArray_B4\r\n# non_lableArray[:,:,2]=non_lableArray_B3\r\n \r\n ####导入NIR,G,B三个波段\r\n non_lableArray[:,:,0]=non_lableArray_B4\r\n non_lableArray[:,:,1]=non_lableArray_B3\r\n non_lableArray[:,:,2]=non_lableArray_B2\r\n \r\n non_lableArray =np.array(non_lableArray,dtype=int)\r\n print(non_lableArray.shape)\r\n \r\n ###获得标签的256*256的数组\r\n lable_x=non_lable_xOffset*non_lable_pixelWidth+non_lable_xOrigin\r\n lable_y=non_lable_yOffset*non_lable_pixelHeight+non_lable_yOrigin\r\n \r\n lable_xOffset = int((lable_x-lable_xOrigin)/lable_pixelWidth)\r\n lable_yOffset = int((lable_y-lable_yOrigin)/lable_pixelHeight)\r\n \r\n lableArray=lable_ds.GetRasterBand(1).ReadAsArray(lable_xOffset,lable_yOffset,jpgwidth,jpgwidth)\r\n# print(lableArray)\r\n \r\n count=np.sum(lableArray == 1)\r\n scale=count/(jpgwidth*jpgwidth)\r\n \r\n if scale>0:\r\n save_non_grape=r'H:/gansu\\wuwei/DataSet/WaitClassificationImage/'+str(r)+'-'+str(c)+'-'+'1'+'.png'\r\n cv2.imwrite(save_non_grape, non_lableArray)\r\n save_label_grape=r'H:/gansu\\wuwei/DataSet/WaitClassificationImage/mask/'+str(r)+'-'+str(c)+'-'+'1'+'-mask.png'\r\n cv2.imwrite(save_label_grape, lableArray)\r\n else:\r\n save_non_grape=r'H:/gansu\\wuwei/DataSet/WaitClassificationImage/'+str(r)+'-'+str(c)+'-'+'0'+'.png'\r\n cv2.imwrite(save_non_grape, non_lableArray)\r\n \r\n \r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\": \r\n \r\n ###定义工作空间\r\n os.chdir(r'H:\\gansu\\wuwei\\DataSet')\r\n \r\n getSplitImageAndImageByMutilBands()\r\n \r\n 
print('complete!')\r\n\r\n","repo_name":"devilweil/CNN-Image-Segmentation-ResNet","sub_path":"SplitImageToClassification.py","file_name":"SplitImageToClassification.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"23676772630","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom IPython.display import Image\r\n\r\n#split the image into the B,G,R components\r\nimg_NZ_bgr = cv2.imread(\"New_Zealand_Lake.jpg\",cv2.IMREAD_COLOR)\r\nb,g,r = cv2.split(img_NZ_bgr)\r\n\r\n#show the channels\r\nplt.figure(figsize=[20,5])\r\nplt.subplot(141);plt.imshow(r,cmap='gray');plt.title(\"Red Channel\");\r\nplt.subplot(142);plt.imshow(g,cmap='gray');plt.title(\"Green Channel\");\r\nplt.subplot(143);plt.imshow(b,cmap='gray');plt.title(\"Blue Channel\");\r\n\r\n#merge the individual channels into a BGR image\r\nimgMerged = cv2.merge((b,g,r))\r\n\r\n#show the merged output\r\nplt.subplot(144);plt.imshow(imgMerged[:,:,::-1]);plt.title(\"Merged Output\");\r\n\r\n#openCV stores color channels in a differnet order than most other applications (BGR vs RGB).\r\nimg_NZ_rgb = cv2.cvtColor(img_NZ_bgr, cv2.COLOR_BGR2RGB)\r\nimg_hsv = cv2.cvtColor(img_NZ_bgr, cv2.COLOR_BGR2HSV)\r\n\r\n#split the image into the H,S,V components\r\nh,s,v = cv2.split(img_hsv)\r\n\r\n#show the channels\r\nplt.figure(figsize=[20,5])\r\nplt.subplot(141);plt.imshow(h,cmap='gray');plt.title(\"H Channel\");\r\nplt.subplot(142);plt.imshow(s,cmap='gray');plt.title(\"S Channel\");\r\nplt.subplot(143);plt.imshow(v,cmap='gray');plt.title(\"V Channel\");\r\nplt.subplot(144);plt.imshow(img_NZ_rgb);plt.title(\"Original\");\r\n\r\n#increase hue by 10\r\nh_new = h+10\r\nimg_NZ_merged = cv2.merge((h_new,s,v))\r\nimg_NZ_rgb = cv2.cvtColor(img_NZ_merged, cv2.COLOR_HSV2RGB)\r\n\r\n#show the channels\r\nplt.figure(figsize=[20,5])\r\nplt.subplot(141);plt.imshow(h,cmap='gray');plt.title(\"H Channel\");\r\nplt.subplot(142);plt.imshow(s,cmap='gray');plt.title(\"S Channel\");\r\nplt.subplot(143);plt.imshow(v,cmap='gray');plt.title(\"V Channel\");\r\nplt.subplot(144);plt.imshow(img_NZ_rgb);plt.title(\"Modified\");\r\n\r\n#save the image\r\ncv2.imwrite(\"New_Zealand_Lake_SAVED.png\", img_NZ_bgr)\r\n\r\nImage(filename=\"New_Zealand_Lake_SAVED.png\")\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"mkw9665/OpenCV_colour_channels","sub_path":"OpenCV_colour_channels/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2366993512","text":"import openpyxl\r\nimport datetime\r\nimport os\r\nimport pyminizip\r\nimport lxml.etree as etree\r\n\r\n\r\ndef procXL(zip_path, xlsx_file, tempdir, bnkPass):\r\n workbook = openpyxl.load_workbook(filename=xlsx_file, data_only=True)\r\n # check that the required sheets are in the Excel File\r\n setXLFiles = set(workbook.sheetnames)\r\n if not {'Header Record', 'Payment Information Record', 'Credit Instruction Record', 'Control',\r\n 'Control Data (Hidden)'}.issubset(setXLFiles):\r\n\r\n critical_err = 'The XL File is not structured properly'\r\n print (critical_err)\r\n input('Press Enter to terminate.')\r\n raise Exception(critical_err)\r\n\r\n # Build the XML document\r\n nsmap = {\r\n 'xsi': \"http://www.w3.org/2001/XMLSchema-instance\",\r\n None: \"urn:iso:std:iso:20022:tech:xsd:pain.001.001.03\"\r\n }\r\n root = etree.Element('Document', nsmap=nsmap)\r\n\r\n # Fill in the Computed MsgId and PmtInfld to (necessary if the user leaves these blank)\r\n sh = workbook['Control Data (Hidden)']\r\n computedMsgId = sh['B18'].value\r\n computedPmtInfld = sh['B20'].value\r\n\r\n CstmrCdtTrfInitn = etree.SubElement(root, 'CstmrCdtTrfInitn')\r\n\r\n # Header Record\r\n CstmrCdtTrfInitn = bldHeader(CstmrCdtTrfInitn, computedMsgId, workbook)\r\n\r\n # This tag covers both PIR and CIR sections\r\n PmtInf = etree.SubElement(CstmrCdtTrfInitn, \"PmtInf\")\r\n\r\n # Payment Information Record\r\n PmtInf = bldPIR(PmtInf, computedPmtInfld, workbook)\r\n\r\n # Credit Instruction Record\r\n PmtInf = bldCIR(PmtInf, workbook)\r\n\r\n datastr = etree.tostring(root, xml_declaration=True, encoding='utf-8', pretty_print=True)\r\n\r\n # Get the name of the XML file that will store the transactions\r\n sh = workbook['Control']\r\n xmlFile = sh['B2'].value\r\n if xmlFile is None or xmlFile.strip() == '':\r\n critical_err = 'Invalid SCT file name'\r\n print (critical_err)\r\n input('Press Enter to terminate.')\r\n raise Exception(critical_err)\r\n\r\n srcFile = xmlFile.strip() + \".SCT\"\r\n fileSCT = os.path.join(tempdir, srcFile)\r\n try:\r\n with open(fileSCT, 'wb') as file:\r\n file.write(datastr)\r\n except:\r\n critical_err = 'Unable to create SCT file'\r\n print('\\n\\n' + critical_err+ '\\n\\n')\r\n input('Press Enter to terminate.')\r\n raise Exception(critical_err)\r\n\r\n # package everything in the zip file\r\n # in web interface replace C:\\Temp with tempdir as the file will be emailed\r\n zipSCTE = os.path.join(zip_path, xmlFile.strip() + \".SCTE\")\r\n pyminizip.compress(fileSCT, None, zipSCTE, bnkPass, 0)\r\n\r\n\r\ndef bldCIR(PmtInf, workbook):\r\n sh = workbook['Credit Instruction Record']\r\n\r\n row = 5\r\n lstEndToEndId = []\r\n # list stores the sEndToEndId values. 
If there are duplicate entries raises an exception - ACB 202309\r\n while row < 200:\r\n sInstrId = sh['A' + str(row)].value\r\n if sInstrId is None:\r\n break\r\n\r\n result = bldCIRrow(sh, PmtInf, workbook, row)\r\n PmtInf = result[0]\r\n sEndToEndId = result[1]\r\n\r\n if sEndToEndId in lstEndToEndId:\r\n critical_err = 'EndToEndId {0} has been already used in this batch'.format(sEndToEndId)\r\n print('\\n\\n' + critical_err+ '\\n\\n')\r\n input('Press Enter to terminate.')\r\n raise Exception(critical_err)\r\n else:\r\n lstEndToEndId.append(sEndToEndId)\r\n \r\n row += 2\r\n\r\n return PmtInf\r\n\r\n\r\ndef bldCIRrow(sh, PmtInf, workbook, row):\r\n # Process the particular row\r\n\r\n # Read the Fields from this worksheet\r\n sInstrId = sh['A' + str(row)].value.strip()\r\n sEndToEndId = sh['B' + str(row)].value.strip()\r\n sCcy = sh['C' + str(row)].value.strip()\r\n sInstdAmt = '{0:.2f}'.format(sh['D' + str(row)].value)\r\n sBIC = sh['E' + str(row)].value.strip()\r\n sNm = sh['F' + str(row)].value.strip()\r\n sAdrLine1 = sh['G5'].value\r\n if sAdrLine1 is None:\r\n sAdrLine1 = ''\r\n else:\r\n sAdrLine1 = sAdrLine1.strip()\r\n sAdrLine2 = sh['H5'].value\r\n if sAdrLine2 is None:\r\n sAdrLine2 = ''\r\n else:\r\n sAdrLine2 = sAdrLine2.strip()\r\n if sAdrLine1 == '':\r\n sAdrLine1 = sAdrLine2\r\n sIBAN = sh['I' + str(row)].value.strip()\r\n sCd = sh['J' + str(row)].value.strip()\r\n sUstrd = sh['K' + str(row)].value.strip()\r\n\r\n CdtTrfTxInf = etree.SubElement(PmtInf, \"CdtTrfTxInf\")\r\n PmtId = etree.SubElement(CdtTrfTxInf, \"PmtId\")\r\n InstrId = etree.SubElement(PmtId, \"InstrId\")\r\n InstrId.text = sInstrId\r\n EndToEndId = etree.SubElement(PmtId, \"EndToEndId\")\r\n EndToEndId.text = sEndToEndId\r\n Amt = etree.SubElement(CdtTrfTxInf, \"Amt\")\r\n InstdAmt = etree.SubElement(Amt, \"InstdAmt\")\r\n InstdAmt.set('Ccy', sCcy)\r\n InstdAmt.text = sInstdAmt\r\n CdtrAgt = etree.SubElement(CdtTrfTxInf, \"CdtrAgt\")\r\n FinInstnId = etree.SubElement(CdtrAgt, \"FinInstnId\")\r\n BIC = etree.SubElement(FinInstnId, \"BIC\")\r\n BIC.text = sBIC\r\n Cdtr = etree.SubElement(CdtTrfTxInf, \"Cdtr\")\r\n Nm = etree.SubElement(Cdtr, \"Nm\")\r\n Nm.text = sNm\r\n # Only fill in the subnodes if the address lines are not blank\r\n if sAdrLine1 != \"\":\r\n PstlAdr = etree.SubElement(Cdtr, \"PstlAdr\")\r\n AdrLine1 = etree.SubElement(PstlAdr, \"AdrLine\")\r\n AdrLine1.text = sAdrLine1\r\n AdrLine2 = etree.SubElement(PstlAdr, \"AdrLine\")\r\n AdrLine2.text = sAdrLine2\r\n CdtrAcct = etree.SubElement(CdtTrfTxInf, \"CdtrAcct\")\r\n Id = etree.SubElement(CdtrAcct, \"Id\")\r\n IBAN = etree.SubElement(Id, \"IBAN\")\r\n IBAN.text = sIBAN\r\n Purp = etree.SubElement(CdtTrfTxInf, \"Purp\")\r\n Cd = etree.SubElement(Purp, \"Cd\")\r\n Cd.text = sCd\r\n RmtInf = etree.SubElement(CdtTrfTxInf, \"RmtInf\")\r\n Ustrd = etree.SubElement(RmtInf, \"Ustrd\")\r\n Ustrd.text = sUstrd\r\n\r\n return PmtInf, sEndToEndId\r\n\r\n\r\ndef bldPIR(PmtInf, computedPmtInfld, workbook):\r\n sh = workbook['Payment Information Record']\r\n\r\n # Read the Fields from this worksheet\r\n sPmtInfId = sh['A5'].value\r\n if sPmtInfId is None:\r\n sPmtInfId = computedPmtInfld\r\n sPmtInfId = sPmtInfId.strip()\r\n # Cechk for a space condition\r\n if sPmtInfId == '':\r\n sPmtInfId = computedPmtInfld.strip()\r\n\r\n sPmtMtd = sh['B5'].value.strip()\r\n sBtchBookg = sh['C5'].value.strip()\r\n sNbOfTxs = str(int(sh['D5'].value))\r\n sCtrlSum = '{0:.2f}'.format(sh['E5'].value)\r\n sCd = sh['F5'].value.strip()\r\n sReqdExctnDt = sh['G5'].value\r\n 
sReqdExctnDt = datetime.datetime.strftime(sReqdExctnDt, '%Y-%m-%d')\r\n sNm = sh['H5'].value.strip()\r\n sAdrLine1 = sh['I5'].value\r\n if sAdrLine1 is None:\r\n sAdrLine1 = ''\r\n else:\r\n sAdrLine1 = sAdrLine1.strip()\r\n sAdrLine2 = sh['J5'].value\r\n if sAdrLine2 is None:\r\n sAdrLine2 = ''\r\n else:\r\n sAdrLine2 = sAdrLine2.strip()\r\n if sAdrLine1 == '':\r\n sAdrLine1 = sAdrLine2\r\n sIBAN = sh['K5'].value.strip()\r\n sCcy = sh['L5'].value.strip()\r\n sBIC = sh['M5'].value.strip()\r\n\r\n PmtInfId = etree.SubElement(PmtInf, \"PmtInfId\")\r\n PmtInfId.text = sPmtInfId\r\n PmtMtd = etree.SubElement(PmtInf, \"PmtMtd\")\r\n PmtMtd.text = sPmtMtd\r\n BtchBookg = etree.SubElement(PmtInf, \"BtchBookg\")\r\n BtchBookg.text = sBtchBookg\r\n NbOfTxs = etree.SubElement(PmtInf, \"NbOfTxs\")\r\n NbOfTxs.text = sNbOfTxs\r\n CtrlSum = etree.SubElement(PmtInf, \"CtrlSum\")\r\n CtrlSum.text = sCtrlSum\r\n PmtTpInf = etree.SubElement(PmtInf, \"PmtTpInf\")\r\n SvcLvl = etree.SubElement(PmtTpInf, \"SvcLvl\")\r\n Cd = etree.SubElement(SvcLvl, \"Cd\")\r\n Cd.text = sCd\r\n ReqdExctnDt = etree.SubElement(PmtInf, \"ReqdExctnDt\")\r\n ReqdExctnDt.text = sReqdExctnDt\r\n Dbtr = etree.SubElement(PmtInf, \"Dbtr\")\r\n Nm = etree.SubElement(Dbtr, \"Nm\")\r\n Nm.text = sNm\r\n # Only fill in the subnodes if the address lines are not blank\r\n if sAdrLine1 != \"\":\r\n PstlAdr = etree.SubElement(Dbtr, \"PstlAdr\")\r\n AdrLine1 = etree.SubElement(PstlAdr, \"AdrLine\")\r\n AdrLine1.text = sAdrLine1\r\n # Only fill if Address line 2 is not null\r\n if sAdrLine2 != \"\":\r\n AdrLine2 = etree.SubElement(PstlAdr, \"AdrLine\")\r\n AdrLine2.text = sAdrLine2\r\n DbtrAcct = etree.SubElement(PmtInf, \"DbtrAcct\")\r\n Id = etree.SubElement(DbtrAcct, \"Id\")\r\n IBAN = etree.SubElement(Id, \"IBAN\")\r\n IBAN.text = sIBAN\r\n Ccy = etree.SubElement(DbtrAcct, \"Ccy\")\r\n Ccy.text = sCcy\r\n DbtrAgt = etree.SubElement(PmtInf, \"DbtrAgt\")\r\n FinInstnId = etree.SubElement(DbtrAgt, \"FinInstnId\")\r\n BIC = etree.SubElement(FinInstnId, \"BIC\")\r\n BIC.text = sBIC\r\n\r\n return PmtInf\r\n\r\n\r\ndef bldHeader(CstmrCdtTrfInitn, computedMsgId, workbook):\r\n sh = workbook['Header Record']\r\n\r\n # Read the Fields from this worksheet\r\n sMsgId = sh['A5'].value\r\n if sMsgId is None:\r\n sMsgId = computedMsgId\r\n sMsgId = sMsgId.strip()\r\n # check for a space condition\r\n if sMsgId == '':\r\n sMsgId = computedMsgId.strip()\r\n\r\n sCreDtTm = str(sh['B5'].value)\r\n # cater for different formats with microseconds and without\r\n try:\r\n sCreDtTm = datetime.datetime.strptime(sCreDtTm, \"%Y-%m-%d %H:%M:%S.%f\").replace(microsecond=0).isoformat()\r\n except:\r\n sCreDtTm = datetime.datetime.strptime(sCreDtTm, \"%Y-%m-%d %H:%M:%S\").isoformat()\r\n\r\n sNbOfTxs = str(int(sh['C5'].value))\r\n sCtrlSum = '{0:.2f}'.format(sh['D5'].value)\r\n sNm = sh['E5'].value.strip()\r\n sId = sh['F5'].value.strip()\r\n\r\n GrpHdr = etree.SubElement(CstmrCdtTrfInitn, \"GrpHdr\")\r\n MsgId = etree.SubElement(GrpHdr, \"MsgId\")\r\n MsgId.text = sMsgId\r\n CreDtTm = etree.SubElement(GrpHdr, \"CreDtTm\")\r\n CreDtTm.text = sCreDtTm\r\n NbOfTxs = etree.SubElement(GrpHdr, \"NbOfTxs\")\r\n NbOfTxs.text = sNbOfTxs\r\n CtrlSum = etree.SubElement(GrpHdr, \"CtrlSum\")\r\n CtrlSum.text = sCtrlSum\r\n InitgPty = etree.SubElement(GrpHdr, \"InitgPty\")\r\n Nm = etree.SubElement(InitgPty, \"Nm\")\r\n Nm.text = sNm\r\n Id1 = etree.SubElement(InitgPty, \"Id\")\r\n OrgId = etree.SubElement(Id1, \"OrgId\")\r\n Othr = etree.SubElement(OrgId, \"Othr\")\r\n Id2 = 
etree.SubElement(Othr, \"Id\")\r\n Id2.text = sId\r\n\r\n return CstmrCdtTrfInitn\r\n","repo_name":"chribonn/bnkSEPA","sub_path":"procXlsx.py","file_name":"procXlsx.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"35476608346","text":"import json\nimport os\nimport codecs\n\n\nclass SingleCharacter:\n\n def __init__(self, mod_file_path: str):\n self.file_path = mod_file_path\n self.root_dir = os.path.dirname(self.file_path)\n # load json file\n with open(self.file_path, 'r') as f:\n self.data = json.load(f, strict=False)\n\n self.aic = self.data['AIC']\n self.aiv = None\n self.aiv_base_dir = None\n self.troops = None\n self.assets = None\n self.assets_speech_dir = None\n self.assets_binks_dir = None\n if 'AIV' in self.data.keys():\n self.aiv = self.data['AIV']\n self.aiv_base_dir = os.path.join(self.root_dir, self.aiv['base_dir'])\n if 'Troops' in self.data.keys():\n self.troops = self.data['Troops']\n if 'Assets' in self.data.keys():\n self.assets = self.data['Assets']\n self.assets_speech_dir = os.path.join(self.root_dir, self.assets['base_dir'], self.assets['Speech']['base_dir'])\n self.assets_binks_dir = os.path.join(self.root_dir, self.assets['base_dir'], self.assets['Binks']['base_dir'])\n\n def get_num_speech_assets(self):\n fx_files = [item for sublist in self.assets['Speech'].values() for item in sublist]\n return len(fx_files)\n\n def get_num_binks_assets(self):\n bik_files = [item for sublist in self.assets['Binks'].values() for item in sublist]\n return len(bik_files)\n\n\nclass BaseMod:\n\n def __init__(self,\n base_file_path: str,\n vanilla_path: str):\n self.base_file_path = base_file_path\n self.base_dir = os.path.dirname(self.base_file_path)\n self.vanilla_path = vanilla_path\n with open(self.base_file_path, 'r') as f:\n self.base_file = json.load(f, strict=False)\n self.aic = None\n self.troops = None\n self.aiv = None\n self.assets = None\n self.load_files()\n\n def load_files(self) -> None:\n \"\"\"\n load all the relevant files from base mod config file\n \"\"\"\n # Load AIC file\n if self.base_file['AIC']:\n with codecs.open(os.path.join(self.base_dir, self.base_file['AIC']), 'r', 'utf-8-sig') as f:\n self.aic = json.load(f, strict=False)\n else:\n # load Vanilla AIC\n with open(os.path.join(self.vanilla_path, 'vanilla_aic.json'), 'r') as f:\n self.aic = json.load(f, strict=False)\n\n # Load Troops file\n if self.base_file['Troops']:\n with codecs.open(os.path.join(self.base_dir, self.base_file['Troops']), 'r', 'utf-8-sig') as f:\n self.troops = json.load(f, strict=False)\n else:\n # load Vanilla Troops\n with open(os.path.join(self.vanilla_path, 'vanilla_troops.json'), 'r') as f:\n self.troops = json.load(f, strict=False)\n\n # Load AIV file\n if self.base_file['AIV']:\n with open('char_aiv.json', 'r') as f:\n self.aiv = json.load(f, strict=False)\n for char in self.aiv.keys():\n for item in self.aiv[char].items():\n self.aiv[char][item[0]] = os.path.join(os.path.abspath(self.base_file['AIV']), item[1])\n else:\n # load Vanilla AIV\n with open(os.path.join(self.vanilla_path, 'vanilla_aiv.json'), 'r') as f:\n self.aiv = json.load(f, strict=False)\n for char in self.aiv.keys():\n for item in self.aiv[char].items():\n self.aiv[char][item[0]] = os.path.join(self.vanilla_path, item[1])\n\n # Load Assets file\n if self.base_file['Assets']:\n with open(os.path.join(self.base_dir, self.base_file['Assets']), 'r') as f:\n self.assets = json.load(f, strict=False)\n else:\n # load Vanilla Assets\n with open(os.path.join(self.vanilla_path, 'vanilla_assets.json'), 'r') as f:\n self.assets = json.load(f, strict=False)\n for char in self.assets.keys():\n for item in self.assets[char]['Speech'].items():\n self.assets[char]['Speech'][item[0]] = os.path.join(self.vanilla_path, 
'assets', item[1])\n for item in self.assets[char]['Binks'].items():\n self.assets[char]['Binks'][item[0]] = os.path.join(self.vanilla_path, 'assets', item[1])\n\n def update_character(self, new_char: SingleCharacter, replace_char: str = 'Wazir') -> bool:\n \"\"\"\n Update characters\n :param new_char:\n :param replace_char:\n :return: showing update success\n \"\"\"\n\n characters = ['Caliph', 'Frederick', 'Pig', 'Phillip', 'Richard', 'Rat', 'Saladin', 'Sheriff', 'Snake',\n 'Sultan', 'Wolf', 'Abbot', 'Marshall', 'Nizar', 'Emir', 'Wazir']\n if replace_char not in characters:\n raise Exception('Invalid standard character name given for update')\n\n # update AIC\n # get index\n idx = [i for i in range(len(self.aic['AICharacters'])) if self.aic['AICharacters'][i]['Name'] == replace_char][0]\n # add Custom Name\n self.aic['AICharacters'][idx]['CustomName'] = new_char.aic['CustomName']\n self.aic['AICharacters'][idx]['Personality'] = new_char.aic['Personality']\n\n # update Troops\n if new_char.troops:\n # get index\n idx = [i for i in range(1, 17) if self.troops[str(i)]['Name'] == replace_char][0]\n # add custom troops configuration\n self.troops[str(idx)]['Lord'] = new_char.troops['Lord']\n self.troops[str(idx)]['normal'] = new_char.troops['normal']\n self.troops[str(idx)]['crusader'] = new_char.troops['crusader']\n self.troops[str(idx)]['deathmatch'] = new_char.troops['deathmatch']\n\n # update AIV\n if new_char.aiv:\n for i, item in enumerate(self.aiv[replace_char].items()):\n self.aiv[replace_char][item[0]] = os.path.join(os.path.abspath(new_char.aiv_base_dir),\n new_char.aiv[str(i+1)])\n\n # Update Assets\n if new_char.assets:\n new_speech_dict = dict.fromkeys(self.assets[replace_char]['Speech'], \"\")\n new_binks_dict = dict.fromkeys(self.assets[replace_char]['Binks'], \"\")\n special_character = False\n if self.assets[replace_char]['SpecialCharacter']:\n # special character (Pig, Rat, Snake, Wolf) -> different speech files (only 22 files)\n special_character = True\n\n # Update speech file paths\n with open('char_speech.json', 'r') as f:\n sp_info = json.load(f)\n for item in new_char.assets['Speech']['actions'].items():\n key_pre = sp_info[replace_char]['prefix'] + '_' + item[0]\n if item[1]:\n if special_character and 'player' in item[0]:\n keys = [key_pre for i\n in range(len(item[1][:sp_info[replace_char]['actions'][item[0]]]))]\n else:\n keys = [key_pre + '_' + str(i + 1).zfill(2) for i\n in range(len(item[1][:sp_info[replace_char]['actions'][item[0]]]))]\n\n for i, k in enumerate(keys):\n new_speech_dict[k] = os.path.join(new_char.assets_speech_dir, item[1][i])\n self.assets[replace_char]['Speech'] = new_speech_dict\n\n # Update binks videos\n with open('char_binks.json', 'r') as f:\n bik_info = json.load(f)\n for item in new_char.assets['Binks']['actions'].items():\n if item[1]:\n action = item[0]\n # anger or angry?\n if item[0] == 'anger':\n if bik_info[replace_char]['actions']['anger'] == 0 and bik_info[replace_char]['actions']['angry'] == 1:\n action = 'angry'\n # taunt or taunting?\n elif item[0] == 'taunt':\n if bik_info[replace_char]['actions']['taunt'] == 0 and bik_info[replace_char]['actions']['taunting'] == 1:\n action = 'taunting'\n # create the keys\n if bik_info[replace_char]['numerate_files']:\n keys = [bik_info[replace_char]['prefix'] + '_' + action + str(i+1)\n for i in range(len(item[1][:bik_info[replace_char]['actions'][item[0]]]))]\n else:\n keys = [bik_info[replace_char]['prefix'] + '_' + action\n for i in 
range(len(item[1][:bik_info[replace_char]['actions'][item[0]]]))]\n for i, k in enumerate(keys):\n new_binks_dict[k] = os.path.join(new_char.assets_binks_dir, item[1][i])\n self.assets[replace_char]['Binks'] = new_binks_dict\n\n return True\n\n def save_base_mod(self, output_dir: str, output_name: str = 'my_mod') -> None:\n # save aic file\n with open(os.path.join(output_dir, output_name + '_aic.json'), 'w') as f:\n json.dump(self.aic, f)\n\n # save troops file\n with open(os.path.join(output_dir, output_name + '_troops.json'), 'w') as f:\n json.dump(self.troops, f)\n\n # save aiv file\n with open(os.path.join(output_dir, output_name + '_aiv.json'), 'w') as f:\n json.dump(self.aiv, f)\n\n # save assets file\n with open(os.path.join(output_dir, output_name + '_assets.json'), 'w') as f:\n json.dump(self.assets, f)\n\n","repo_name":"NMme/CrusaderAIManager","sub_path":"data_classes.py","file_name":"data_classes.py","file_ext":"py","file_size_in_byte":9791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21189751964","text":"import sqlite3\nfrom datetime import datetime, date\nfrom sqlite3 import Error\nimport os\n\ndef conexao_banco():\n dir = os.path.dirname(__file__)\n caminho = f\"{dir}/sistema_votacao.db\"\n con = None\n try:\n con = sqlite3.connect(caminho)\n return con\n except Error as error:\n print(error)\n\ndef inserir(insert):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(insert)\n con.commit()\n con.close()\n print(\"Inserido com sucesso\")\n except Error as error:\n print(error)\n\ndef atualizar(update):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(update)\n con.commit()\n con.close()\n print(\"Atualizado com sucesso\")\n except Error as error:\n print(error)\n\ndef deletar(delete):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(delete)\n con.commit()\n con.close()\n print(\"Removido com sucesso\")\n except Error as error:\n print(error)\n\ndef consultar(consultar):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(consultar)\n valores = cursor.fetchall()\n con.close()\n return valores\n except Error as error:\n print(error)\n\ndef consultar_cargos(consultar):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(consultar)\n dados = cursor.fetchall()\n dados = \" \".join(\"\".join(var) for var in dados)\n return dados\n except Error as error:\n print(error)\n\ndef consultar_candidatos(consultar):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(consultar)\n dados = cursor.fetchall()\n if str(dados) == \"[(None,)]\":\n pass\n else:\n dados = \" \".join(\"\".join(var) for var in dados)\n caracteres = '\"[]'\n subcaracter = \"'\"\n for i in range(len(caracteres)):\n dados = dados.replace(caracteres[i],\"\")\n dados = dados.replace(subcaracter, \"\")\n return dados\n except Error as error:\n print(error)\n\ndef consultar_cpf(consultar):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(consultar)\n valores = cursor.fetchall()\n #for i in valores:\n #print(i[0])\n con.close()\n return valores\n except Error as error:\n print(error)\n\ndef atualizar_data(update):\n try:\n con = conexao_banco()\n cursor = con.cursor()\n cursor.execute(update)\n con.commit()\n con.close()\n print(\"Atualizado com sucesso\")\n return True\n except Error as error:\n print(error)\n\n\n#data = datetime.today().date().strftime('%d-%m-%Y')\n#print(data)\n\n#query = f'SELECT data_inicio FROM eleicao WHERE data_inicio < \"{data}\";'\n#consultar_data(query)\n\n# query = 'INSERT INTO usuario (\"nome\", \"user_name\", \"senha\", \"tipo\", \"status\") VALUES (\"Elias de Oliveira Cacau\", \"EliasCacau\", \"123\", \"Usuário\", 0);'\n# query = 'INSERT INTO candidato (\"nome\", \"num_candidato\", \"votos\") VALUES (\"Elias de Oliveira Cacau\", \"4002\", 0);'\n# inserir(query)\n\n# set = f'UPDATE candidato SET votos=\"{votos}\" WHERE num_candidato LIKE 4002;'\n# atualizar(set)\n\n# delete = 'DELETE FROM usuario WHERE id=2;'\n# deletar(delete)\n\n\n#show = 'SELECT user_name, senha FROM usuario;'\n#consultar(query)","repo_name":"EliasCacau/sistema-votacao","sub_path":"banco_de_dados.py","file_name":"banco_de_dados.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37949116552","text":"\"\"\"empty message\n\nRevision ID: 556834bc19db\nRevises: 3765da66361a\nCreate Date: 2022-02-28 18:16:23.839211\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"556834bc19db\"\ndown_revision = \"3765da66361a\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"units\", sa.Column(\"tenant\", sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"units\", \"tenant\")\n # ### end Alembic commands ###\n","repo_name":"epam/badgerdoc","sub_path":"scheduler/alembic/versions/556834bc19db_.py","file_name":"556834bc19db_.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"}
+{"seq_id":"18129223455","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution(object):\n def constructMaximumBinaryTree(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n _max = -1\n for i in nums:\n if i > _max:\n _max = i\n\n root = TreeNode(_max)\n idx = nums.index(_max)\n arr1 = nums[:idx]\n arr2 = nums[idx + 1:]\n root.left = self.constructor(arr1, root)\n root.right = self.constructor(arr2, root)\n return root\n\n def constructor(self, arr, root):\n\n if len(arr) > 1:\n _max = -1\n for i in arr:\n if i > _max:\n _max = i\n idx = arr.index(_max)\n arr1 = arr[:idx]\n arr2 = arr[idx + 1:]\n n_root = TreeNode(_max)\n n_root.left = self.constructor(arr1, n_root)\n\n n_root.right = self.constructor(arr2, n_root)\n\n return n_root\n\n elif not arr:\n return None\n else:\n return TreeNode(arr[0])\n\n def dfs(self, roo):\n\n if roo:\n self.dfs(roo.left)\n\n self.dfs(roo.right)\n\n\ns = Solution()\ns.dfs(s.constructMaximumBinaryTree([3, 2, 1, 6, 0, 5]))\n","repo_name":"TOOFACK/DailyCodingPy","sub_path":"LeetCode/654. Maximum Binary Tree.py","file_name":"654. Maximum Binary Tree.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31679420409","text":"\"\"\" Added code to ask the user if they wanted to play again\n\"\"\"\n\nimport random\n\nQUESTIONS = [[\"Question 1 / What is number 1:\", \"Tahi\"],\n [\"Question 2 / What is number 2:\", \"Rua\"],\n [\"Question 3 / What is number 3:\", \"Toru\"],\n [\"Question 4 / What is number 4:\", \"Wha\"],\n [\"Question 5 / What is number 5:\", \"Rima\"],\n [\"Question 6 / What is number 6:\", \"Ono\"],\n [\"Question 7 / What is number 7:\", \"Whitu\"],\n [\"Question 8 / What is number 8:\", \"Waru\"],\n [\"Question 9 / What is number 9:\", \"Iwa\"],\n [\"Question 10 / What is number 10:\", \"Tekau\"]]\n# Functions go here\n# Yes no checker function\ndef yes_no(question_text):\n while True:\n\n # Ask the user if they have played before.\n answer = input(question_text).lower()\n\n # If they say yes, print 'Program Continues'.\n if answer == \"yes\" or answer == \"Yes\":\n return answer\n\n # If they say no, output 'Display Instructions'.\n elif answer == \"no\" or answer == \"No\":\n return answer\n\n # Otherwise - show error.\n else:\n print(\"error, please answer 'yes' or 'no'\")\n\n#Instructions Function\ndef instructions():\n print(\"*** How Quiz Works ***\")\n print()\n print(\"The instructions of the quiz will go here\")\n print(\"Program continues\")\n print()\n\n# Questions Function\ndef question(questions, score):\n while len(questions) != 0:\n # Picking Random Number from selection\n question_number = random.randrange(len(questions))\n\n # Getting the correct answer\n answer = questions[question_number][1]\n\n # Displaying Answer\n user_answer = input(f\"{questions[question_number][0]} \")\n del questions[question_number]\n\n # If user gets Correct Answer\n if user_answer == answer:\n score += 1\n print(\"Correct\")\n\n # If user gets Wrong Answer\n else:\n print(\"Incorrect\")\n end_quiz(score)\n\n\n# end quiz function\ndef end_quiz(score):\n\n # Telling user the score\n print(f\"You got {score}/10 questions correct\")\n\n # Asking if they want to play again\n play_again = yes_no(\"Would you like to play again? (Yes/No) \")\n\n # If user says yes, restart program\n if play_again == \"yes\":\n question(QUESTIONS, 0)\n\n # If no then sends thank you message\n elif play_again == \"no\":\n print(\"Thanks for playing!\")\n else:\n print(\"Invalid Input\")\n\nend_quiz(score=5)\n\n\n","repo_name":"joshuagiddy/counting_quiz","sub_path":"04_End_Quiz_v2.py","file_name":"04_End_Quiz_v2.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71556207122","text":"\ndef close_popup(d):\n \"\"\"关闭登录后各种弹窗提示\"\"\"\n while True:\n if d(resourceId=\"com.lang.lang:id/id_close\").exists(timeout=1):\n \"\"\"登录后的2019弹窗\"\"\"\n d(resourceId=\"com.lang.lang:id/id_close\").click()\n continue\n elif d(resourceId=\"com.lang.lang:id/daily_sign_close\").exists(timeout=1):\n \"\"\"登录后的签到提示\"\"\"\n d(resourceId=\"com.lang.lang:id/daily_sign_close\").click()\n continue\n elif d(resourceId=\"com.lang.lang:id/negativeButton\").exists(timeout=1):\n \"\"\"登录后的本日推荐提示\"\"\"\n d(resourceId=\"com.lang.lang:id/negativeButton\").click()\n continue\n elif d(resourceId=\"com.lang.lang:id/positiveButton\").exists(timeout=1):\n \"\"\"开播前温馨提示,开启系统权限\"\"\"\n d(resourceId=\"com.lang.lang:id/positiveButton\").click()\n d(resourceId=\"com.android.permissioncontroller:id/permission_allow_button\").click(timeout=2)\n continue\n else:\n break\n\n\ndef close_popup_livePK(d):\n \"\"\"关闭连麦PK的提示\"\"\"\n if d(resourceId=\"com.lang.lang:id/positiveButton\").exists(timeout=1):\n d(resourceId=\"com.lang.lang:id/positiveButton\").click()\n else:\n pass\n\n\ndef close_panel(d, num=1):\n \"\"\"\n 实现点击空白处关闭弹出的面板,有的功能会弹出多层面板,因此这里根据num执行点击次数\n :param num: 要关闭面板的数量\n :param d: 连接对象\n \"\"\"\n for i in range(num):\n d.click(0.713, 0.178)\n\n\ndef swipe_screen(d, direction=\"right\"):\n \"\"\"\n 实现整个屏幕左右划动(默认为向右划),可用于关闭各种弹出面板等场景\n :param direction: 划动方向\n :param d: 连接对象\n \"\"\"\n \"\"\"测试发现该方法有风险,如果前一操作因异常没有弹出面板,此时执行划屏操作可能导致退出app\"\"\"\n \"\"\"需要优化一下,先检测如果有弹出面板才去执行此方法\"\"\"\n if direction == \"right\":\n d.swipe_ext(\"right\", scale=0.9)\n elif direction == \"left\":\n d.swipe_ext(\"left\", scale=0.9)\n else:\n print(\"您没有选择划屏方向!!\")\n\n\ndef swipe_until(d):\n \"\"\"实现一直划屏,直到可以定位到某个元素\"\"\"\n while True:\n swipe_screen(d)\n if d(textContains=\"八八六十\", resourceId=\"com.lang.lang:id/tv_user_name\").wait(timeout=3):\n break\n\n\n# 监听弹窗\ndef watch_popup(d):\n \"\"\"实现关闭各种弹窗提示\"\"\"\n try:\n ctx = d.watch_context()\n # 登录后弹出的2019专属回忆提示\n # 通过底部关闭按钮x关闭\n ctx.when(\"com.lang.lang:id/id_close\").click()\n\n # 登录后弹出的日间签到提示\n # 通过右上角关闭按钮x关闭\n ctx.when(\"com.lang.lang:id/daily_sign_close\").click()\n\n # 登录后弹出的本日推荐提示\n # 新功能[本日推荐]上线啰,想要看到更多的推荐主播,需要您的协助给予权限\n ctx.when(\"前往\").when(\"我知道了\").click()\n\n ctx.wait_stable() # 等待界面不在有弹窗了\n\n # 开播前弹出的温馨提示,开启相关权限(安装app后第一次登录前也会弹;第一次登录后也会弹)\n # 由于您即将要开播,需要您允许以下的权限喔 -拍摄及录影 -存取装置中的档案 -允许定位服务\n ctx.when(\"继续\").click()\n ctx.when(\"取消\").when(\"确定\").click() # 开播前提示,点继续后会弹 取消/确定 # 连播PK的弹出提示也是用的这个\n ctx.when(\"允许\").click() # 登录前提示,点继续后会弹 拒绝/允许\n\n ctx.wait_stable() # 等待界面不在有弹窗了\n\n # 直播间弹出碎片合成弹窗提示\n ctx.when(\"去合成\").when(\"取消\").click()\n # 直播间弹出网络异常弹窗提示\n ctx.when(\"取消\").when(\"重新载入\").click()\n\n # 导航栏点击私信 弹出的提示\n # “建议开启应用程序通知功能 好友发出的讯息会通知您哦!”\n ctx.when(\"去开启\").when(\"取消\").click()\n\n # 导航栏点击个人中心弹出的领取现金提示\n # 无限量赚现金模式已开启\n # 通过右上角关闭按钮x关闭\n ctx.when(\"恭喜你\").when(\"com.lang.lang:id/dialog_close\").click()\n\n ctx.wait_stable() # 等待界面不在有弹窗了\n\n except Exception as e:\n print(e)\n\n # d.ctx.close()\n","repo_name":"xieguoyong/Lang-UI-Tests","sub_path":"lang/common/close_popup.py","file_name":"close_popup.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14674741594","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.measure import label, regionprops\r\nfrom skimage import color\r\n\r\ndef get_colors(elements):\r\n c = {\"blue\": 0,\"crimson\": 0,\"cobalt\": 0,\"cyan\": 0,\"green\": 0,\"lime\": 0,\"magenta\": 0,\"orange\": 0,\"red\" : 0,\"turquoise\": 0,\"violet\": 0,\"yellow\": 0,}\r\n for color in elements:\r\n if (0 <= color < 15 or 345 <= color <= 360):\r\n c['red'] += 1\r\n if (15 <= color < 45):\r\n c['orange'] += 1\r\n if (45 <= color < 75):\r\n c['yellow'] += 1\r\n if (75 <= color < 105):\r\n c['lime'] += 1\r\n if (105 <= color < 135):\r\n c['green'] += 1\r\n if (135 <= color < 165):\r\n c['turquoise'] += 1\r\n if (165 <= color < 195):\r\n c['cyan'] += 1\r\n if (195 <= color < 225):\r\n c['cobalt'] += 1\r\n if (225 <= color < 255):\r\n c['blue'] += 1\r\n if (255 <= color < 285):\r\n c['violet'] += 1\r\n if (285 <= color < 315):\r\n c['magenta'] += 1\r\n if (315 <= color < 345):\r\n c['crimson'] += 1 \r\n print(c)\r\n\r\nimage = plt.imread(\"balls_and_rects.png\")\r\nbinary = image.copy()[:, :, 0]\r\nbinary[binary > 0] = 1\r\n\r\nimage = color.rgb2hsv(image)[:, :, 0] * 360\r\n\r\nlabeled = label(binary)\r\nballs, rects = [], []\r\nprint(\"Number of all forms:\", np.max(labeled))\r\n\r\nfor region in regionprops(labeled):\r\n v = np.max(image[region.bbox[0]:region.bbox[2], region.bbox[1]:region.bbox[3]])\r\n if region.area == (region.image.shape[0] * region.image.shape[1]):\r\n rects.append(v)\r\n else:\r\n balls.append(v)\r\n\r\nprint(\"Circles:\", len(balls))\r\nget_colors(balls)\r\n\r\nprint(\"Rectangles:\", len(rects))\r\nget_colors(rects)\r\n\r\nplt.figure()\r\nplt.imshow(image)\r\nplt.show()\r\n","repo_name":"losttrollsssss/Computer-vision","sub_path":"forms_shades/form_shades.py","file_name":"form_shades.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12661862925","text":"def dapatkan_indeks(id, daftar_item):\n # Fungsi yang berfungsi mencari indeks dari id di daftar_item\n # Misal mencari indeks G1 di data_gadget\n # Return indeks tersebut, atau -1 jika tidak ada\n\tfor i in range(len(daftar_item)):\n\t\tif daftar_item[i][0] == id:\n\t\t\treturn i\n\treturn -1\t# Jika tidak ditemukan id tersebut, return -1\n\ndef dapatkan_indeks_col(id, daftar_item, col):\n # Fungsi yang berfungsi mencari indeks dari komponen kolom di daftar_item\n # Misal mencari indeks Dorayaki di data_gadget\n for i in range(len(daftar_item)):\n if daftar_item[i][col] == id:\n return i\n return -1\n\ndef csv_parser(location):\n csvfile = open(location, 'r')\n raw_list = csvfile.readlines()\n result_list = [pecah_string(x, ';') for x in raw_list]\n csvfile.close()\n return result_list\n\n\ndef pecah_string(s, pemisah):\n s = s.strip()\n isebelum = 0\n hasil = []\n for i in range(len(s)):\n if s[i] == pemisah:\n if s[isebelum+1:i].isdigit():\n hasil.append(int(s[isebelum+1:i]))\n else:\n hasil.append(s[isebelum+1:i])\n isebelum = i\n \n if s[isebelum+1:len(s)].isdigit():\n hasil.append(int(s[isebelum+1:len(s)]))\n else:\n hasil.append(s[isebelum+1:len(s)])\n\n hasil[0] = s[0] + hasil[0]\n if hasil[0].isdigit():\n hasil[0] = int(hasil[0])\n return hasil","repo_name":"bryanahusna/Python-Inventory-System-Doremonangis","sub_path":"additional_tools.py","file_name":"additional_tools.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17350403805","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 26 16:44:37 2019\r\n\r\n@author: badit\r\n\"\"\"\r\n\r\n#!/usr/bin/env python\r\n\"\"\"Modification of test_eval_example2 except that when park=ON, always go to GOAL or if fuel runs out, go to refuel and then go to goal.\r\nRepeat this when park is OFF. \r\nThis example illustrates the use of TuLiP to synthesize a reactive\r\ncontroller for a GR(1) specification. We code the specification\r\ndirectly in GR(1) form and then use TuLiP to synthesize a reactive\r\ncontroller.\r\n\r\nThe system is modeled as a discrete transition system in which the\r\nrobot can be located anyplace on a 4x4 grid of cells with a refueling station:\r\n +----+----+----+----+\r\n | X1 | X2 | X3 | X4 | Home: X16, Refuel: X8, Goal: X1\r\n +----+----+----+----+\r\n | X5 | X6 | X7 | X8 |\r\n +----+----+----+----+\r\n | X9 | X10| X11| X12|\r\n +----+----+----+----+\r\n | X13| X14| X15| X16|\r\n +----+----+----+----+\r\n\r\nThe robot is allowed to transition between any two adjacent cells;\r\ndiagonal motions are not allowed. The robot should continuously\r\nrevisit the cell X1.\r\n\r\nThe environment consists of a single state called 'park' that\r\nindicates that the robot should move to cell X1.\r\n\r\nThe system specification in its simplest form is given by\r\nHopefully, it automatically refuels...\r\nOtherwise, revert back to example2.\r\n []<>park -> []<>X1 && [](!park -> <>X16) \r\n\r\nWe must convert this specification into GR(1) form:\r\n\r\n env_init && []env_safe && []<>env_prog_1 && ... && []<>env_prog_m ->\r\n sys_init && []sys_safe && []<>sys_prog_1 && ... && []<>sys_prog_n\r\n\"\"\"\r\n# 21 Jul 2013, Richard M. Murray (murray@cds.caltech.edu)\r\n# Import the packages that we need\r\n# from __future__ import print_function\r\nimport logging\r\nfrom tulip import spec\r\nfrom tulip import synth\r\nfrom tulip.transys import machines\r\nfrom tulip import dumpsmach\r\n\r\nlogging.basicConfig(level=logging.WARNING)\r\n#\r\n# Environment specification\r\n#\r\n# The environment can issue a park signal that the robot must respond\r\n# to by moving to the lower left corner of the grid. We assume that\r\n# the park signal is turned off infinitely often.\r\n#\r\nenv_vars = {}\r\nenv_vars['park'] = 'boolean'\r\nenv_vars['Cr'] = (2,15)\r\nenv_init = {'(Cr = 2)'} \r\n# Car is not patrolling. 
It is moving anywhere in the center two columns\r\nenv_safe = {'(Cr = 14) -> X(Cr = 15 || Cr = 10)', '(Cr = 10) -> X(Cr = 14 || Cr = 6)', \\\r\n '(Cr = 6) -> X(Cr = 10 || Cr = 2)', '(Cr = 2) -> X(Cr = 6 || Cr = 3)', \\\r\n '(Cr = 3) -> X(Cr = 7 || Cr = 2)', '(Cr = 7) -> X(Cr = 3 || Cr = 11)', \\\r\n '(Cr = 11) -> X(Cr = 7 || Cr = 15)', '(Cr = 15) -> X(Cr = 11 || Cr = 14)',\r\n } \r\nenv_prog = {'park'} # []<>(park)\r\n\r\n#\r\n# System dynamics\r\n#\r\n# The system specification describes how the system is allowed to move\r\n# and what the system is required to do in response to an environmental\r\n# action.\r\n# System can wait for the environment to get out of the way\r\nsys_vars = {}\r\nsys_vars['Xr'] = (1, 16)\r\nsys_vars['fuel'] = (0,10)\r\nsys_init = {'Xr=16', 'fuel = 10'}\r\n# try and see if it is possible to let the vehicle move a little:\r\n# that is, from Xr = 16, \r\nsys_safe = {'(Xr = 1) -> X (Xr=1 || Xr = 2 || Xr = 5)',\r\n '(Xr = 2) -> X (Xr = 1 || Xr = 3 || Xr = 6 || Xr = 2)',\r\n '(Xr = 3) -> X (Xr = 2 || Xr = 4 || Xr = 7 || Xr = 3)',\r\n '(Xr = 4) -> X (Xr = 4 || Xr = 3 || Xr = 8)',\r\n '(Xr = 5) -> X (Xr = 1 || Xr = 5 || Xr = 6 || Xr = 9)',\r\n '(Xr = 6) -> X(Xr = 2 || Xr = 5 || Xr = 7 || Xr = 10 || Xr = 6)',\r\n '(Xr = 7) -> X(Xr = 3 || Xr = 6 || Xr = 8 || Xr = 11 || Xr = 7)',\r\n '(Xr = 8) -> X(Xr = 4 || Xr = 7 || Xr = 8 || Xr = 12)',\r\n '(Xr = 9) -> X (Xr = 5 || Xr = 9 || Xr = 10 || Xr = 13)',\r\n '(Xr = 10) -> X (Xr = 6 || Xr = 9 || Xr = 11 || Xr = 14 || Xr = 10)',\r\n '(Xr = 11) -> X (Xr = 7 || Xr = 10 || Xr = 12 || Xr = 15 || Xr = 11)',\r\n '(Xr = 12) -> X (Xr = 8 || Xr = 11 || Xr = 12 || Xr = 16)',\r\n '(Xr = 13) -> X (Xr = 9 || Xr = 13 || Xr = 14)',\r\n '(Xr = 14) -> X(Xr = 10 || Xr = 13 || Xr = 15 || Xr = 14)',\r\n '(Xr = 15) -> X(Xr = 11 || Xr = 14 || Xr = 16 || Xr = 15)',\r\n '(Xr = 16) -> X(Xr = 12 || Xr = 15 || Xr = 16)',\r\n 'Cr = 14 -> !(Xr = 14)',\r\n 'Cr = 10 -> !(Xr = 10)',\r\n 'Cr = 6 -> !(Xr = 6)',\r\n 'Cr = 2 -> !(Xr = 2)',\r\n 'Cr = 3 -> !(Xr = 3)',\r\n 'Cr = 7 -> !(Xr = 7)',\r\n 'Cr = 11 -> !(Xr = 11)',\r\n 'Cr = 15 -> !(Xr = 15)',\r\n 'fuel > 0',\r\n '(fuel = 10) <-> X(fuel = 9)',\r\n '(fuel = 9) <-> X(fuel = 8)',\r\n '(fuel = 8) <-> X(fuel = 7)',\r\n '(fuel = 7) <-> X(fuel = 6)',\r\n '(fuel = 6) <-> X(fuel = 5)',\r\n '(fuel = 5) <-> X(fuel = 4)',\r\n '(fuel = 4) <-> X(fuel = 3)',\r\n '(fuel = 3) <-> X(fuel = 2)',\r\n '(fuel = 2) <-> X(fuel = 1)',\r\n '(fuel = 1) -> X(fuel = 0) || (Xr = 4 && X(Xr = 8)) || (Xr = 7 && X(Xr = 8)) || (Xr = 12 && X(Xr = 8))',\r\n '(Xr = 8) -> (fuel = 10)',\r\n}\r\n\r\nsys_prog = set() # empty set\r\n\r\n# Environment won't crash into you:\r\nfor xi in range(2,16):\r\n env_safe |= {'(Xr = '+str(xi)+') -> X(!(Cr = '+str(xi)+'))'}\r\n#\r\n# System specification\r\n#\r\n# The system specification is that the robot should repeatedly revisit\r\n# the upper right corner of the grid while at the same time responding\r\n# to the park signal by visiting the lower left corner. 
The LTL\r\n# specification is given by\r\n#\r\n# []<> X1 && [](!park -> <>X16) && [](!park -> <>X8)\r\n#boolean\r\n# Since this specification is not in GR(1) form, we introduce an\r\n# environment variable X0reach that is initialized to True and the\r\n# specification [](park -> <>X0) becomes\r\n#\r\n# [](X (X16reach) <-> X16 || (X16reach && park)), []((X8reach && park) || X (X8reach) <-> X8))\r\n#\r\n# Augment the system description to make it GR(1)\r\nsys_vars['X16reach'] = 'boolean'\r\nsys_init |= {'X16reach'}\r\nsys_safe |= {'(X (X16reach) <-> (Xr=16)) || (X16reach && park)'}\r\nsys_prog |= {'X16reach', 'Xr=1'}\r\n\r\n# Create a GR(1) specification\r\nspecs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init, env_safe, sys_safe, env_prog, sys_prog)\r\n# Print specifications:\r\nprint(specs.pretty())\r\n\r\n#\r\n# Controller synthesis\r\n#\r\n# The controller decides based on current variable values only,\r\n# without knowing yet the next values that environment variables take.\r\n# A controller with this information flow is known as Moore.\r\nspecs.moore = True\r\n# Ask the synthesizer to find initial values for system variables\r\n# that, for each initial values that environment variables can\r\n# take and satisfy `env_init`, the initial state satisfies\r\n# `env_init /\\ sys_init`.\r\nspecs.qinit = '\\E \\A' # i.e., \"there exist sys_vars: forall sys_vars\"\r\n\r\n# At this point we can synthesize the controller\r\n# using one of the available methods.\r\nstrategy = synth.synthesize(specs)\r\nassert strategy is not None, 'unrealizable'\r\n\r\n# Generate a graphical representation of the controller for viewing, or a textual representation if pydot is missing.\r\n# if not strategy.save('test_eval_example_modified.png'):\r\n# print(strategy)\r\n\r\n# Writing strategy to file\r\nfor elem in env_init:\r\n break\r\nelem = elem.strip('()').split()\r\nenv0 = int(elem[2])\r\n\r\nif(env0 == 2):\r\n print(\"2\")\r\n dumpsmach.write_python_case(\"TE2_v2.py\", strategy, classname=\"TE_ctrl_init2\")\r\nelif(env0 == 3):\r\n print(\"3\")\r\n dumpsmach.write_python_case(\"TE3_v2.py\", strategy, classname=\"TE_ctrl_init3\")\r\nelif(env0 == 6):\r\n print(\"6\")\r\n dumpsmach.write_python_case(\"TE6_v2.py\", strategy, classname=\"TE_ctrl_init6\")\r\nelif(env0 == 7):\r\n print(\"7\")\r\n dumpsmach.write_python_case(\"TE7_v2.py\", strategy, classname=\"TE_ctrl_init7\")\r\nelif(env0 == 10):\r\n print(\"10\")\r\n dumpsmach.write_python_case(\"TE10_v2.py\", strategy, classname=\"TE_ctrl_init10\")\r\nelif(env0 == 11):\r\n print(\"11\")\r\n dumpsmach.write_python_case(\"TE11_v2.py\", strategy, classname=\"TE_ctrl_init11\")\r\nelif(env0 == 14):\r\n print(\"14\")\r\n dumpsmach.write_python_case(\"TE14_v2.py\", strategy, classname=\"TE_ctrl_init14\")\r\nelif(env0 == 15):\r\n print(\"15\")\r\n dumpsmach.write_python_case(\"TE15_v2.py\", strategy, classname=\"TE_ctrl_init15\")\r\nelse:\r\n print('Keep the obstacle car initial position in the middle two columns')\r\n \r\n\r\n## Generate a graph that represents the specifications set out by this file\r\n\r\n","repo_name":"abadithela/Test-and-Eval-","sub_path":"BFS_Search_Tests/generate_strategy_tulip.py","file_name":"generate_strategy_tulip.py","file_ext":"py","file_size_in_byte":8496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28312629855","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = 'Li qiaoxia'\n\nfrom twisted.words.protocols.irc import lowDequote\nfrom builtins import str\nfrom _ast import Str\nimport logging; logging.basicConfig(level=logging.INFO)\nfrom chardet.chardistribution import Big5DistributionAnalysis\nfrom aiohttp.web_urldispatcher import get\nfrom _socket import IPPORT_RESERVED\nfrom socket import *\nimport traceback\n \n\nimport logging; logging.basicConfig(level=logging.INFO)\nfrom switch import switch\n\nimport asyncio, os, json, time\nfrom datetime import datetime\nfrom multiprocessing import Pool\nimport os, time, random\nfrom config import configs\nimport mysql.connector\nimport socket\n\n\n'''\nasync tcp application.\n'''\n\n\n\nPlCstationtype= {\n 'typename':'A',\n 'register':'a',\n 'instruction':'aRSC',\n }\n \n# {'typename':'B',\n# 'register':'b',\n# 'instruction':'bRSC'\n# },\n \n \n\ndef sendmes(stationnum,ipaddr,port,stationtype,data):\n #waiting for content to be filled in\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 建立连接:\n s.connect((ipaddr, port))\n # 接收欢迎消息:\n #print(s.recv(1024).decode('utf-8'))\n # 将读到的信息发出去\n for data in [b'Michael', b'Tracy', b'Sarah']:\n # 发送数据:\n s.send(data)\n print(s.recv(1024).decode('utf-8'))\n s.send(b'exit')\n s.close() \n pass\n\n\n\ndef plccmd(stationnum,instr,data=''):\n \n instruction='%'\n instruction+=stationnum\n\n #stationnum本来就是字符串,需转换成ascii\n \n instruction+='#' #命令指令\n \n instruction+= instr+data\n #BCC\n bcc=0\n insascii=instruction.encode('ascii')\n print(\"first ascii\",insascii)\n for i in range(len(insascii)) :\n# print (insascii[i])\n bcc^=insascii[i]\n# print('bcc=',bcc)\n \n highbcc= hex(int(bcc/16))\n lowbcc= hex(int(bcc%16))\n\n print('high=%s,low=%s' % (highbcc.encode(\"ascii\"),lowbcc.encode(\"ascii\")))\n \n insascii+=highbcc.strip(\"0x\").encode(\"ascii\")\n insascii+=lowbcc.strip(\"0x\").encode(\"ascii\")\n\n# insascii+=hex(10).encode(\"ascii\") #0x0a的值转为ascii码,终于对了。\n insascii+=b'\\x0d'\n\n\n# insascii+=chr(int('0a', 16)).encode(\"ascii\")\n \n print(insascii)\n return insascii\n \ndef rcvplcmsg(msgdatacome,instr):\n \n # msgdata= msgdatacome.decode(\"utf-8\")\n datafromplc={\n 'getdata':\"\",\n 'res':False\n }\n msgdata= msgdatacome\n print(\"msgdata=\",msgdata)\n datafromplc['res']=False\n l= len(msgdata)\n \n \n \n if msgdata[l-1:l]== b'\\x0d' :\n print('get cr =',msgdata[l-1:l])\n \n if msgdata[0:1] != b'%' :\n logging.info(\"first character should be %%,not %s \" % msgdata[0])\n \n stationnum=msgdata[1:2]\n \n if msgdata[3:4] == b'$' : #正常应答\n if msgdata[4:6]==instr.encode(\"ascii\"):\n #bcc 校验\n\n mlen=len(msgdatacome)\n print(\"mlen=\",mlen)\n bcc=0\n \n for i in range(mlen-3) :\n# print (msgdata[i])\n bcc^=msgdata[i]\n# print('bcc=',bcc)\n# getbcc=int(msgdatacome[mlen-3:mlen-3],16)*16+int(msgdatacome[mlen-2:mlen-2])\n \n getbcc = int(str(msgdata[mlen-3:mlen-1],encoding=\"utf-8\"),16)\n \n# print('getbcc=',msgdata[mlen-3:mlen-1],getbcc)\n if getbcc == bcc :\n # print(\"ca=\",msgdatacome[4:6])\n ca=msgdatacome[4:6].decode()\n print(\"ca=\",ca)\n for case in switch(ca):\n if case('RD'): #读取数据寄存器\n #从第8个字符到len-3 之间的数据,每4个字符一组,16进制,高位在后,低位在前\n data=msgdata[6:mlen-3]\n i=0\n data0 =\"\"\n if len(data) <4 :\n break\n while (i= low: # This is done to ensure we've captured the last item in the list\n mid = (low + high)//2\n \n if target_item == a_list[mid]:\n found = True\n return found\n elif target_item < a_list[mid]:\n high = mid-1\n\n else:\n low = mid+1\n\n return 
found\n\n \n \n\nprint(binarySearchIterative([1,2,3,4,5,6], 5))\n\ndef binarySearchRecursive(a_list, target_item):\n low = 0\n high = len(a_list)-1\n\n \n def _binary_search(low, high):\n mid = (low + high)//2\n found = False\n\n if high>=low:\n\n if target_item == a_list[mid]:\n found = True\n return found\n\n elif target_item < a_list[mid]:\n return _binary_search(low, mid -1)\n\n else:\n return _binary_search(mid+1, high)\n\n else:\n return found\n\n return _binary_search(low, high)\n\n\nprint(binarySearchRecursive([1,2,3,4,5,6], 1))\n\n","repo_name":"sammienjihia/DataStructuresAlgorithms","sub_path":"binaryRecursive.py","file_name":"binaryRecursive.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9350370634","text":"from django import forms\nfrom django.shortcuts import render, redirect\nfrom captcha.fields import ReCaptchaField\nfrom .models import Subscription, SubscriptionType\n\n\nclass SubscriptionForm(forms.ModelForm):\n captcha = ReCaptchaField()\n type = forms.ModelChoiceField(\n queryset=SubscriptionType.objects,\n widget=forms.RadioSelect(),\n empty_label=None,\n label=\"Abonelik tipi\",\n )\n\n class Meta:\n model = Subscription\n fields = ['name', 'email', 'address', 'type', 'renewal', 'phone', 'notes']\n widgets = {\n 'type': forms.RadioSelect(),\n 'renewal': forms.Select(),\n }\n\n\ndef subscribe(request):\n subscription = None\n if request.method == 'POST':\n form = SubscriptionForm(request.POST)\n if form.is_valid():\n subscription = form.save()\n else:\n form = SubscriptionForm()\n return render(request, 'subscriptions/subscription.html', {\n 'form': form,\n 'subscription_types': SubscriptionType.objects.filter(active=True),\n 'subscription': subscription,\n })\n","repo_name":"Solfasol/solfasol","sub_path":"solfasol/subscriptions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6041821342","text":"import fileinput\r\n\r\ndef write2file(text, filename2, mod):\r\n\tf = open(filename2,mod)\r\n\tf.write(text)\r\n\tf.close()\r\n\r\ndef normal_label():#the label 'year' ranges from 1922 to 2011\r\n\tprint(\"processing normal_label....\")\r\n\trange=2011-1922\r\n\tfilename='C:\\\\Users\\\\Yawei\\\\Desktop\\\\train_label.txt'\r\n\tf=open(filename)\r\n\tcount=0\r\n\tfor line in fileinput.input(filename):\r\n\t\tcount=count+1\r\n\t\tyear_normal = 1.0*(float(line)-1922)/range\r\n\t\tstr1=''\r\n\t\tif count == 463715:\r\n\t\t\tstr1=str(year_normal)\r\n\t\tstr1=str(year_normal)+'\\n'\r\n\t\twrite2file(str1,'C:\\\\Users\\\\Yawei\\\\Desktop\\\\train_label_normal.txt','a+')\r\n\tf.close()\r\n\r\ndef normal_sample():\r\n\tprint(\"processing normal_sample....\")\r\n\t#max=-999999999999.0\r\n\t#min=99999999999.0\r\n\tsample_list=[]\r\n\tfilename='train_sample.txt'\r\n\tf=open(filename)\r\n\t#for line in fileinput.input(filename):\r\n\t#\tif line.find('\\n')==-1:\r\n\t#\t\tweight_list=line.split(' ')\r\n\t#\telse:\r\n\t#\t\tind=line.find('\\n')\r\n\t#\t\tweight_list=line[:ind].split(' ')\r\n\t#\tfor elm in weight_list:\r\n\t#\t\t#print(elm)\r\n\t#\t\tif maxfloat(elm):\r\n\t#\t\t\tmin=float(elm)\r\n\tmax=65735.78125\r\n\tmin=-14861.695312\r\n\tnum=0\r\n\tfor line in fileinput.input(filename):\r\n\t\tnum=num+1\r\n\t\tsample_list=[]\r\n\t\tif line.find('\\n')==-1:\r\n\t\t\tweight_list=line.split(' ')\r\n\t\t\t#print('......................')\r\n\t\telse:\r\n\t\t\tindex=line.find('\\n')\r\n\t\t\tweight_list=line[:index].split(' ')\r\n\t\tcount=0\r\n\t\t#print(weight_list)\r\n\t\tfor elm in weight_list:\r\n\t\t\t#print(\"the last elm:\",elm)\r\n\t\t\telm_normal=1.0*(float(elm)-min)/(max-min)\r\n\t\t\t#print(elm_normal)\r\n\t\t\t#count=count+1\r\n\t\t\tsample_list.append(elm_normal)\r\n\t\t\t#sample_list.append(' ')\r\n\t\tcontent=''\r\n\t\tcount=0\r\n\t\t#print(\"the last elm:\",weight_list[89])\r\n\t\tfor elm in sample_list:\r\n\t\t\tcount=count+1\r\n\t\t\tif count==90:\r\n\t\t\t\t#print(\"last one:\",elm)\r\n\t\t\t\t#return;\r\n\t\t\t\tcontent=content+str(elm)\r\n\t\t\telse:\r\n\t\t\t\tcontent=content+str(elm)+' '\r\n\t\t#if(num<463715):\r\n\t\t#\tcontent=content+'\\n'\r\n\t\t#print(num)\r\n\t\tcontent=content+'\\n'\r\n\t\twrite2file(content,'train_sample_normal.txt','a+')\r\n\t\t#print(1.0*num/463715)\r\n\t#print(\"max=%f,min=%f\", max,min)\r\n\r\n\r\n#normal_label()\r\nnormal_sample()\r\n","repo_name":"YaweiZhao/simulation_based_machine_learning_library","sub_path":"dataset/tools/我写的脚本/normalized.py","file_name":"normalized.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13753187780","text":"\ndef bahola(isimlar):\n baholar={}\n while isimlar:\n ism=isimlar.pop()\n baho=input(f'{ism.title()}ning bahosni kiriting >>>')\n baholar[ism]=baho\n return baholar\n \noquvchilar=['abror','eldor','said','xabi','azizbek']\nbaholar=bahola(oquvchilar[:])\n#print(baholar)\n\nprint(\"O'quvchilar bahosi royxati:\")\nfor ism,baho in baholar.items():\n print(f\"{ism.title()}ning bahosi : {baho}\")\n\n\n\n\ndef katta_harfqil(matnlar):\n #matnlar=matnlar[:]\n for i in range(len(matnlar)):\n matnlar[i]=matnlar[i].title()\n return matnlar\n\n\nismlar = ['ali', 'vali', 'hasan', 'husan']\nyangi_ismlar=katta_harfqil(ismlar[:])\nprint(yangi_ismlar)\nprint(ismlar)\n\n\ndef bahola(isimlar):\n baholar={}\n for ism in isimlar:\n baho=input(f\"{ism.title()}ning bahosini kiriitng >>>\")\n baholar[ism]=baho\n return baholar\n \noquvchilar=['abror','eldor','said','xabi','azizbek']\nbaholar=bahola(oquvchilar)\n#print(baholar)\n\nprint(\"O'quvchilar bahosi royxati:\")\nfor ism,baho in baholar.items():\n print(f\"{ism.title()}ning bahosi : {baho}\")\n\n","repo_name":"abrorbekanvarbekov/vaziflar","sub_path":"21 chi dars.py","file_name":"21 chi dars.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19368608514","text":"#from default_connection import DefaultConnection\r\nimport os,sys,inspect\r\n##Esto sirve para poder usar import relativos\r\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\nparentdir = os.path.dirname(currentdir)\r\nsys.path.insert(0,parentdir) \r\n## Este es el import parent\r\nfrom default_connection import DefaultConnection\r\nimport psycopg2.extras\r\nimport sys\r\nclass Dummy:\r\n\r\n conexion = None\r\n\r\n def __init__(self):\r\n pass\r\n\r\n ############ retorna el cursor para poder interactuar con la DB #######\r\n def getCursor(self):\r\n try:\r\n #Conexion a postgre\r\n default = DefaultConnection()\r\n self.conexion = default.postgre_connect()\r\n cursor = self.conexion.cursor(cursor_factory=psycopg2.extras.DictCursor)\r\n return cursor\r\n except:\r\n print('Error obteniendo el cursor de dummy')\r\n raise Exception('Error no controlado: {}'.format(sys.exc_info()[0]))\t\t\t\r\n finally: \r\n pass\t\t\r\n #cursor.close()\r\n #self.cerrarConexion()\r\n\r\n ############ crear examen ###############################\r\n def prueba(self):\r\n try: \r\n #Conexion a postgre \r\n cursor = self.getCursor()\r\n #####\r\n insert = \"SELECT * FROM USUARIO\"\r\n cursor.execute(insert)\r\n filas = cursor.fetchall()\t\t\t\r\n return filas\r\n except:\r\n print('Error EN LA PRUEBA DUMMY POSTGRE')\r\n raise Exception('Error no controlado: {}'.format(sys.exc_info()[0]))\t\t\t\r\n finally:\r\n cursor.close()\r\n self.cerrarConexion()\r\n return None\r\n\r\n ########## Cerrar conexion ###################\r\n def cerrarConexion(self):\r\n self.conexion.close()\r\n\r\nif __name__ == '__main__':\r\n dum = Dummy()\r\n filas = dum.prueba()\r\n print(filas)\r\n\r\n","repo_name":"vicorious/itrainer","sub_path":"python/test/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"27358952234","text":"from setuptools import setup, find_packages\nimport os\n\nwith open('./requirement.txt') as f:\n required = f.read().splitlines()\n \nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Information Technology',\n 'Operating System :: Unix',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Programming Language :: Python :: 3.6'\n]\n\nsetup(\n name='JSJumble',\n version='1.0.4',\n description='Tool for obfuscating, compressing Javascript, and collecting static files.',\n long_description=open('pypi.md').read() + '\\n\\n' + open('CHANGELOG.txt').read(),\n long_description_content_type='text/markdown',\n url='https://github.com/GoodDay360/JSJumble', \n author='GoodDay360',\n author_email='istartgame31@gmail.com',\n license='GNU General Public License (GPL)', \n classifiers=classifiers,\n keywords=['JSJumble','library','module','javascript','compress','obfuscator','obfuscate','static','collect','server'], \n packages=find_packages(exclude=[]),\n include_package_data=True,\n install_requires=required,\n)","repo_name":"GoodDay360/JSJumble","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4026048247","text":"import requests \nimport os\nimport sys\nfrom sys import argv \nimport time\n\nprint('''\n /|\n / / \\ \n | | ||\n \\ \\ / /\n \\ \\/ / ________________________________________________\n \\_ | \\_/ \\ [ DirHunter\t\t\t\t - x]\n \\| | || |_______________________________________________|\n | \\_/ |\t\t\t\t\t\t |\n \\_| | by Mathis Pais __ \t |\tdate: 22/12/2021\t\t\t |\n ---__\\ \\ ___________ /|_\\ __--- |\tversion: 1.3\t\t\t |\n \\ - -\\ \\- -/ /-- - / |\tdescription: brute-force tool for |\n \\ \\ \\ / / / |\twebsites directories discovery. |\n \\___/ __.__ \\___/ |\tusage: |\n | | |\t\t |\tpython3 DirHunter.py \"url\" \"wordlist.txt\" |\n | ___ ___ | [_______________________________________________] \n \\ /|\\ /|\\ /\n \\ /\n |\\ /|\n |\\ | /|\n | \\ | / |\n \\ | /\n \\___|___/\n \\^ ^/@$\n \\_-_/\n\n\n''') \n\nclass bcolors:\n\tOK = '\\033[92m'\n\tWARNING = '\\033[93m'\n\tFAIL = '\\033[91m'\n\tRESET = '\\033[0m'\n\ndef main():\n\tif len(argv) == 1 :\n\t\tprint(bcolors.FAIL+\"[!] \"+bcolors.RESET+ 'usage: python3 DirHunter.py \"url\" \"wordlist\"')\n\telif len(argv) == 2:\n\t\tif argv[1][len(argv[1])-4:len(argv[1])] == '.txt':\n\t\t\tprint(bcolors.FAIL+\"[!] \"+bcolors.RESET+ 'usage: python3 DirHunter.py \"url\" \"wordlist\"')\n\t\telse:\n\t\t\tprint(bcolors.FAIL+\"[!] \"+bcolors.RESET+'usage: python3 DirHunter.py \"url\" \"wordlist\"')\n\telif len(argv) == 3:\n\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+'ready to hunt the dir !')\n\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+'searching for deers... Hum, I mean dirs')\n\t\tStart=time.time()\t#démarrage du chrono à t=0s\n\t\thunt(str(argv[1]),\"/\"+str(argv[2]))\t#On utilise la fonction hunt sur les arguments donnés\n\t\tcount=hunt.count\t#On initialise un compteur\n\n\t\tif not hunt.path:\t#Si on ne trouve pas de répértoire \n\t\t\tprint(bcolors.FAIL+\"[!] \"+bcolors.RESET+\"no dir found.\")\t#message d'erreur\n\t\telif hunt.path[0] == \"\": \t#Si le premier élément de la liste est vide\n\t\t\thunt.path.pop(0)\t#On le supprime\n\t\tfor dir in hunt.path:\t#Pour tout les répértoires trouvés lors du premier passage\n\t\t\thunt.count=hunt.count-1\t\t#On enleve 1 au compteur\n\t\t\thunt(str(argv[1])+\"/\"+dir,\"/\"+str(argv[2]))\t#On refait un deuxième passage dans le nouveau répértoire trouvé\n\n\t\tEnd=time.time()\t\t#On stop le chronomètre\n\t\tTime=End-Start\t\t#On fait le calcul du temps de traitement du script\n\t\tprint(\"\\n\")\n\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+\"Hunting time: \", Time, \"sec\")\t#On affiche le temps de recherche\n\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+\"End time: \", time.ctime())\t#On affiche la date à laquelle le script c'est terminé\n\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+\"Your day's catch: \", count+hunt.count)\t#On affiche le nombre de répértoires trouvés\n\telse:\n\t\tprint(bcolors.FAIL+\"[!] 
\"+bcolors.RESET+ 'usage: python3 DirHunter.py \"url\" \"wordlist\"')\n\ndef hunt(urls,wordlist):\t#Définition de la fonction hunt avec les arguments urls et wordlist\n\thunt.path=[]\t#On initialise hunt.path en tant que liste\n\turl=urls\n\thunt.count=0\t#On démarre le compteur à 0\n\ttry:\n\t\tif os.path.exists(os.getcwd()+wordlist):\t#Si le dictionnaire de mots existe\n\t\t\tfile=open(os.getcwd()+wordlist,\"r\")\t#On l'ouvre\n\t\t\tfor i in file:\t#et pour chaque mot se trouvant dedans\n\t\t\t\tdir=i.splitlines()\t#On fait une liste de toutes les lignes du fichier\n\t\t\t\tdir=''.join(dir)\t#et on les rejoint\n\t\t\t\trq=requests.get(url+\"/\"+dir)\t#On envoie la requête à l'url suivie d'un / et du mot de la liste\n\t\t\t\tdir_len=len(dir)\t#initialise une variable qui contient la longueur du mot de la liste\n\n\t\t\t\tif rq.status_code == 200:\t#Si la rêquete renvoie un code 200\n\t\t\t\t\tprint(\"Aiming : \"+url+\"/\"+dir,end=\"\\r\")\t\t#On affiche l'url de la cible visée et on revient au début de la ligne du terminal\n\t\t\t\t\ttime.sleep(0.05)\t#On marque un temps de pause\n\t\t\t\t\tprint(\"Aiming : \"+url+\"/\"+dir_len*\" \",end=\"\\r\") \t#Puis affiche l'url en remplacant la cible par des espace pour la supprimer et reviens au début de la ligne\n\t\t\t\t\tprint(\"\\n\")\n\t\t\t\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+\"Aiming : \"+url+\"/\"+dir+\" \"+bcolors.OK+str(rq.status_code)+bcolors.RESET+\": dir shot ︻デ═一\")\n\t\t\t\t\t#Enfin on affiche tout avec le code reçu et la confirmation du résultat écrit\n\t\t\t\t\thunt.goodir=dir\t\t#On crée une variable contenant le répertoire trouvé\n\t\t\t\t\thunt.count=hunt.count+1\t\t#On incrémente le compteur de 1\n\t\t\t\t\thunt.path.append(hunt.goodir)\t#On ajoute le répértoire trouvé dans la liste hunt.path\n\n\t\t\t\telif rq.status_code == 403:\t#Si la rêquete renvoie un code 403\n\t\t\t\t\tprint(\"Aiming : \"+url+\"/\"+dir,end=\"\\r\")\t\t#On affiche l'url de la cible visée et on revient au début de la ligne du terminal\n\t\t\t\t\ttime.sleep(0.05)\t#On marque un temps de pause\n\t\t\t\t\tprint(\"Aiming : \"+url+\"/\"+dir_len*\" \",end=\"\\r\")\t\t#Puis affiche l'url en remplacant la cible par des espace pour la supprimer et reviens au début de la ligne\n\t\t\t\t\tprint(\"\\n\")\n\t\t\t\t\tprint(bcolors.WARNING+\"[-] \"+bcolors.RESET+\"Aiming : \"+url+\"/\"+dir+\" \"+bcolors.WARNING+str(rq.status_code)+bcolors.RESET+\": restricted area !\") \n\t\t\t\t\t#Enfin on affiche tout avec le code reçu et la signification du code\n\t\t\t\telse:\t#Si la rêquete renvoie un autre code on affiche juste l'url ciblé sans message de confirmation\n\t\t\t\t\tprint(\"Aiming :\"+url+\"/\"+dir,end=\"\\r\")\n\t\t\t\t\ttime.sleep(0.05)\n\t\t\t\t\tprint(\"Aiming :\"+url+\"/\"+dir_len*\" \",end=\"\\r\") \n\n\t\t\tfile.close()\t#On ferme le dictionnaire\n\t\t\tprint(\"\\n\")\n\t\t\tprint(bcolors.OK+\"[+] \"+bcolors.RESET+\"Hunt finished.\")\t\t#On informe de la fin du script\n\n\t\telse: #Si le chemin d'accés au dictionnaire n'est pas bon, on affiche un message d'erreur\n\t\t\tprint(bcolors.FAIL+\"[!] \"+bcolors.RESET+wordlist+\" don't exist in this directory\")\n\n\texcept KeyboardInterrupt:\t#En cas d'interruption clavier\n\t\tprint(\"\\n\")\n\t\tprint(bcolors.FAIL+\"[!] \"+bcolors.RESET+\"Hunting has been abandoned\") \t#On affiche l'abandon du script\n\n\texcept Exception as e:\t\t#Si une autre erreur arrive\n\t\tprint(bcolors.FAIL+\"[!] 
\"+bcolors.RESET+e)\t#On affiche le message d'erreur généré\n\n\nif __name__ == '__main__':\n\tmain()\t#Lancement du script\n","repo_name":"mathis2001/DirHunter","sub_path":"DirHunter.py","file_name":"DirHunter.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"22638744666","text":"from .__head__ import *\n\n\nclass mlp_pol(BaseModel):\n def __init__(self, domain):\n super(mlp_pol, self).__init__(domain, \"policies\")\n self.name = \"mlp\"\n self.input_size = domain.series_in_dim\n self.hidden_size = domain.pol_config[\"hidden_dim\"]\n self.hidden_layers = domain.pol_config[\"hidden_layers\"]\n self.in_layer = nn.Linear(self.input_size, self.hidden_size)\n self.linears = nn.ModuleList(\n [\n nn.Linear(self.hidden_size, self.hidden_size)\n for _ in range(self.hidden_layers)\n ]\n )\n self.out_layer = nn.Linear(self.hidden_size, domain.y_dim)\n\n def forward(self, x):\n\n x = self.in_layer(x)\n for layer in self.linears:\n x = layer(x)\n x = F.elu(x)\n x = self.out_layer(x)\n\n return x\n\n def loss(self, batch):\n x_static, x_series, mask, y_series = batch\n batch_size = x_series.shape[0]\n seq_length = x_series.shape[1]\n\n pred = F.softmax(self.forward(x_series), 2)\n\n dist = torch.distributions.categorical.Categorical(probs=pred)\n ll = dist.log_prob(y_series)\n\n return -ll.masked_select(mask.bool()).mean()\n\n\nclass MLPPol(BasePol):\n def __init__(self, domain, load=True):\n\n self.name = \"mlp\"\n\n self.domain = domain\n self.domain.get_pol_config(self.name)\n\n self.model = mlp_pol(domain)\n if load:\n self.load_pretrained()\n self.model.eval()\n\n def select_action(self, history, stochastic=False, temperature=1.0):\n\n prev_obs, prev_acts = history\n pred = F.softmax(self.model.forward(prev_obs)[:, -1] / temperature, 1)\n\n if stochastic:\n act = torch.distributions.categorical.Categorical(probs=pred)\n action = act.sample()\n else:\n action = torch.argmax(pred)\n\n return action\n","repo_name":"XanderJC/medkit-learn","sub_path":"medkit/policies/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"}
+{"seq_id":"5488437320","text":"from redbot.core import commands\r\nfrom redbot.cogs import audio\r\nfrom redbot.core import Config\r\nfrom redbot.core import checks\r\nfrom json import loads\r\nimport json\r\nimport base64\r\nimport requests\r\nimport re\r\nfrom Crypto.Cipher import AES\r\nclass Wyyyy(commands.Cog):\r\n\t\"\"\"Play song by netease music links!\"\"\"\r\n\t\r\n\t__author__ = \"JackXu\"\r\n\t__version__ = \"0.3.1\"\r\n\t\r\n\tdefault_global_settings = {\"user_cookies\": \"\"}\r\n\tdef __init__(self):\r\n\t\tself.config = Config.get_conf(self, identifier=2817739401)\r\n\t\tself.config.register_global(**self.default_global_settings)\r\n\t\r\n\t\r\n\t\r\n\t@commands.group()\r\n\t@checks.admin_or_permissions(manage_guild=True)\r\n\tasync def wyyset(self, ctx: commands.Context):\r\n\t\t\"\"\"Manage settings.\"\"\"\r\n\t\r\n\t@wyyset.group()\r\n\tasync def cookie(self, ctx: commands.Context):\r\n\t\t\"\"\"Cookie settings.\"\"\"\r\n\t\r\n\t@cookie.command()\r\n\tasync def set(self, ctx: commands.Context, *, cookies_string: str):\r\n\t\t\"\"\"Set cookie.\r\n\t\tThe useful keys are \\\"__crsf\\\",\\\"MUSIC_U\\\"\"\"\"\r\n\t\tcookies_dict = {}\r\n\t\tcookies_string.replace(\" \", \"\")\r\n\t\t#await ctx.send(cookies_string.split(';'))\r\n\t\tfor e in cookies_string.split(';'):\r\n\t\t\t#await ctx.send(e)\r\n\t\t\tk, v = e.split('=', 1)\r\n\t\t\t#await ctx.send(k)\r\n\t\t\t#await ctx.send(v)\r\n\t\t\tcookies_dict[k] = v\r\n\t\tawait self.config.user_cookies.set(cookies_dict)\r\n\t\tawait ctx.send(\"Cookie set complete.\")\r\n\t\r\n\t@cookie.command()\r\n\tasync def delete(self, ctx: commands.Context):\r\n\t\t\"\"\"Delete cookie.\"\"\"\r\n\t\tcookies_dict = {}\r\n\t\tawait self.config.user_cookies.set(cookies_dict)\r\n\t\tawait ctx.send(\"They are clean now.\")\r\n\t\r\n\t\t\r\n\t@commands.command()\r\n\tasync def wyy(self, ctx, *, sharelink: str):\r\n\t\t\"\"\"Play a netease music share link.\"\"\"\r\n\t\trid = None\r\n\t\tif \"song?\" in sharelink:\r\n\t\t\trid = re.search(r'\\?id=(\\d*)', sharelink)\r\n\t\telif \"song/\" in sharelink:\r\n\t\t\trid = re.search(r'song/(\\d*)/', sharelink)\r\n\t\telse:\r\n\t\t\tawait ctx.send(\"This is not a song link!\")\r\n\t\tif rid:\r\n\t\t\tsong_id = re.search(r'\\d+',str(rid.group()))\r\n\t\t\tnonce = \"0CoJUm6Qyw8W8jud\"\r\n\t\t\tdef AES_encrypt(text, key, iv):\r\n\t\t\t\tpad = 16 - len(text) % 16\r\n\t\t\t\ttext = text + pad * chr(pad)\r\n\t\t\t\ttext = text.encode(\"utf-8\")\r\n\t\t\t\tencryptor = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv)\r\n\t\t\t\tencrypt_text = encryptor.encrypt(text)\r\n\t\t\t\tencrypt_text = base64.b64encode(encrypt_text)\r\n\t\t\t\treturn encrypt_text.decode('utf-8')\r\n\t\t\tdef asrsea(p1, p2):\r\n\t\t\t\tres = {}\r\n\t\t\t\trand_num = \"OFnV5T4hXEx90wxi\"\r\n\t\t\t\tvi = b\"0102030405060708\"\r\n\t\t\t\th_encText = AES_encrypt(p1, p2, vi)\r\n\t\t\t\th_encText = AES_encrypt(h_encText, rand_num, vi)\r\n\t\t\t\tres[\"encText\"] = h_encText\r\n\t\t\t\tres[\"encSecKey\"] = \"6b2e91bfea2fff78e82f13d16405c8ba0bd54af4076218463931b5ebfdb177f61ee9fe3db8566edb19cc5a5badd0d2cd1435553c6caa40f39e45c35e0957ec67e3ad36e074b6ee0224083b17d96fb734fdc6d11d42ea8d1c71cdd170f9d93dd98c7cb22624e8765bbd93ffc1a98b834bc86d847a229241b8f3750571cf199621\"\r\n\t\t\t\treturn res\r\n\t\t\treq = json.dumps({\r\n\t\t\t\t\"ids\": [song_id.group()],\r\n\t\t\t\t\"br\": 999000,\r\n\t\t\t\t\"csrf_token\": ''\r\n\t\t\t})\r\n\t\t\t#await ctx.send(req)\r\n\t\t\tasrsea_res = asrsea(req, nonce)\r\n\t\t\tparam_data = {\r\n\t\t\t\t\"params\": 
asrsea_res[\"encText\"],\r\n\t\t\t\t\"encSecKey\": asrsea_res[\"encSecKey\"]\r\n\t\t\t}\r\n\t\t\theaders = {\r\n\t\t\t\t\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0\",\r\n\t\t\t\t\"Content-Type\": \"application/x-www-form-urlencoded\",\r\n\t\t\t\t\"Origin\": \"http://music.163.com\",\r\n\t\t\t\t\"Referer\": \"https://music.163.com\",\r\n\t\t\t\t\"Host\": \"music.163.com\",\r\n\t\t\t\t\"X-Real-IP\": \"27.38.4.87\"\r\n\t\t\t}\r\n\t\t\tu_cookies = await self.config.user_cookies()\r\n\t\t\tcookies = {\"os\": \"ios\"}\r\n\t\t\tcookies.update(u_cookies)\r\n\t\t\tsongapi = 'http://music.163.com/weapi/song/enhance/player/url?csrf_token='\r\n\t\t\tr = requests.post(songapi, headers=headers, data=param_data, verify=False, cookies=cookies)\r\n\t\t\treal_url = re.search(r'http.*\\.((mp3)|(flac))',r.text)\r\n\t\t\tif real_url:\r\n\t\t\t\turl_best = real_url.group()\r\n\t\t\t\tplay = ctx.bot.get_command(\"play\")\r\n\t\t\t\tawait ctx.invoke(play, query = url_best)\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(\"Can't get this song. Might need netease music VIP.\")\r\n\t\telse:\r\n\t\t\tawait ctx.send(\"Can't find song id!\")\r\n","repo_name":"MeowingCafe/MeowingCafe-Cogs","sub_path":"wyyyy/wyyyy.py","file_name":"wyyyy.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"26351469224","text":"from django import forms\nfrom django.utils.translation import ugettext as _\n\nclass NewReviewForm(forms.Form):\n text = forms.CharField(\n label=_('Text'),\n widget=forms.Textarea(attrs={\n 'placeholder': _('Write Your Review about this book.'),\n 'rows': 3,\n 'class': 'span12',\n 'style': 'resize: none;',\n })\n )\n title = forms.CharField(\n label=_('Book'),\n widget=forms.TextInput(attrs={\n 'placeholder': _('Title of Review'),\n 'autocomplete': 'off',\n 'class': 'span10',\n })\n ) \n","repo_name":"s1na/darkoob","sub_path":"darkoob/book/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43227033741","text":"'''\nGiven n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.\n\nFor example,\nGiven [0,1,0,2,1,0,1,3,2,1,2,1], return 6.\n\n\nThe above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image!\n'''\n\nclass Solution(object):\n def trap(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n n = len(height)\n if n == 0:\n return 0\n\n lo, hi = 0, n - 1\n sum_left = sum_right = sum_temp = 0\n h_left, h_right = height[0], height[-1]\n while lo <= hi:\n if h_left < h_right:\n while lo <= hi:\n lo += 1\n if height[lo] >= h_left:\n h_left = height[lo]\n sum_left += sum_temp\n sum_temp = 0\n break\n else:\n sum_temp += h_left - height[lo]\n else:\n while lo <= hi:\n hi -= 1\n if height[hi] >= h_right:\n h_right = height[hi]\n sum_right += sum_temp\n sum_temp = 0\n break\n else:\n sum_temp += h_right - height[hi]\n return sum_left + sum_right\n\n\nif __name__ == '__main__':\n assert Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6\n","repo_name":"wufangjie/leetcode","sub_path":"042. Trapping Rain Water.py","file_name":"042. Trapping Rain Water.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"8398500846","text":"def solution(keyinput, board):\n answer = [0, 0]\n max_x, max_y = board[0] // 2, board[1] // 2\n for i in keyinput:\n if i == 'up' and answer[1] < max_y:\n answer[1] += 1\n elif i == 'right' and answer[0] < max_x:\n answer[0] += 1\n elif i == 'down' and answer[1] > -max_y:\n answer[1] -= 1\n elif i == 'left' and answer[0] > -max_x:\n answer[0] -= 1\n return answer","repo_name":"dostiny/algorithms_auto_save","sub_path":"프로그래머스/lv0/120861. 캐릭터의 좌표/캐릭터의 좌표.py","file_name":"캐릭터의 좌표.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"70675045201","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String,Float64\n\ndef callback(data):\n print(\"Valor\",data.data)\n if(data.data<8):\n pub = rospy.Publisher('piscar_led', String, queue_size=10)\n pub.publish('liga')\n\ndef listener():\n rospy.init_node('controller', anonymous=True)\n rospy.Subscriber('distance', Float64, callback)\n rospy.spin()\n\ndef talker():\n pub = rospy.Publisher('chatter', String, queue_size=10)\n rospy.init_node('talker', anonymous=True)\n rate = rospy.Rate(1)\n while not rospy.is_shutdown():\n hello_str = \"Enviei %s\" % rospy.get_time()\n rospy.loginfo(hello_str)\n pub.publish(hello_str)\n rospy.Subscriber(\"chatter\", String, callback)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n listener()\n except rospy.ROSInterruptException:\n pass\n\n\n\n","repo_name":"joselitolima21/tutoriais-labiras","sub_path":"ws/src/tutoriais_labiras/src/alerta.py","file_name":"alerta.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"3597805702","text":"import argparse\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nimport pdo.service_client.service_data.eservice as eservice_db\n\n__all__ = ['command_eservice_db']\n\n## -----------------------------------------------------------------\n## -----------------------------------------------------------------\ndef command_eservice_db(state, bindings, pargs) :\n \"\"\"controller command to manage the enclave service database\n \"\"\"\n\n parser = argparse.ArgumentParser(prog='eservice')\n\n subparsers = parser.add_subparsers(dest='command')\n\n add_parser = subparsers.add_parser('add', description='add an eservice to the database')\n add_parser.add_argument('--url', help='URL for the enclave service to add', type=str, required=True)\n add_parser.add_argument('--name', help='Short name for the enclave service', type=str, required=True)\n\n clear_parser = subparsers.add_parser('clear', description='remove all eservices in the database')\n list_parser = subparsers.add_parser('list', description='list eservices in the database')\n\n load_parser = subparsers.add_parser('load', description='load an eservice database')\n load_parser.add_argument('--database', help='Name of the eservice database to use', type=str, required=True)\n merge_group = load_parser.add_mutually_exclusive_group(required=False)\n merge_group.add_argument('--merge', help='Merge new database with current db', dest='merge', action='store_true')\n merge_group.add_argument('--no-merge', help='Overwrite current db with new database', dest='merge', action='store_false')\n load_parser.set_defaults(merge=False)\n\n remove_parser = subparsers.add_parser('remove', description='remove eservice from the database')\n remove_group = remove_parser.add_mutually_exclusive_group(required=True)\n remove_group.add_argument('--name', help='Short name for enclave service to remove', type=str)\n\n save_parser = subparsers.add_parser('save', description='save the current eservice database')\n save_parser.add_argument('--database', help='Name of the eservice database to use', type=str, required=True)\n\n options = parser.parse_args(pargs)\n\n default_database = state.get(['Service', 'EnclaveServiceDatabaseFile'])\n ledger_config = state.get(['Ledger'])\n\n if options.command == 'add' :\n if not eservice_db.add_by_url(ledger_config, options.url, name=options.name, update=True) :\n raise Exception('failed to add eservice {0} to the database'.format(options.name))\n return\n\n if options.command == 'clear' :\n eservice_db.clear_all_data()\n return\n\n if options.command == 'list' :\n enclave_names = list(eservice_db.get_enclave_names())\n enclave_names.sort()\n\n for enclave_name in enclave_names :\n enclave_info = eservice_db.get_by_name(enclave_name)\n enclave_short_id = _hashed_identity_(enclave_info.enclave_id)\n print(\"{0:<18} {1:<18} {2}\".format(enclave_name, enclave_short_id, enclave_info.url))\n\n if options.command == 'load' :\n eservice_db.load_database(options.database, options.merge)\n return\n\n if options.command == 'remove' :\n eservice_db.remove_by_name(name=options.name)\n return\n\n if options.command == 'save' :\n eservice_db.save_database(options.database, True)\n return\n\n raise Exception('unknown subcommand')\n","repo_name":"Yeuman/project4","sub_path":"common/crypto/pdo/client/pdo/client/controller/commands/eservice_db.py","file_name":"eservice_db.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2076866183","text":"from .model_init import connect_to_DB\n\nconnection = connect_to_DB()\n\n\ndef get_all():\n with connection.cursor() as cursor:\n query = \"SELECT * FROM Comments \"\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n","repo_name":"Tzofia-Asherov/full_stack_learn_online_project","sub_path":"model/comments_model.py","file_name":"comments_model.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"26227331176","text":"'''\n[ 문제 ] : 안테나 (https://www.acmicpc.net/problem/18310)\n\n[ 문제 풀이 ]\n1) 안테나는 집이 위치한 곳 중 한 곳에서만 설치할 수 있고, 모든 집 사이의 거리가 최소가 되어야 한다.\n2) 따라서 모든 집의 위치 정보를 입력받고, 중앙값 출력\n\n[ Concept ]\n\n'''\nn = int(input())\ndata = list(map(int, input().split()))\ndata.sort()\nprint(data[(n-1)//2])","repo_name":"leejinlee-kr/Algorithm","sub_path":"Sort/안테나.py","file_name":"안테나.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30514583162","text":"from typing import Dict\n\nfrom mmengine.model import BaseDataPreprocessor, ModuleDict\n\nfrom mmaction.registry import MODELS\n\n\n@MODELS.register_module()\nclass MultiModalDataPreprocessor(BaseDataPreprocessor):\n \"\"\"Multi-Modal data pre-processor for action recognition tasks.\"\"\"\n\n def __init__(self, preprocessors: Dict) -> None:\n super().__init__()\n self.preprocessors = ModuleDict()\n for name, pre_cfg in preprocessors.items():\n assert 'type' in pre_cfg, (\n 'Each data preprocessor should contain the key type, '\n f'but got {pre_cfg}')\n self.preprocessors[name] = MODELS.build(pre_cfg)\n\n def forward(self, data: Dict, training: bool = False) -> Dict:\n \"\"\"Preprocesses the data into the model input format.\n\n Args:\n data (dict): Data returned by dataloader.\n training (bool): Whether to enable training time augmentation.\n\n Returns:\n dict: Data in the same format as the model input.\n \"\"\"\n data = self.cast_data(data)\n inputs, data_samples = data['inputs'], data['data_samples']\n for modality, modality_data in inputs.items():\n preprocessor = self.preprocessors[modality]\n modality_data, data_samples = preprocessor.preprocess(\n modality_data, data_samples, training)\n inputs[modality] = modality_data\n\n data['inputs'] = inputs\n data['data_samples'] = data_samples\n return data\n","repo_name":"open-mmlab/mmaction2","sub_path":"mmaction/models/data_preprocessors/multimodal_data_preprocessor.py","file_name":"multimodal_data_preprocessor.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"}
+{"seq_id":"28987029162","text":"from textwrap import dedent\nimport numpy as np \nimport re \nimport sys \n\n\n'''\n Owais A. SHahzada \n 02/19/2019\n Objective: Build the Needleman-Wunsch Algorithm \n Description: Needleman-Wunsch is an algorithm to align \n nucleotide or protein sequences. \n \n'''\n\n#reverse sequences function\ndef reverse_sequence_B(alignB):\n reversed_B = alignB[::-1]\n\n return reversed_B\n\ndef reverse_sequence_A(alignA):\n reversed_A = alignA[::-1]\n \n return reversed_A\n\n#Traceback function \ndef traceback(seq1,seq2, matrix, score_table, gap,score1):\n\n i = len(seq1)\n j = len(seq2)\n\n alignA = \"\"\n alignB = \"\"\n\n while i > 0 or j > 0:\n if i > 0 and j > 0 and matrix[i,j] == matrix[i-1,j-1] + score_table[i,j]:\n alignA += seq1[i-1]\n alignB += seq2[j-1]\n i -= 1\n j -= 1\n elif i > 0 and matrix[i,j] == matrix[i-1][j] + gap:\n alignA += seq1[i-1]\n alignB += \"-\"\n i -= 1\n else:\n alignA += \"-\"\n alignB += seq2[j-1]\n j -= 1\n aligned_sequence_output_A = reverse_sequence_A(alignA)\n aligned_sequence_output_B = reverse_sequence_B(alignB)\n \n print(dedent(f\"\"\"\n{matrix}\nMax score: {score1}\n{aligned_sequence_output_A}\n{aligned_sequence_output_B}\n \"\"\"))\n \n# Creates matrix \ndef needleman_matrix(seq1, seq2):\n \n #Penalties \n match = 1\n mismatch = -1\n gap = -2\n\n N,M = len(seq1), len(seq2)\n\n #2d array of zeros \n array_of_zeros = np.zeros((N+1,M+1)) \n matrix = array_of_zeros # Keeps track of all the paths taken\n score_table = np.zeros((N+1,M+1)) # Keeps track of all the scores \n \n for i in range(N+1):\n matrix[i][0] = gap * i\n for j in range(M+1):\n matrix[0][j] = gap * j\n \n for i in range(1,N+1):\n for j in range(1,M+1):\n if seq1[i-1] == seq2[j-1]: # Condtion checked if the nucleotides of the two sequeces match\n score = match\n else:\n score = mismatch \n score1 = matrix[i-1][j-1] + score # Depending on the match or mismatch score1 will store that\n score2 = matrix[i][j-1] + gap # Score2 reports back the score in an upwards direction\n score3 = matrix[i-1][j] + gap # Score3 reprots back a score in the leftward direction\n\n matrix[i,j] = max(score1,score2,score3) #reports back the max score of the three \n score_table[i,j] = score\n traceback(seq1,seq2,matrix,score_table,gap,score1)\n\n\ndef main():\n #Catches the seqeunces and stores into a list\n with open(sys.argv[1], 'r') as fasta: \n text = fasta.read() \n replaced_text = text.replace(\"\\n\", \" \")\n pattern = r\"(\\b[GATCRY\\n]+)\\b\" #Regex pattern\n sequences = re.findall(pattern, replaced_text)\n seq1 = sequences[0] \n seq2 = sequences[1]\n\n needleman_matrix(seq1, seq2)\n \n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"oshahzada98/Bioinformatics-Projects","sub_path":"Python/Needleman-Wunsch/needleman.py","file_name":"needleman.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"978711415","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom tqdm import trange\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom transformers import GPT2Config\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer\nimport math\n\nclass GPT2:\n\n def set_seed(self, seed, n_gpu):\n np.random.seed(seed)\n torch.manual_seed(seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(seed)\n\n def _top_k_top_p_filtering(self, logits):\n top_k = 0\n top_p = 0.9\n filter_value=-float('Inf')\n top_k = min(top_k, logits.size(-1)) # Safety check\n if top_k > 0:\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # scatter sorted tensors to original indexing\n indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)\n logits[indices_to_remove] = filter_value\n return logits\n\n def __sample_sequences__(self, model, length, context, num_samples):\n context = torch.tensor(context, dtype=torch.long, device=self.device)\n context = context.unsqueeze(0).repeat(num_samples, 1)\n generated = context\n result = []\n with torch.no_grad():\n for _ in trange(length):\n\n inputs = {'input_ids': generated}\n\n outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)\n next_token_logits = outputs[0][:, -1, :]\n\n # repetition penalty from CTRL (https://arxiv.org/abs/1909.05858)\n for i in range(num_samples):\n for _ in set(generated[i].tolist()):\n next_token_logits[i, _] /= 1.0\n \n filtered_logits = self._top_k_top_p_filtering(next_token_logits)\n if self.temperature == 0: # greedy sampling:\n next_token = torch.argmax(filtered_logits, dim=-1).unsqueeze(-1)\n else:\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n generated = torch.cat((generated, next_token), dim=1)\n result.append(generated)\n return result\n\n def __init__(self, model_scale=0, dummy=False):\n if not dummy:\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.n_gpu = torch.cuda.device_count()\n self.set_seed(42, self.n_gpu)\n self.num_samples = 1\n if model_scale == 0:\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"distilgpt2\")\n self.model = GPT2LMHeadModel.from_pretrained(\"distilgpt2\")\n elif model_scale == 1:\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n self.model = GPT2LMHeadModel.from_pretrained(\"gpt2\")\n elif model_scale == 2:\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2-medium\")\n self.model = GPT2LMHeadModel.from_pretrained(\"gpt2-medium\")\n elif model_scale == 3:\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2-large\")\n self.model = GPT2LMHeadModel.from_pretrained(\"gpt2-large\")\n else:\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2-xl\")\n self.model = 
GPT2LMHeadModel.from_pretrained(\"gpt2-xl\")\n\n self.model.to(self.device)\n self.model.eval()\n self.temperature = 1.0\n self.is_dummy = False\n else:\n self.is_dummy = True\n\n def generate_texts(self, prefix, length, num_samples):\n\n if self.is_dummy:\n return [\"This is a dummy text.\"]\n\n context_tokens = self.tokenizer.encode(prefix, add_special_tokens=False)\n\n out = self.__sample_sequences__(model=self.model, context=context_tokens, length=length, num_samples=num_samples)\n\n for t in out:\n t = t[:, len(context_tokens):].tolist()\n result = []\n for o in t:\n text = self.tokenizer.decode(o, clean_up_tokenization_spaces=True)\n result.append(text)\n\n return result\n\n def generate_text(self, prefix, length):\n texts = self.generate_texts(prefix, length, 1)\n if (len(texts) > 0):\n return texts[0]\n else:\n return \"\"\n\n # smaller result is more probable\n def score_probability(self, sentence):\n # https://github.com/huggingface/transformers/issues/1009\n \"\"\"tokenize_input = self.tokenizer.tokenize(sentence)\n tensor_input = torch.tensor([ [self.tokenizer.eos_token_id] + self.tokenizer.convert_tokens_to_ids(tokenize_input)])\n tensor_input = tensor_input.to(self.device)\n with torch.no_grad():\n outputs = self.model(tensor_input, labels=tensor_input)\n _, logits = outputs[:2] # first parameter is loss\n\n lp = 0.0\n for i in range(len(tokenize_input)):\n masked_index = i\n predicted_score = logits[0, masked_index].cpu()\n #predicted_prob = F.softmax(np.array(predicted_score))\n predicted_prob = F.softmax(predicted_score)\n predicted_prob = np.array(predicted_prob)\n lp += np.log(predicted_prob[self.tokenizer.convert_tokens_to_ids([tokenize_input[i]])[0]])\n return lp \"\"\"\n\n # https://github.com/huggingface/transformers/issues/473\n tokenize_input = self.tokenizer.tokenize(sentence)\n tensor_input = torch.tensor([self.tokenizer.convert_tokens_to_ids(tokenize_input)])\n tensor_input = tensor_input.to(self.device)\n outputs = self.model(tensor_input, labels=tensor_input)\n loss, _ = outputs[:2]\n return math.exp(loss)","repo_name":"padmalcom/InteractiveStorytelling","sub_path":"legacy/gpt2_old.py","file_name":"gpt2_old.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21253946197","text":"from groupproject3_finalversion import *\r\n\r\nmeasurements_from_UHF, measurements_from_dates = read_data(\"air_quality.csv\")\r\nmeasurements_from_zipcodes, measurements_from_boroughs = read_uhf(\"uhf.csv\")\r\n\r\ndef new_york_city_pollution(zipcode):\r\n nyc_uhf = measurements_from_zipcodes[zipcode]\r\n nyc_uhf = str(nyc_uhf[0])\r\n pollution_indexes = []\r\n nyc_uhf_pollution = measurements_from_UHF[nyc_uhf]\r\n\r\n for instance in nyc_uhf_pollution:\r\n pollution_indexes.append(instance[3])\r\n\r\n highest_pollution = max(pollution_indexes)\r\n lowest_pollution = min(pollution_indexes)\r\n\r\n text = f\"Highest pollution index in zipcode {zipcode} was {highest_pollution} and the lowest pollution index was {lowest_pollution}\"\r\n return text\r\n\r\ndef uhf_worst_pollution(year):\r\n year_users_data = year[2:]\r\n uhf_measurements = measurements_from_UHF.values()\r\n worst_pollution_index = 0\r\n most_polluted_uhf = 0\r\n for data_list in uhf_measurements:\r\n for measurement_tuple in data_list:\r\n date = measurement_tuple[2]\r\n year_from_date = date[len(date)-2:len(date)]\r\n if(year_users_data == year_from_date and measurement_tuple[3] > worst_pollution_index):\r\n worst_pollution_index = measurement_tuple[3]\r\n most_polluted_uhf = measurement_tuple[0]\r\n\r\n text = f\"{most_polluted_uhf} UHF had the worst pollution with the pollution index of {worst_pollution_index} in the year of {year}\"\r\n return text\r\n\r\ndef average_pollution(borough, year):\r\n borough_uhfs = measurements_from_boroughs[borough]\r\n year_users_data = year[2:]\r\n sum_pollution_indexes = 0\r\n counter = 0\r\n for uhf_id in borough_uhfs:\r\n uhf_id = str(uhf_id)\r\n measurement_tuples = measurements_from_UHF[uhf_id]\r\n for measurement_tuple in measurement_tuples:\r\n date = measurement_tuple[2]\r\n year_from_date = date[len(date)-2:len(date)]\r\n if(year_users_data == year_from_date):\r\n sum_pollution_indexes += measurement_tuple[3]\r\n counter += 1\r\n average = sum_pollution_indexes/counter\r\n return average\r\n\r\ndef most_polluted_borough_nyc(user_input_year):\r\n most_polluted_borough = \"\"\r\n most_polluted_borough_pollution_index = 0\r\n borough_names = [\"Manhattan\", \"Queens\", \"Brooklyn\", \"Bronx\", \"StatenIsland\"]\r\n\r\n for nyc_borough in borough_names:\r\n average_borough_pollution_index = average_pollution(nyc_borough,user_input_year)\r\n if(average_borough_pollution_index > most_polluted_borough_pollution_index):\r\n most_polluted_borough_pollution_index = average_borough_pollution_index\r\n most_polluted_borough = nyc_borough\r\n \r\n text2 = f\"The most polluted borough in {user_input_year} was {most_polluted_borough} with average pollution index of {most_polluted_borough_pollution_index:.2f}\"\r\n print(text2)\r\n return most_polluted_borough\r\n\r\ndef index_lower_10(measurements_from_UHF):\r\n uhf_lower_10 = set()\r\n uhf_measures = measurements_from_UHF.values()\r\n for data_set in uhf_measures:\r\n for measurement_tuple in data_set:\r\n uhf_pollution_index = measurement_tuple[3]\r\n if(measurement_tuple[2] == \"6/1/09\" and uhf_pollution_index < 10):\r\n uhf_lower_10.add(measurement_tuple[0])\r\n return uhf_lower_10\r\n\r\ndef main():\r\n zip_codes_nyc = measurements_from_zipcodes.keys()\r\n user_input_zipcode = \"\"\r\n while(user_input_zipcode not in zip_codes_nyc):\r\n user_input_zipcode = input(\"Enter a zip code: \")\r\n highest_lowest_pollution = new_york_city_pollution(user_input_zipcode)\r\n print(highest_lowest_pollution)\r\n 
user_input_year = 0\r\n while(user_input_year!=\"quit\"):\r\n user_input_year = int(input(\"Enter a year from 2009 to 2019 or type quit to go to the next prompt: \"))\r\n if(user_input_year >= 2009 and user_input_year<=2019):\r\n user_input_year = str(user_input_year)\r\n print(uhf_worst_pollution(user_input_year))\r\n break\r\n else:\r\n print(\"There is no data for the selected year. Try again\")\r\n\r\n user_input_borough = \"\"\r\n borough_names = [\"Manhattan\", \"Queens\", \"Brooklyn\", \"Bronx\", \"StatenIsland\"]\r\n while(user_input_borough not in borough_names):\r\n user_input_borough = input(\"Type in the name of one of the following boroughs: Manhattan, Queens, Brooklyn, Bronx, StatenIsland: \")\r\n\r\n average_pollution_in_area = average_pollution(user_input_borough, user_input_year)\r\n avg = f\"Average pollution index in {user_input_borough} in {user_input_year} is {average_pollution_in_area:.2f}\"\r\n print(avg)\r\n\r\n print(\"Which borough was by average the most polluted in \", user_input_year, \"?\")\r\n nyc_borough = most_polluted_borough_nyc(user_input_year)\r\n\r\n print(\"Which UHFs had a pollution index lower than 10 on 6/1/09?\")\r\n \r\n index_lower_than_10 = index_lower_10(measurements_from_UHF)\r\n print(index_lower_than_10)\r\n\r\nif __name__ == \"__main__\": \r\n main()","repo_name":"sandrazelen/Air-Pollution-Analysis","sub_path":"Zelen_air_pollution_additional_analysis.py","file_name":"Zelen_air_pollution_additional_analysis.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28674795324","text":"name=(input(\"Enter Your Name: \"))\r\n\r\ndef Fibo_series():\r\n num=int(input(\"Enter the Number for Fibonacci Series: \"))\r\n first=0\r\n new=1\r\n while new Tuple[str, int]:\r\n end_idx = s.find(delimiter, start_idx)\r\n if end_idx == -1:\r\n return (s[start_idx:], -1)\r\n if inclusive:\r\n end_idx += len(delimiter)\r\n return (s[start_idx:end_idx], end_idx)\r\n\r\n\r\ndef read_to_regex_match(s: str, start_idx: int, regex: str) -> Tuple[str, str, int]:\r\n match = re.search(regex, s[start_idx:], re.M)\r\n if not match:\r\n return (s[start_idx:], None, -1)\r\n start_match_idx = start_idx + match.span()[0]\r\n end_idx = start_idx + match.span()[1]\r\n return (s[start_idx:start_match_idx], match.group(), end_idx)\r\n\r\n\r\ndef skip_optional(s: str, start_idx: int, text: str) -> int:\r\n if s[start_idx:].startswith(text):\r\n return start_idx + len(text)\r\n return start_idx\r\n\r\n\r\ndef skip_optional_regex(s: str, start_idx: int, regex: str) -> int:\r\n match = re.match(regex, s[start_idx:], re.M)\r\n if not match:\r\n return start_idx\r\n return start_idx + match.span()[1] - match.span()[0]\r\n\r\n\r\ndef sql_to_columns() -> Dict[str, List[str]]:\r\n \"\"\"\r\n Pulls the latest table structures from the MusicBrainz GitHub and returns the columns for each table.\r\n\r\n :return: tables_to_columns\r\n \"\"\"\r\n sql_commented = requests.get(\r\n 'https://raw.githubusercontent.com/metabrainz/musicbrainz-server/master/admin/sql/CreateTables.sql').text\r\n sql_lines = sql_commented.split('\\n')\r\n for i in range(len(sql_lines)):\r\n sql_lines[i] = read_to_regex_match(sql_lines[i], 0, r'(--|$)')[0]\r\n sql = '\\n'.join(sql_lines)\r\n ignore_col_names = ['CONSTRAINT', 'INDEX', 'KEY', 'UNIQUE', 'PRIMARY', 'FULLTEXT', 'SPATIAL', 'CHECK']\r\n\r\n tables_to_columns = {}\r\n start_idx = 0\r\n nesting = {'CREATE': 0, 'parens': 0}\r\n current_table = None\r\n current_token_id = 0\r\n next_token_id = 0\r\n while start_idx >= 0:\r\n if nesting['CREATE'] == 0:\r\n _discard_, start_idx = read_to_delimiter(sql, start_idx, 'CREATE TABLE ', inclusive=True)\r\n if start_idx >= 0:\r\n start_idx = skip_optional(sql, start_idx, 'IF NOT EXISTS ')\r\n current_table, _discard_, start_idx = read_to_regex_match(sql, start_idx, r'[ \\t\\n]+')\r\n _discard_, _discard2_, start_idx = read_to_regex_match(sql, start_idx, r'[(]')\r\n tables_to_columns[current_table] = []\r\n nesting['CREATE'] = 1\r\n else:\r\n if nesting['parens'] == 0:\r\n start_idx = skip_optional_regex(sql, start_idx, r'[ \\t\\n]+')\r\n token, boundary, start_idx = read_to_regex_match(sql, start_idx, r'[(,)]')\r\n if next_token_id == current_token_id:\r\n next_token_id += 1\r\n col_name, _discard_, _discard2_ = read_to_regex_match(token, 0, r'[ \\t\\n]+')\r\n if col_name not in ignore_col_names:\r\n tables_to_columns[current_table].append(col_name)\r\n if boundary == '(':\r\n nesting['parens'] = 1\r\n elif boundary == ',':\r\n current_token_id += 1\r\n elif boundary == ')':\r\n current_token_id += 1\r\n nesting['CREATE'] = 0\r\n else:\r\n _discard_, boundary, start_idx = read_to_regex_match(sql, start_idx, r'[()]')\r\n if boundary == '(':\r\n nesting['parens'] += 1\r\n elif boundary == ')':\r\n nesting['parens'] -= 1\r\n return tables_to_columns\r\n","repo_name":"dylanburati/mbz-to-csv","sub_path":"sql_to_columns.py","file_name":"sql_to_columns.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"923706294","text":"import time\n\nAvacadobear = Actor(\"ab1\")\nAvacadobear.pos = 100, 50\n\nWIDTH = 500\nHEIGHT = Avacadobear.height + 20\n\ndef draw():\n screen.clear()\n screen.fill((255, 255, 255))\n Avacadobear.draw()\n\ndef update():\n Avacadobear.left = Avacadobear.left + 2\n if Avacadobear.left > WIDTH:\n Avacadobear.right = 0\n\ndef on_mouse_down(pos):\n if Avacadobear.collidepoint(pos):\n print(\"Eek!\")\n Avacadobear.image = 'ab2'\n time.sleep(0.5)\n Avacadobear.image = 'ab1'\n else:\n print(\"You missed me!\")\n","repo_name":"RaisaAhsan/game","sub_path":"pygame/click_game.py","file_name":"click_game.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4010591670","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 20 08:26:52 2019\r\n\r\n@author: HLB\r\n\"\"\"\r\n\r\nimport scipy.io as sio\r\nimport numpy as np\r\nfrom sklearn.decomposition import PCA\r\nfrom build_EMP import build_emp \r\n\r\ndef pca_whitening(image, number_of_pc):\r\n\r\n shape = image.shape\r\n \r\n image = np.reshape(image, [shape[0]*shape[1], shape[2]])\r\n number_of_rows = shape[0]\r\n number_of_columns = shape[1]\r\n pca = PCA(n_components = number_of_pc)\r\n image = pca.fit_transform(image)\r\n pc_images = np.zeros(shape=(number_of_rows, number_of_columns, number_of_pc),dtype=np.float32)\r\n for i in range(number_of_pc):\r\n pc_images[:, :, i] = np.reshape(image[:, i], (number_of_rows, number_of_columns))\r\n \r\n return pc_images\r\n\r\ndef load_data(dataset):\r\n if dataset == 'Indian':\r\n image_file = r'.\\datasets/Indian\\indian_pines_corrected.mat'\r\n label_file = r'.\\datasets/Indian\\Indian_pines_gt.mat'\r\n image_data = sio.loadmat(image_file)\r\n label_data = sio.loadmat(label_file)\r\n image = image_data['indian_pines_corrected']\r\n label = label_data['indian_pines_gt']\r\n elif dataset == 'Pavia':\r\n image_file = r'.\\datasets\\Pavia\\Pavia.mat'\r\n label_file = r'.\\datasets\\Pavia\\Pavia_groundtruth.mat'\r\n image_data = sio.loadmat(image_file)\r\n label_data = sio.loadmat(label_file) \r\n image = image_data['paviaU']#pavia1\r\n label = label_data['paviaU_gt']#pavia1\r\n elif dataset == 'CASI':\r\n image_file = r'.\\datasets\\Houston\\CASI.mat'\r\n label_file = r'.\\datasets\\Houston\\CASI_gnd_flag.mat'\r\n image_data = sio.loadmat(image_file)\r\n label_data = sio.loadmat(label_file) \r\n image = image_data['CASI']\r\n label = label_data['gnd_flag'] # houston \r\n else:\r\n raise Exception('dataset does not find')\r\n image = image.astype(np.float32)\r\n \r\n return image, label\r\n \r\n\r\ndef readdata(type, dataset, windowsize, train_num, val_num, num):\r\n\r\n or_image, or_label = load_data(dataset)\r\n # image = np.expand_dims(image, 2)\r\n halfsize = int((windowsize-1)/2)\r\n number_class = np.max(or_label)\r\n \r\n image = np.pad(or_image, ((halfsize, halfsize), (halfsize, halfsize), (0, 0)), 'edge')\r\n label = np.pad(or_label, ((halfsize, halfsize), (halfsize, halfsize)), 'constant',constant_values=0)\r\n \r\n if type == 'PCA':\r\n image1 = pca_whitening(image, number_of_pc = 3)\r\n elif type == 'EMP':\r\n image1 = pca_whitening(image, number_of_pc = 4)\r\n num_openings_closings = 3\r\n emp_image = build_emp(base_image=image1, num_openings_closings=num_openings_closings)\r\n image1 = emp_image\r\n elif type == 'none':\r\n image1 = np.copy(image)\r\n else:\r\n raise Exception('type does not find')\r\n image = (image1 - np.min(image1)) / (np.max(image1) - np.min(image1)) \r\n #set the manner of selecting training samples \r\n \r\n \r\n n = np.zeros(number_class,dtype=np.int64)\r\n for i in range(number_class):\r\n temprow, tempcol = np.where(label == i + 1)\r\n n[i] = len(temprow) \r\n total_num = np.sum(n)\r\n \r\n nTrain_perClass = np.ones(number_class,dtype=np.int64) * train_num\r\n for i in range(number_class):\r\n if n[i] <= nTrain_perClass[i]: \r\n nTrain_perClass[i] = 15 \r\n ###\r\n nValidation_perClass = (n/total_num)*val_num\r\n nvalid_perClass = nValidation_perClass.astype(np.int32) \r\n \r\n index = []\r\n flag = 0\r\n fl = 0\r\n\r\n \r\n bands = np.size(image,2) \r\n validation_image = np.zeros([np.sum(nvalid_perClass), windowsize, windowsize, bands], dtype=np.float32)\r\n validation_label = 
np.zeros(np.sum(nvalid_perClass), dtype=np.int64)\r\n train_image = np.zeros([np.sum(nTrain_perClass), windowsize, windowsize, bands], dtype=np.float32)\r\n train_label = np.zeros(np.sum(nTrain_perClass),dtype=np.int64)\r\n train_index = np.zeros([np.sum(nTrain_perClass), 2], dtype = np.int32) \r\n val_index = np.zeros([np.sum(nvalid_perClass), 2], dtype = np.int32) \r\n \r\n for i in range(number_class): \r\n temprow, tempcol = np.where(label == i + 1)\r\n matrix = np.zeros([len(temprow),2], dtype=np.int64)\r\n matrix[:,0] = temprow\r\n matrix[:,1] = tempcol\r\n np.random.seed(num)\r\n np.random.shuffle(matrix)\r\n \r\n temprow = matrix[:,0]\r\n tempcol = matrix[:,1] \r\n index.append(matrix)\r\n\r\n for j in range(nTrain_perClass[i]):\r\n train_image[flag + j, :, :, :] = image[(temprow[j] - halfsize):(temprow[j] + halfsize + 1),\r\n (tempcol[j] - halfsize):(tempcol[j] + halfsize + 1)]\r\n train_label[flag + j] = i\r\n train_index[flag + j] = matrix[j,:]\r\n flag = flag + nTrain_perClass[i]\r\n\r\n for j in range(nTrain_perClass[i], nTrain_perClass[i] + nvalid_perClass[i]):\r\n validation_image[fl + j-nTrain_perClass[i], :, :,:] = image[(temprow[j] - halfsize):(temprow[j] + halfsize + 1),\r\n (tempcol[j] - halfsize):(tempcol[j] + halfsize + 1)]\r\n validation_label[fl + j-nTrain_perClass[i] ] = i \r\n val_index[fl + j-nTrain_perClass[i]] = matrix[j,:]\r\n fl =fl + nvalid_perClass[i]\r\n \r\n\r\n return train_image, train_label, validation_image, validation_label,nTrain_perClass, nvalid_perClass,train_index, val_index, index, image, label,total_num\r\n","repo_name":"Candy-CY/Hyperspectral-Image-Classification-Models","sub_path":"SSMTR/data_read.py","file_name":"data_read.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"3"}
+{"seq_id":"15044506214","text":"from tkinter import * \r\nfrom tkinter.ttk import *\r\nfrom turtle import *\r\nfrom time import *\r\nroot = Tk()\r\n\r\nlost = [\"lil\",\" lol\", \"lel\"]\r\n\r\ntucan = Canvas(root, width= 500, height=500)\r\ntucan.pack()\r\n\r\nt= RawTurtle(tucan)\r\nt.forward(50)\r\n\r\naussuch = Combobox(root, height = 5 , width =15, values=lost)\r\naussuch.pack()\r\naussuch.set(lost[0])\r\n\r\n\"\"\"\r\n\r\n\r\nfor ii in range(10):\r\n for i in lost:\r\n a= i\r\n text = Label(root, text=a)\r\n text.grid(row=1, column=1)\r\n time.sleep(0.5)\r\n root.mainloop\r\n\r\n\"\"\" \r\n\r\nf = Frame(root)\r\nf.pack()\r\n\r\nl1 = Label(f, text=\"123\")\r\nl2 = Label(f, text=\"456\")\r\nll = Label(f, text=\"000\", relief=SUNKEN)\r\n\r\nl1.grid(row=0, column=0)\r\nl2.grid(row=1, column=1)\r\nll.grid(row=2, column=0, columnspan=2,sticky=W+E)\r\n\r\n\r\nroot.mainloop()","repo_name":"Enricone27/Random-Projekte","sub_path":"Python/tkinter tests/tktest3.py","file_name":"tktest3.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"35570795929","text":"\nimport os\n\n# basic\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import class_weight\nfrom tqdm import tqdm, trange\nimport time\nimport pprint\nimport datetime\nimport argparse\nfrom scipy.stats import gmean\nimport yaml\nimport shutil\n\n# keras\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n\n# DIY\nimport utils_classif\nfrom feat_ext import load_audio_file, get_mel_spectrogram, modify_file_variable_length\nfrom data import get_label_files, DataGeneratorPatch, PatchGeneratorPerFile\nfrom architectures import get_model_crnn_seld_tagger\nfrom eval import Evaluator\n\nimport csv\nimport sys\nsys.path.append('../')\nfrom parameters import get_params\nfrom compute_doa_metrics import compute_DOA_metrics\nfrom file_utils import write_metadata_result_file, build_result_dict_from_metadata_array, write_output_result_file\n\n\nstart = time.time()\n\nnow = datetime.datetime.now()\nprint(\"Current date and time:\")\nprint(str(now))\n\n# =========================================================================================================\n# =========================================================================================================\n\n# ==================================================================== ARGUMENTS\nparser = argparse.ArgumentParser(description='DCASE2019 Task3')\nparser.add_argument('-p', '--params_yaml',\n dest='params_yaml',\n action='store',\n required=False,\n type=str)\nargs = parser.parse_args()\nprint('\\nYaml file with parameters defining the experiment: %s\\n' % str(args.params_yaml))\n\n\n# =========================================================================Parameters, paths and variables\n# =========================================================================Parameters, paths and variables\n# =========================================================================Parameters, paths and variables\n\n# Read parameters file from yaml passed by argument\nparams = yaml.load(open(args.params_yaml))\nparams_ctrl = params['ctrl']\nparams_extract = params['extract']\nparams_learn = params['learn']\nparams_loss = params['loss']\nparams_recog = params['recognizer']\nparams_crnn = params['crnn']\n\nsuffix_in = params['suffix'].get('in')\nsuffix_out = params['suffix'].get('out')\n\n# determine loss function for stage 1 (or entire training)\nif params_loss.get('type') == 'CCE':\n params_loss['type'] = 'categorical_crossentropy'\nelif params_loss.get('type') == 'MAE':\n params_loss['type'] = 'mean_absolute_error'\n\nparams_extract['audio_len_samples'] = int(params_extract.get('fs') * params_extract.get('audio_len_s'))\n\n# vip to deploy. 
for public, put directly params_ctrl.gt('dataset_path') within params_path\npath_root_data = params_ctrl.get('dataset_path')\n\nparams_path = {'path_to_features': os.path.join(path_root_data, 'features'),\n # 'featuredir_dev': 'audio_dev_varup1/',\n # 'featuredir_eval': 'audio_eval_varup1/',\n 'featuredir_dev': 'audio_dev_varup2_64mel/',\n 'featuredir_eval': 'audio_eval_varup2_64mel/',\n # 'featuredir_dev_param': 'audio_dev_param_varup2_64mel/',\n # 'featuredir_eval_param': 'audio_eval_param_varup2_64mel/',\n 'featuredir_dev_param': 'audio_dev_param_Q_varup2_64mel/',\n 'featuredir_eval_param': 'audio_eval_param_Q_varup2_64mel/',\n # 'featuredir_dev': 'audio_dev_varup1_64mel/',\n # 'featuredir_eval': 'audio_eval_varup1_64mel/',\n 'path_to_dataset': path_root_data,\n 'audiodir_dev': 'wav/dev/',\n 'audiodir_eval': 'wav/eval/',\n # 'audiodir_dev_param': 'wav/dev_param/',\n # 'audiodir_eval_param': 'wav/eval_param/',\n 'audiodir_dev_param': 'wav/dev_param_Q/',\n 'audiodir_eval_param': 'wav/eval_param_Q/',\n 'audio_shapedir_dev': 'audio_dev_shapes/',\n 'audio_shapedir_eval': 'audio_eval_shapes/',\n # 'audio_shapedir_dev_param': 'audio_dev_param_shapes/',\n # 'audio_shapedir_eval_param': 'audio_eval_param_shapes/',\n 'audio_shapedir_dev_param': 'audio_dev_param_Q_shapes/',\n 'audio_shapedir_eval_param': 'audio_eval_param_Q_shapes/',\n 'gt_files': path_root_data}\n\nif params_extract.get('n_mels') == 40:\n params_path['featuredir_dev'] = 'audio_dev_varup2_40mel/'\n params_path['featuredir_eval'] = 'audio_eval_varup2_40mel/'\n # params_path['featuredir_dev_param'] = 'audio_dev_param_varup2_40mel/'\n # params_path['featuredir_eval_param'] = 'audio_eval_param_varup2_40mel/'\n params_path['featuredir_dev_param'] = 'audio_dev_param_Q_varup2_40mel/'\n params_path['featuredir_eval_param'] = 'audio_eval_param_Q_varup2_40mel/'\nelif params_extract.get('n_mels') == 96:\n params_path['featuredir_dev'] = 'audio_dev_varup2_96mel/'\n params_path['featuredir_eval'] = 'audio_eval_varup2_96mel/'\n # params_path['featuredir_dev_param'] = 'audio_dev_param_varup2_96mel/'\n # params_path['featuredir_eval_param'] = 'audio_eval_param_varup2_96mel/'\n params_path['featuredir_dev_param'] = 'audio_dev_param_Q_varup2_96mel/'\n params_path['featuredir_eval_param'] = 'audio_eval_param_Q_varup2_96mel/'\nelif params_extract.get('n_mels') == 128:\n params_path['featuredir_dev'] = 'audio_dev_varup2_128mel/'\n params_path['featuredir_eval'] = 'audio_eval_varup2_128mel/'\n # params_path['featuredir_dev_param'] = 'audio_dev_param_varup2_128mel/'\n # params_path['featuredir_eval_param'] = 'audio_eval_param_varup2_128mel/'\n params_path['featuredir_dev_param'] = 'audio_dev_param_Q_varup2_128mel/'\n params_path['featuredir_eval_param'] = 'audio_eval_param_Q_varup2_128mel/'\n\nparams_path['featurepath_dev'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_dev'))\nparams_path['featurepath_eval'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_eval'))\nparams_path['featurepath_dev_param'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_dev_param'))\nparams_path['featurepath_eval_param'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_eval_param'))\n\nparams_path['audiopath_dev'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_dev'))\nparams_path['audiopath_eval'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_eval'))\nparams_path['audiopath_dev_param'] = 
os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_dev_param'))\nparams_path['audiopath_eval_param'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_eval_param'))\n\n\nparams_path['audio_shapedir_dev'] = os.path.join(params_path.get('path_to_dataset'),\n params_path.get('audio_shapedir_dev'))\nparams_path['audio_shapedir_eval'] = os.path.join(params_path.get('path_to_dataset'),\n params_path.get('audio_shapedir_eval'))\nparams_path['audio_shapedir_dev_param'] = os.path.join(params_path.get('path_to_dataset'),\n params_path.get('audio_shapedir_dev_param'))\nparams_path['audio_shapedir_eval_param'] = os.path.join(params_path.get('path_to_dataset'),\n params_path.get('audio_shapedir_eval_param'))\n\n\n# ======================================================== SPECIFIC PATHS TO SOME IMPORTANT FILES\n# ground truth, load model, save model, predictions, results\nparams_files = {'gt_eval': os.path.join(params_path.get('gt_files'), 'gt_eval.csv'),\n 'gt_dev': os.path.join(params_path.get('gt_files'), 'gt_dev.csv')}\n\npath_trained_models = utils_classif.make_sure_isdir('trained_models', params_ctrl.get('output_file'))\nparams_files['save_model'] = os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' + str(params_ctrl.get('count_trial')) + '.h5')\npath_predictions = utils_classif.make_sure_isdir('predictions', params_ctrl.get('output_file'))\nparams_files['predictions'] = os.path.join(path_predictions, params_ctrl.get('output_file') + '_v' + str(params_ctrl.get('count_trial')) + '.csv')\npath_results = utils_classif.make_sure_isdir('logs/results', params_ctrl.get('output_file'))\nparams_files['results'] = os.path.join(path_results, params_ctrl.get('output_file') + '.pickle')\n# params_files['event_durations'] = os.path.join('logs/pics', params_ctrl.get('output_file') + '_event_durations.pickle')\n\n# # ============================================= print all params to keep record in output file\nprint('\\nparams_ctrl=')\npprint.pprint(params_ctrl, width=1, indent=4)\nprint('params_files=')\npprint.pprint(params_files, width=1, indent=4)\nprint('params_extract=')\npprint.pprint(params_extract, width=1, indent=4)\nprint('params_learn=')\npprint.pprint(params_learn, width=1, indent=4)\nprint('params_loss=')\npprint.pprint(params_loss, width=1, indent=4)\nprint('params_recog=')\npprint.pprint(params_recog, width=1, indent=4)\nprint('params_crnn=')\npprint.pprint(params_crnn, width=1, indent=4)\nprint('\\n')\n\n\n# ============================================================== READ TRAIN and TEST DATA\n# ============================================================== READ TRAIN and TEST DATA\n# ============================================================== READ TRAIN and TEST DATA\n# ============================================================== READ TRAIN and TEST DATA\n\n# aim: lists with all wav files for dev, which includes train/val/test\ngt_dev = pd.read_csv(params_files.get('gt_dev'))\nsplitlist_audio_dev = gt_dev.split.values.tolist()\nfilelist_audio_dev = gt_dev.fname.values.tolist()\n\n# create dict with ground truth mapping with labels:\n# -key: path to wav\n# -value: the ground truth label too\nfile_to_label = {params_path.get('audiopath_dev') + k: v for k, v in zip(gt_dev.fname.values, gt_dev.label.values)}\n\n# ========================================================== CREATE VARS FOR DATASET MANAGEMENT\n# list with unique n_classes labels and aso_ids\nlist_labels = sorted(list(set(gt_dev.label.values)))\n\n# create 
dicts such that key: value is as follows\n# fixed by DCASE\nlabel_to_int = {\n 'clearthroat': 2,\n 'cough': 8,\n 'doorslam': 9,\n 'drawer': 1,\n 'keyboard': 6,\n 'keysDrop': 4,\n 'knock': 0,\n 'laughter': 10,\n 'pageturn': 7,\n 'phone': 3,\n 'speech': 5\n}\nint_to_label = {v: k for k, v in label_to_int.items()}\n\n# create ground truth mapping with categorical values\nfile_to_label_numeric = {k: label_to_int[v] for k, v in file_to_label.items()}\n\n\n#\n# ========================================================== FEATURE EXTRACTION\n# ========================================================== FEATURE EXTRACTION\n# ========================================================== FEATURE EXTRACTION\n# compute T_F representation\n# mel-spectrogram for all files in the dataset and store it\nvar_lens = {item: [] for item in label_to_int.keys()}\nvar_lens['overall'] = []\n\nvar_lens_dev_param = {}\nvar_lens_dev_param['overall'] = []\n\nif params_ctrl.get('feat_ext'):\n if params_ctrl.get('pipeline') == 'T_F':\n n_extracted_dev = 0; n_extracted_te = 0; n_failed_dev = 0; n_failed_te = 0\n n_extracted_dev_param = 0; n_failed_dev_param = 0\n\n # only if features have not been extracted, ie\n # if folder does not exist, or it exists with less than 80% of the feature files\n # create folder and extract features\n nb_files_dev = len(filelist_audio_dev)\n if not os.path.exists(params_path.get('featurepath_dev')) or \\\n len(os.listdir(params_path.get('featurepath_dev'))) < nb_files_dev*0.8:\n\n if os.path.exists(params_path.get('featurepath_dev')):\n shutil.rmtree(params_path.get('featurepath_dev'))\n os.makedirs(params_path.get('featurepath_dev'))\n\n print('\\nFeature extraction for dev set (prints enabled). Features dumped in {}.........................'.\n format(params_path.get('featurepath_dev')))\n for idx, f_name in enumerate(filelist_audio_dev):\n f_path = os.path.join(params_path.get('audiopath_dev'), f_name)\n if os.path.isfile(f_path) and f_name.endswith('.wav'):\n # load entire audio file and modify variable length, if needed\n y = load_audio_file(f_path, input_fixed_length=params_extract['audio_len_samples'], params_extract=params_extract)\n\n # keep record of the lengths, per class, for insight\n duration_seconds = len(y)/int(params_extract.get('fs'))\n var_lens[f_name.split('_')[0]].append(duration_seconds)\n var_lens['overall'].append(duration_seconds)\n\n y = modify_file_variable_length(data=y,\n input_fixed_length=params_extract['audio_len_samples'],\n params_extract=params_extract)\n # print('Considered audio length: %6.3f' % (len(y) / params_extract.get('fs')))\n # print('%-22s: [%d/%d] of %s' % ('Extracting tr features', (idx + 1), nb_files_tr, f_path))\n\n # compute log-scaled mel spec. 
row x col = time x freq\n mel_spectrogram = get_mel_spectrogram(audio=y, params_extract=params_extract)\n\n # save the T_F rep to a binary file (only the considered length)\n utils_classif.save_tensor(var=mel_spectrogram,\n out_path=os.path.join(params_path.get('featurepath_dev'),\n f_name.replace('.wav', '.data')), suffix='_mel')\n\n # save also label\n utils_classif.save_tensor(var=np.array([file_to_label_numeric[f_path]], dtype=float),\n out_path=os.path.join(params_path.get('featurepath_dev'),\n f_name.replace('.wav', '.data')), suffix='_label')\n\n if os.path.isfile(os.path.join(params_path.get('featurepath_dev'),\n f_name.replace('.wav', suffix_in + '.data'))):\n n_extracted_dev += 1\n print('%-22s: [%d/%d] of %s' % ('Extracted dev features', (idx + 1), nb_files_dev, f_path))\n else:\n n_failed_dev += 1\n print('%-22s: [%d/%d] of %s' % ('FAILING to extract dev features', (idx + 1), nb_files_dev, f_path))\n else:\n print('%-22s: [%d/%d] of %s' % ('this dev audio is in the csv but not in the folder', (idx + 1), nb_files_dev, f_path))\n\n print('n_extracted_dev: {0} / {1}'.format(n_extracted_dev, nb_files_dev))\n print('n_failed_dev: {0} / {1}\\n'.format(n_failed_dev, nb_files_dev))\n\n else:\n print('Dev set is already extracted in {}'.format(params_path.get('featurepath_dev')))\n\n\n # do feature extraction for dev_param (outcome of complete parametric frontend)========================================\n # do feature extraction for dev_param (outcome of complete parametric frontend)========================================\n audio_files_dev_param = [f for f in os.listdir(params_path.get('audiopath_dev_param')) if not f.startswith('.')]\n\n nb_files_dev_param = len(audio_files_dev_param)\n if not os.path.exists(params_path.get('featurepath_dev_param')) or \\\n len(os.listdir(params_path.get('featurepath_dev_param'))) < nb_files_dev_param * 0.8:\n\n if os.path.exists(params_path.get('featurepath_dev_param')):\n shutil.rmtree(params_path.get('featurepath_dev_param'))\n os.makedirs(params_path.get('featurepath_dev_param'))\n\n print(\n '\\nFeature extraction for dev set parametric (outcome of parametric frontend). Features dumped in {}.........................'.\n format(params_path.get('featurepath_dev_param')))\n for idx, f_name in enumerate(audio_files_dev_param):\n f_path = os.path.join(params_path.get('audiopath_dev_param'), f_name)\n if os.path.isfile(f_path) and f_name.endswith('.wav'):\n # load entire audio file and modify variable length, if needed\n y = load_audio_file(f_path, input_fixed_length=params_extract['audio_len_samples'],\n params_extract=params_extract)\n\n # keep record of the lengths, per class, for insight\n duration_seconds = len(y) / int(params_extract.get('fs'))\n var_lens_dev_param['overall'].append(duration_seconds)\n\n y = modify_file_variable_length(data=y,\n input_fixed_length=params_extract['audio_len_samples'],\n params_extract=params_extract)\n # print('Considered audio length: %6.3f' % (len(y) / params_extract.get('fs')))\n # print('%-22s: [%d/%d] of %s' % ('Extracting tr features', (idx + 1), nb_files_tr, f_path))\n\n # compute log-scaled mel spec. 
row x col = time x freq\n mel_spectrogram = get_mel_spectrogram(audio=y, params_extract=params_extract)\n\n # save the T_F rep to a binary file (only the considered length)\n utils_classif.save_tensor(var=mel_spectrogram,\n out_path=os.path.join(params_path.get('featurepath_dev_param'),\n f_name.replace('.wav', '.data')), suffix='_mel')\n\n if os.path.isfile(os.path.join(params_path.get('featurepath_dev_param'),\n f_name.replace('.wav', suffix_in + '.data'))):\n n_extracted_dev_param += 1\n print('%-22s: [%d/%d] of %s' % ('Extracted dev_param features', (idx + 1), nb_files_dev_param, f_path))\n else:\n n_failed_dev_param += 1\n print('%-22s: [%d/%d] of %s' % (\n 'FAILING to extract dev_param features', (idx + 1), nb_files_dev_param, f_path))\n else:\n print('%-22s: [%d/%d] of %s' % (\n 'this dev_param audio is in the csv but not in the folder', (idx + 1), nb_files_dev_param, f_path))\n\n print('n_extracted_dev_param: {0} / {1}'.format(n_extracted_dev_param, nb_files_dev_param))\n print('n_failed_dev_param: {0} / {1}\\n'.format(n_failed_dev_param, nb_files_dev_param))\n\n else:\n print('Dev_param set is already extracted in {}'.format(params_path.get('featurepath_dev_param')))\n\n\n# select the subset of training data to consider: all, clean, noisy, noisy_small\n# =====================================================================================================================\n# =====================================================================================================================\n\nff_list_dev = [filelist_audio_dev[i].replace('.wav', suffix_in + '.data') for i in range(len(filelist_audio_dev))]\nlabels_audio_dev = get_label_files(filelist=ff_list_dev,\n dire=params_path.get('featurepath_dev'),\n suffix_in=suffix_in,\n suffix_out=suffix_out\n )\n\nprint('Number of clips considered as dev set: {0}'.format(len(ff_list_dev)))\nprint('Number of labels loaded for dev set: {0}'.format(len(labels_audio_dev)))\n\nscalers = [None]*4\n# determine the validation setup according to the folds, and perform training / val / test for each fold\nfor kfo in range(1, 5):\n print('\\n=========================================================================================================')\n print('===Processing fold {} within the x-val setup...'.format(kfo))\n print('=========================================================================================================\\n')\n # x-val setup given by DCASE organizers\n if kfo == 1:\n splits_tr = [3, 4]\n splits_val = [2]\n splits_te = [1]\n elif kfo == 2:\n splits_tr = [4, 1]\n splits_val = [3]\n splits_te = [2]\n elif kfo == 3:\n splits_tr = [1, 2]\n splits_val = [4]\n splits_te = [3]\n elif kfo == 4:\n splits_tr = [2, 3]\n splits_val = [1]\n splits_te = [4]\n\n params_ctrl['current_fold'] = kfo\n tr_files0 = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_tr[0]]\n tr_files1 = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_tr[1]]\n tr_files = tr_files0 + tr_files1\n val_files = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_val[0]]\n te_files = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_te[0]]\n\n # SC\n if len(tr_files) + len(val_files) + len(te_files) != len(ff_list_dev):\n print('ERROR: You messed up in x-val setup for fold: {0}'.format(len(kfo)))\n print('{} is not {}'.format(len(tr_files) + len(val_files) + len(te_files), len(ff_list_dev)))\n\n # 
============================================================BATCH GENERATION\n # ============================================================BATCH GENERATION\n\n tr_gen_patch = DataGeneratorPatch(feature_dir=params_path.get('featurepath_dev'),\n file_list=tr_files,\n params_learn=params_learn,\n params_extract=params_extract,\n suffix_in='_mel',\n suffix_out='_label',\n floatx=np.float32\n )\n # to predict later on on dev_param clips\n scalers[kfo-1] = tr_gen_patch.scaler\n\n print(\"Total number of instances *only* for training: %s\" % str(tr_gen_patch.nb_inst_total))\n print(\"Batch_size: %s\" % str(tr_gen_patch.batch_size))\n print(\"Number of iterations (batches) in the training subset: %s\" % str(tr_gen_patch.nb_iterations))\n print(\"\\nShape of training subset: %s\" % str(tr_gen_patch.features.shape))\n print(\"Shape of labels in training subset: %s\" % str(tr_gen_patch.labels.shape))\n\n # compute class_weigths based on the labels generated\n if params_learn.get('mode_class_weight'):\n labels_nice = np.reshape(tr_gen_patch.labels, -1) # remove singleton dimension\n class_weights = class_weight.compute_class_weight('balanced',\n np.unique(labels_nice),\n labels_nice)\n class_weights_dict = dict(enumerate(class_weights))\n else:\n class_weights_dict = None\n\n val_gen_patch = DataGeneratorPatch(feature_dir=params_path.get('featurepath_dev'),\n file_list=val_files,\n params_learn=params_learn,\n params_extract=params_extract,\n suffix_in='_mel',\n suffix_out='_label',\n floatx=np.float32,\n scaler=tr_gen_patch.scaler\n )\n\n print(\"\\nShape of validation subset: %s\" % str(val_gen_patch.features.shape))\n print(\"Shape of labels in validation subset: %s\" % str(val_gen_patch.labels.shape))\n\n # ============================================================DEFINE AND FIT A MODEL\n # ============================================================DEFINE AND FIT A MODEL\n\n tr_loss, val_loss = [0] * params_learn.get('n_epochs'), [0] * params_learn.get('n_epochs')\n # ============================================================\n if params_ctrl.get('learn'):\n if params_learn.get('model') == 'crnn_seld_tagger':\n model = get_model_crnn_seld_tagger(params_crnn=params_crnn, params_learn=params_learn,\n params_extract=params_extract)\n\n if params_learn.get('stages') == 1:\n\n opt = Adam(lr=params_learn.get('lr'))\n model.compile(optimizer=opt, loss=params_loss.get('type'), metrics=['accuracy'])\n model.summary()\n\n # callbacks\n if params_learn.get('early_stop') == \"val_acc\":\n early_stop = EarlyStopping(monitor='val_acc', patience=params_learn.get('patience'), min_delta=0.001, verbose=1)\n elif params_learn.get('early_stop') == \"val_loss\":\n early_stop = EarlyStopping(monitor='val_loss', patience=params_learn.get('patience'), min_delta=0,\n verbose=1)\n\n # save one best model for every fold, as needed for submission\n params_files['save_model'] = os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f' + str(kfo) + '.h5')\n checkpoint = ModelCheckpoint(params_files.get('save_model'), monitor='val_acc', verbose=1, save_best_only=True)\n\n reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=5, verbose=1)\n callback_list = [checkpoint, early_stop, reduce_lr]\n\n hist = model.fit_generator(tr_gen_patch,\n steps_per_epoch=tr_gen_patch.nb_iterations,\n epochs=params_learn.get('n_epochs'),\n validation_data=val_gen_patch,\n validation_steps=val_gen_patch.nb_iterations,\n class_weight=class_weights_dict,\n 
workers=4,\n verbose=2,\n callbacks=callback_list)\n\n # ==================================================================================================== PREDICT\n # ==================================================================================================== PREDICT\n\n print('\\nCompute predictions on test split, and save them in csv:==============================================\\n')\n\n # to store prediction probabilites\n te_preds = np.empty((len(te_files), params_learn.get('n_classes')))\n\n te_gen_patch = PatchGeneratorPerFile(feature_dir=params_path.get('featurepath_dev'),\n file_list=te_files,\n params_extract=params_extract,\n suffix_in='_mel',\n floatx=np.float32,\n scaler=tr_gen_patch.scaler\n )\n\n for i in trange(len(te_files), miniters=int(len(te_files) / 100), ascii=True, desc=\"Predicting...\"):\n patches_file = te_gen_patch.get_patches_file()\n\n preds_patch_list = model.predict(patches_file).tolist()\n preds_patch = np.array(preds_patch_list)\n\n if params_learn.get('predict_agg') == 'amean':\n preds_file = np.mean(preds_patch, axis=0)\n elif params_recog.get('aggregate') == 'gmean':\n preds_file = gmean(preds_patch, axis=0)\n else:\n print('unkown aggregation method for prediction')\n te_preds[i, :] = preds_file\n\n list_labels = np.array(list_labels)\n pred_label_files_int = np.argmax(te_preds, axis=1)\n pred_labels = [int_to_label[x] for x in pred_label_files_int]\n\n te_files_wav = [f.replace(suffix_in + '.data', '.wav') for f in te_files]\n if not os.path.isfile(params_files.get('predictions')):\n # fold 1: create the predictions file\n pred = pd.DataFrame(te_files_wav, columns=['fname'])\n pred['label'] = pred_labels\n pred['label_int'] = pred_label_files_int\n pred.to_csv(params_files.get('predictions'), index=False)\n del pred\n\n else:\n pred = pd.read_csv(params_files.get('predictions'))\n old_fname = pred.fname.values.tolist()\n old_label = pred.label.values.tolist()\n old_label_int = pred.label_int.values.tolist()\n\n new_pred_fname = old_fname + te_files_wav\n new_pred_label = old_label + pred_labels\n new_pred_label_int = old_label_int + pred_label_files_int.tolist()\n\n del pred\n pred = pd.DataFrame(new_pred_fname, columns=['fname'])\n pred['label'] = new_pred_label\n pred['label_int'] = new_pred_label_int\n pred.to_csv(params_files.get('predictions'), index=False)\n\n # deleter variables from past fold to free memory\n del tr_gen_patch\n del val_gen_patch\n # this model was trained on split X, and no need anymore\n del model\n\n# vip once we are done with all the 4 folds\n# # =================================================================================================== EVAL\n# # =================================================================================================== EVAL\nprint('\\nEvaluate ACC and print score for the cross validation setup============================================\\n')\n\n# init Evaluator object\nevaluator = Evaluator(gt_dev, pred, list_labels, params_ctrl, params_files)\n\nprint('\\n=============================ACCURACY===============================================================')\nprint('=============================ACCURACY===============================================================\\n')\nevaluator.evaluate_acc()\nevaluator.evaluate_acc_classwise()\n\nend = time.time()\nprint('\\n=============================Job finalized, but lacks DCASE metrics========================================\\n')\nprint('\\nTime elapsed for the job: %7.2f hours' % ((end - start) / 
3600.0))\nprint('\\n====================================================================================================\\n')\n\n\nprint('\\n====================Starting metrics for challenge with REAL frontend=====================================')\nprint('====================Starting metrics for challenge with REAL frontend=====================================')\nprint('====================Starting metrics for challenge with REAL frontend=====================================\\n')\n\ndata_folder_path = '../data/foa_dev/'\n# Iterate over all audio files from the dev set, some are from split 1234\naudio_files = [f for f in os.listdir(data_folder_path) if not f.startswith('.')]\n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Path stuff\n\n# This parameter will define the algorithm type\npreset_string = 'Q'\n\n# Default preset: contains path to folders\nparams = get_params(preset_string)\n\n# Dataset type:\ndataset_type_folder = params['dataset'] + \"_\" + params['mode']\ndataset_preset_folder = dataset_type_folder + '_' + preset_string\n\n# Get folder names before and after classification\ndoa_folder = params['before_classification_folder_name']\nclassif_folder = params['after_classification_folder_name']\n\n# Path to audio folder\ndataset_dir = '../data'\ndata_folder_path = os.path.join(dataset_dir, dataset_type_folder)\n\n# Path to results_metadata folder _before classification_; it should exist\nresults_metadata_doa_folder = os.path.join('.' + params['metadata_result_folder_path'],\n dataset_preset_folder,\n doa_folder)\nif not os.path.exists(results_metadata_doa_folder):\n os.mkdir(results_metadata_doa_folder)\n\n# Path to results_metadata folder _before classification_; create it if necessary\nresults_metadata_classif_folder = os.path.join('.' + params['metadata_result_folder_path'],\n dataset_preset_folder,\n classif_folder)\nif not os.path.exists(results_metadata_classif_folder):\n os.mkdir(results_metadata_classif_folder)\n\n# Path to results_output folder _before classification_; it should exist\nresults_output_doa_folder = os.path.join('.' + params['output_result_folder_path'],\n dataset_preset_folder,\n doa_folder)\nif not os.path.exists(results_output_doa_folder):\n os.mkdir(results_output_doa_folder)\n\n# Path to results_output folder _before classification_; create it if necessary\n# old: this overwrites the several trials\nresults_output_classif_folder = os.path.join('.' + params['output_result_folder_path'],\n dataset_preset_folder,\n classif_folder)\n# create just the folder classif if there is not such thing\nif not os.path.exists(results_output_classif_folder):\n os.mkdir(results_output_classif_folder)\n\n# new: a folder for each trial. This is already what we have to submit for development mode\nresults_output_classif_folder = os.path.join('.' 
+ params['output_result_folder_path'],\n dataset_preset_folder,\n classif_folder,\n params_ctrl.get('output_file') + '_v' + str(params_ctrl.get('count_trial')))\nif not os.path.exists(results_output_classif_folder):\n os.mkdir(results_output_classif_folder)\n\n# load best model for every fold, for submission\nmodel_f1 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f1.h5'))\nmodel_f2 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f2.h5'))\nmodel_f3 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f3.h5'))\nmodel_f4 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f4.h5'))\n\nsr = 48000\nfor audio_file_name in audio_files:\n\n # Get associated metadata file\n metadata_file_name = os.path.splitext(audio_file_name)[0] + params['metadata_result_file_extension']\n\n # This is our modified metadata result array\n metadata_result_classif_array = []\n\n # Iterate over the associated doa metadata file\n with open(os.path.join(results_metadata_doa_folder, metadata_file_name), 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for i, row in enumerate(reader):\n # Discard the first line (just the column titles)\n if i > 0:\n # Get values for this sound event\n sound_class_string = row[0]\n start_time_seconds = float(row[1])\n end_time_seconds = float(row[2])\n\n # Slice the b_format audio to the corresponding event length\n start_frame = int(np.floor(start_time_seconds * sr))\n end_frame = int(np.ceil(end_time_seconds * sr))\n filename = sound_class_string + '_' + str(start_frame) + '_' + str(end_frame) + '_' + metadata_file_name.split('.')[0] + '.wav'\n curent_split = int(filename.split('_')[3][-1])\n\n # Classify: this will need 4 models for 4 test splits in x-val in development mode + one model for evaluation mode\n te_preds = np.empty((1, params_learn.get('n_classes')))\n\n # only the file under question\n ff_list_dev_param = [filename.replace('.wav', suffix_in + '.data')]\n current_scaler = scalers[curent_split - 1]\n\n te_param_gen_patch = PatchGeneratorPerFile(feature_dir=params_path.get('featurepath_dev_param'),\n file_list=ff_list_dev_param,\n params_extract=params_extract,\n suffix_in='_mel',\n floatx=np.float32,\n scaler=current_scaler\n )\n\n patches_file = te_param_gen_patch.get_patches_file()\n\n # choose model accordingly\n # predicting now on the T_F patch level (not on the wav clip-level)\n if curent_split == 1:\n preds_patch_list = model_f1.predict(patches_file).tolist()\n elif curent_split == 2:\n preds_patch_list = model_f2.predict(patches_file).tolist()\n elif curent_split == 3:\n preds_patch_list = model_f3.predict(patches_file).tolist()\n elif curent_split == 4:\n preds_patch_list = model_f4.predict(patches_file).tolist()\n\n preds_patch = np.array(preds_patch_list)\n\n # aggregate softmax values across patches in order to produce predictions on the file/clip level\n if params_learn.get('predict_agg') == 'amean':\n preds_file = np.mean(preds_patch, axis=0)\n elif params_recog.get('aggregate') == 'gmean':\n preds_file = gmean(preds_patch, axis=0)\n else:\n print('unkown aggregation method for prediction')\n te_preds[0, :] = preds_file\n\n class_id = np.argmax(te_preds, axis=1)\n row[0] = class_id\n metadata_result_classif_array.append(row)\n\n # Write a new 
results_metadata_classif file with the modified classes\n metadata_result_classif_file_name = os.path.splitext(audio_file_name)[0] + params['metadata_result_file_extension']\n path_to_write = os.path.join(results_metadata_classif_folder, metadata_result_classif_file_name)\n write_metadata_result_file(metadata_result_classif_array, path_to_write)\n\n # Write a new result_output_classif file with the modified classes\n output_result_classif_dict = build_result_dict_from_metadata_array(metadata_result_classif_array, params['required_window_hop'])\n path_to_write = os.path.join(results_output_classif_folder, metadata_file_name)\n write_output_result_file(output_result_classif_dict, path_to_write)\n\n\nprint('-------------- COMPUTE DOA METRICS REAL--------------')\ngt_folder = os.path.join(dataset_dir, 'metadata_'+params['mode'])\ncompute_DOA_metrics(gt_folder, results_output_classif_folder)\n#\n#\n#\nprint('\\n====================Starting metrics for challenge with IDEAL frontend=====================================')\nprint('====================Starting metrics for challenge with IDEAL frontend=====================================')\nprint('====================Starting metrics for challenge with IDEAL frontend=====================================')\nprint('====================Starting metrics for challenge with IDEAL frontend=====================================\\n')\n\n\n# Path to results_metadata folder _before classification_; it should exist\nresults_metadata_doa_folder = os.path.join('.' + params['metadata_result_folder_path'],\n 'metadata_dev',\n doa_folder)\nif not os.path.exists(results_metadata_doa_folder):\n os.mkdir(results_metadata_doa_folder)\n\n# Path to results_metadata folder _before classification_; create it if necessary\nresults_metadata_classif_folder = os.path.join('.' + params['metadata_result_folder_path'],\n 'metadata_dev',\n classif_folder)\nif not os.path.exists(results_metadata_classif_folder):\n os.mkdir(results_metadata_classif_folder)\n\n# Path to results_output folder _before classification_; it should exist\nresults_output_doa_folder = os.path.join('.' + params['output_result_folder_path'],\n 'metadata_dev',\n doa_folder)\nif not os.path.exists(results_output_doa_folder):\n os.mkdir(results_output_doa_folder)\n\n# Path to results_output folder _before classification_; create it if necessary\nresults_output_classif_folder = os.path.join('.' 
+ params['output_result_folder_path'],\n 'metadata_dev',\n classif_folder)\nif not os.path.exists(results_output_classif_folder):\n os.mkdir(results_output_classif_folder)\n\n# load best model for every fold, for submission\nmodel_f1 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f1.h5'))\nmodel_f2 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f2.h5'))\nmodel_f3 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f3.h5'))\nmodel_f4 = load_model(os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +\n str(params_ctrl.get('count_trial')) + '_f4.h5'))\n\nsr = 48000\nfor audio_file_name in audio_files:\n\n # Get associated metadata file\n metadata_file_name = os.path.splitext(audio_file_name)[0] + params['metadata_result_file_extension']\n metadata_result_classif_array = []\n\n # Iterate over the associated doa metadata file\n with open(os.path.join(results_metadata_doa_folder, metadata_file_name), 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for i, row in enumerate(reader):\n # Discard the first line (just the column titles)\n if i > 0:\n # Get values for this sound event\n sound_class_string = row[0]\n start_time_seconds = float(row[1])\n end_time_seconds = float(row[2])\n\n # Slice the b_format audio to the corresponding event length\n start_frame = int(np.floor(start_time_seconds * sr))\n end_frame = int(np.ceil(end_time_seconds * sr))\n\n # from one event entry in the csv, to its corresponding audio clip filename (that I stored previously)\n filename = sound_class_string + '_' + str(start_frame) + '_' + metadata_file_name.split('.')[0] + '.wav'\n curent_split = int(filename.split('_')[2][-1])\n\n # Classify: this will need 4 models for 4 test splits in x-val in development mode + one model for evaluation mode\n # to store prediction probabilites for one single test clip\n te_preds = np.empty((1, params_learn.get('n_classes')))\n\n # only the file under question\n ff_list_dev_ideal = [filename.replace('.wav', suffix_in + '.data')]\n current_scaler = scalers[curent_split - 1]\n te_idal_gen_patch = PatchGeneratorPerFile(feature_dir=params_path.get('featurepath_dev'),\n file_list=ff_list_dev_ideal,\n params_extract=params_extract,\n suffix_in='_mel',\n floatx=np.float32,\n scaler=current_scaler\n )\n\n patches_file = te_idal_gen_patch.get_patches_file()\n\n if curent_split == 1:\n preds_patch_list = model_f1.predict(patches_file).tolist()\n elif curent_split == 2:\n preds_patch_list = model_f2.predict(patches_file).tolist()\n elif curent_split == 3:\n preds_patch_list = model_f3.predict(patches_file).tolist()\n elif curent_split == 4:\n preds_patch_list = model_f4.predict(patches_file).tolist()\n\n preds_patch = np.array(preds_patch_list)\n\n # aggregate softmax values across patches in order to produce predictions on the file/clip level\n if params_learn.get('predict_agg') == 'amean':\n preds_file = np.mean(preds_patch, axis=0)\n elif params_recog.get('aggregate') == 'gmean':\n preds_file = gmean(preds_patch, axis=0)\n else:\n print('unkown aggregation method for prediction')\n te_preds[0, :] = preds_file\n\n class_id = np.argmax(te_preds, axis=1)\n row[0] = class_id\n metadata_result_classif_array.append(row)\n\n # Write a new results_metadata_classif file with the modified classes\n metadata_result_classif_file_name = 
os.path.splitext(audio_file_name)[0] + params['metadata_result_file_extension']\n path_to_write = os.path.join(results_metadata_classif_folder, metadata_result_classif_file_name)\n write_metadata_result_file(metadata_result_classif_array, path_to_write)\n\n # Write a new result_output_classif file with the modified classes\n output_result_classif_dict = build_result_dict_from_metadata_array(metadata_result_classif_array, params['required_window_hop'])\n path_to_write = os.path.join(results_output_classif_folder, metadata_file_name)\n write_output_result_file(output_result_classif_dict, path_to_write)\n\n\nprint('-------------- COMPUTE DOA METRICS IDEAL--------------')\ngt_folder = os.path.join(dataset_dir, 'metadata_'+params['mode'])\ncompute_DOA_metrics(gt_folder, results_output_classif_folder)\n\nprint('\\n=============================Job finalized==========================================================\\n')\nprint('====================================================================================================')\nprint('====================================================================================================')\n","repo_name":"andresperezlopez/DCASE2019_task3","sub_path":"classif/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":46956,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"}
+{"seq_id":"34629610824","text":"import pandas as pd\nimport math\nfrom copy import copy, deepcopy\n\ndef cal_weight_with_time(days_diff, K=1, T=15):\n # weight with decaying time diff\n # formular based on https://stats.stackexchange.com/questions/196653/assigning-more-weight-to-more-recent-observations-in-regression\n weight = K * math.exp(-days_diff / T)\n return weight\n\n\ndef cal_ij_simi_time_weighting(cooc_info, K=1, T=15):\n simi = sum([cal_weight_with_time(abs(x[1] - x[0]), K, T)\n for x in cooc_info]) # plus one avoid divide by zero\n return simi\n\n\ndef cal_item_similariy_with_time_weighting(transaction, mode='time_weighting', K=200):\n # min_dcode = transaction['dcode'].min()\n info4similarity = transaction[['customer_id', 'article_id', 'dcode']]\n info4similarity['aid_n_dcode'] = info4similarity.apply(\n lambda x: (x.article_id, x.dcode), axis=1)\n info4similarity = info4similarity.groupby('customer_id')['aid_n_dcode'].apply(list).reset_index(\n name='historical_behaviors')\n info4similarity['historical_behaviors'] = info4similarity['historical_behaviors'].apply(\n lambda x: sorted(x, key=lambda x: x[1], reverse=False)\n )\n\n C = dict()\n N = dict()\n for cid, hb in zip(\n info4similarity['customer_id'],\n info4similarity['historical_behaviors']\n ):\n for aid_i, dcode_i in hb:\n if aid_i not in N:\n N[aid_i] = 0\n N[aid_i] += 1\n if aid_i not in C:\n C[aid_i] = {}\n for aid_j, dcode_j in hb:\n if aid_j == aid_i:\n continue\n if aid_j not in C[aid_i]:\n C[aid_i][aid_j] = []\n C[aid_i][aid_j].append((dcode_i, dcode_j)) # 记录aid_i, aid_j的时间\n\n W = {}\n for i, related_items in C.items():\n if i not in W:\n W[i] = {}\n for j, cooc_info in related_items.items():\n if mode == 'time_weighting':\n W[i][j] = cal_ij_simi_time_weighting(cooc_info)\n\n # for each item, only save items with largest k similarity\n W_with_largest_k_similarity = {}\n for i, i_nbors in W.items():\n W_with_largest_k_similarity[i] = {}\n largest_k_similarity = sorted(i_nbors.items(), key=lambda x: x[1], reverse=True)[:K]\n for j, j_simi in largest_k_similarity:\n W_with_largest_k_similarity[i][j] = j_simi\n return W_with_largest_k_similarity\n # return W\n\n\ndef add_adl(transactions):\n # concat article_id, dcode and ldcode\n transactions['adl'] = transactions.apply(\n lambda x: (x.article_id, x.dcode, x.ldcode), axis=1\n )\n return transactions\n\n\ndef get_customer_purchasing_items(transactions):\n transactions_cpy = deepcopy(transactions)\n transactions_cpy = add_adl(transactions_cpy)\n transactions_cpy = transactions_cpy.groupby('customer_id')['adl'].apply(list).reset_index(name='purchased_items')\n return transactions_cpy\n\n\ndef weight_item_by_ldcode(ldcode, min_ldcode):\n # session_split的命名可以优化\n diff = ldcode - min_ldcode\n return 1 / (1 + diff) if diff <= 2 else 0\n\n\ndef ibcf_recall_method2(purchased_items, W, min_ldcode, N=50):\n # 使用三个星期的购买记录作为召回的items,其中tw的weight是1, lw的weight是1/2, llw的weight是1/3\n # 使用多种不同的weight进行召回\n # 可能在2个小时左右\n # set weight\n recall_items = {}\n for item, _, ldcode in purchased_items:\n if item not in W:\n continue\n date_weight = weight_item_by_ldcode(ldcode, min_ldcode)\n # short cut for those items are too remote\n if date_weight == 0:\n continue\n for nbor, nbor_weight in W[item].items():\n if nbor not in recall_items:\n recall_items[nbor] = 0\n recall_items[nbor] += date_weight * nbor_weight\n recall_items = sorted(recall_items.items(), key = lambda x: x[1], reverse=True)\n # recall_items = [item[0] for item in recall_items]\n return recall_items[:N]\n\n\ndef 
get_recall_from_transaction_by_ibcf(transactions):\n    W4ibcf = cal_item_similariy_with_time_weighting(transactions)\n    min_ldcode = transactions.ldcode.min()\n\n    # cpi is abbreviation for customer purchasing items\n    cpi4recall = get_customer_purchasing_items(transactions)\n    cpi4recall['recall_by_ibcf'] = cpi4recall['purchased_items'].apply(\n        ibcf_recall_method2, args=(W4ibcf, min_ldcode,)\n    )\n    cpi4recall = cpi4recall[['customer_id', 'recall_by_ibcf']]\n    return (W4ibcf, cpi4recall)\n\n\n\n\ndef get_majority_in_l3w(transactions, n=30):\n    min_ldcode = transactions.ldcode.min()\n    tw_condition = (transactions.ldcode - min_ldcode == 0)\n    lw_condition = (transactions.ldcode - min_ldcode == 1)\n    llw_condition = (transactions.ldcode - min_ldcode == 2)\n    majority_tw = transactions[tw_condition].article_id.value_counts().index[:n]\n    majority_lw = transactions[lw_condition].article_id.value_counts().index[:n]\n    majority_llw = transactions[llw_condition].article_id.value_counts().index[:n]\n\n    majority = list(set(majority_tw) | set(majority_lw) | set(majority_llw))\n\n    return majority_tw, majority_lw, majority_llw, majority\n\n\ndef repurchase_recall(transactions, timespan_ldcode=2):\n    min_ldcode = transactions['ldcode'].min()\n    subsample = transactions[transactions['ldcode'] - min_ldcode <= timespan_ldcode]\n    repurchase_recall = subsample.groupby('customer_id')['article_id'].apply(list).reset_index(name='recall_by_repurchased')\n    return repurchase_recall\n\n\n\ndef get_recall_by_multi_methods(transactions):\n    # W and recall by ibcf\n    W4ibcf, cpi4recall = get_recall_from_transaction_by_ibcf(transactions)\n    _, __, ___, majority = get_majority_in_l3w(transactions) # get recall by majority\n    repurchase = repurchase_recall(transactions)\n\n    cpi4recall['recall_by_majority'] = [majority for _ in range(len(cpi4recall))]\n    cpi4recall = pd.merge(\n        cpi4recall,\n        repurchase,\n        how='left',\n        on='customer_id'\n    )\n\n    cpi4recall['recall_items'] = cpi4recall.apply(\n        lambda x: list(set([item[0] for item in x.recall_by_ibcf]) \\\n                       | set(x.recall_by_majority) \\\n                       | set(x.recall_by_repurchased)),\n        axis=1\n    ) # merge them\n    return (W4ibcf, cpi4recall)\n\n\n\ndef combine_recall_and_purchasing_items_nw(\n        recall_items,\n        purchased_items,\n):\n    majority = recall_items['recall_by_majority'][0][:] # fallback for customers with no recall\n    # purchased_items = get_customer_purchasing_items(purchased_items)\n    purchased_items['purchased_items'] = purchased_items['purchased_items'].apply(\n        lambda x: [item[0] for item in x]\n    )\n    output = pd.merge(purchased_items,\n                      recall_items[['customer_id', 'recall_items']],\n                      how='left',\n                      on='customer_id')\n\n    # fill NaN in recall_items with majority, i.e. recommend by the majority algorithm\n    output['recall_items'] = output['recall_items'].apply(lambda d: d if isinstance(d, list) else majority[:])\n    # recalled but not purchased\n    output['recall_without_purchased_items'] = output.apply(\n        lambda x: list(set(x.recall_items) - set(x.purchased_items)), axis=1\n    )\n    return output","repo_name":"xiaoshijian/HM_COMPETITION","sub_path":"block4kaggle/20220401_v2/temp_use_for_recall.py","file_name":"temp_use_for_recall.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12616009113","text":"\"\"\"\nRemoves user PII from OAuth2 models.\n\"\"\"\n\n\nfrom oauth2_provider.models import (\n AccessToken as DOTAccessToken,\n Application as DOTApplication,\n Grant as DOTGrant,\n RefreshToken as DOTRefreshToken,\n)\n\n\nclass ModelRetirer:\n \"\"\"\n Given a list of model names, provides methods for deleting instances of\n those models.\n \"\"\"\n\n def __init__(self, models_to_retire):\n self._models_to_retire = models_to_retire\n\n def retire_user_by_id(self, user_id):\n for model in self._models_to_retire:\n self._delete_user_id_from(model=model, user_id=user_id)\n\n @staticmethod\n def _delete_user_id_from(model, user_id):\n \"\"\"\n Deletes a user from a model by their user id.\n \"\"\"\n user_query_results = model.objects.filter(user_id=user_id)\n\n if not user_query_results.exists():\n return False\n\n user_query_results.delete()\n return True\n\n\ndef retire_dot_oauth2_models(user):\n dot_models = [DOTAccessToken, DOTApplication, DOTGrant, DOTRefreshToken]\n ModelRetirer(dot_models).retire_user_by_id(user.id)\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangolib/oauth2_retirement_utils.py","file_name":"oauth2_retirement_utils.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"28060219088","text":"from tkinter import *\nimport requests\nimport json\nimport os\n\n\n#Get/Post\ndef post(nome):\n if nome != '':\n if requests.post('http://localhost:8000/api/reis', data = {'name':nome}):\n labelFeed['text'] = \"Rei indicado com sucesso!\"\n labelFeed['fg'] = \"white\"\n labelFeed['bg'] = 'green'\n frame2['bg'] = 'green'\n \n labelFeed['font'] = 'none 12 bold'\n else :\n labelFeed['text'] = \"Não foi possível indicar esse bastardo\"\n labelFeed['bg'] = 'red'\n labelFeed['fg'] = \"white\"\n labelFeed['font'] = 'none 12 bold'\n frame2['bg'] = 'red'\n else:\n labelFeed['text'] = \"Estamos sem rei!!!!\"\n labelFeed['bg'] = '#ff531a'\n labelFeed['fg'] = \"white\"\n labelFeed['font'] = 'none 12 bold'\n frame2['bg'] = '#ff531a'\ndef get():\n data = requests.get('http://localhost:8000/api/reis')\n binary = data.content\n output = json.loads(binary)\n for item in output['data']:\n labelList['text'] = \"Saudem o Rei \" + item['name']\n#Window Configuration\nwindow = Tk()\nwindow.title(\"Game Of Python 3.0\")\ncanvas = Canvas(window, height='500', width='500')\ncanvas.pack()\n#Background\nbackground = PhotoImage(file='teste.png')\nbackgroundLabel = Label(window, image=background)\nbackgroundLabel.place(relwidth=1, relheight=1)\n#Frames\nframeList = Frame(window, bg='#f0f0f5', bd=10)\nframeList.place(relx=0.5, rely=0.50, relwidth=0.75, relheight=0.4, anchor='center')\n\nframeList2 = Frame(window, bg='#f0f0f5', bd=10)\nframeList2.place(relx=0.5, rely=0.9, relwidth=0.75, relheight=0.1, anchor='center')\n\nframe = Frame(window, bg='#f0f0f5', bd=5)\nframe.place(relx=0.5, rely=0.1, relwidth=0.75, relheight=0.1, anchor='center')\n\nframe2 = Frame(window, bg='#f0f0f5', bd=5)\nframe2.place(relx=0.5, rely=0.23, relwidth=0.75, relheight=0.1, anchor='center')\n#Entry inputs\nentry = Entry(frame, font=20)\nentry.place(relwidth=0.65, relheight=1)\n#Buttons\nbutton = Button(frame, text=\"King Name\",command=lambda : post(entry.get()))\nbutton.place(relx=0.7, relwidth=0.30, relheight=1)\n\nbutton2 = Button(frameList2, text=\"Saudação\",command=lambda : get())\nbutton2.place(relwidth=1, relheight=1)\n#Labels\nlabelList = Label(frameList)\nlabelList.place(relwidth=1, relheight=1)\n\nlabelFeed = Label(frame2, text='')\nlabelFeed.place(relwidth=1, relheight=1)\nwindow.mainloop()","repo_name":"guilhermegomes1/APILaravel-Python3","sub_path":"APIConsumidora/Graphic/python/Graphic/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10601513915","text":"import matplotlib.pyplot as plt\nimport os\nimport pandas as pd\n\n\ncurso_dir = 'C:\\\\Users\\\\tiagog\\\\Documents\\\\curso-python'\ndata_dir = curso_dir + '\\\\datacamp\\\\python_programer\\\\dates_times'\nos.chdir(data_dir)\n\n# Load CSV into the rides variable\nrides = pd.read_csv('capital-onebike.csv',\n parse_dates=['Start date', 'End date'])\n\n# Import matplotlib\n\n# Resample rides to monthly, take the size, plot the results\nrides.resample('M', on='Start date')\\\n .size()\\\n .plot(ylim=[0, 150])\n\n# Show the results\nplt.show()\n","repo_name":"tgpmoraes/curso-python","sub_path":"datacamp/python_programer/dates_times/plot_date_pandas.py","file_name":"plot_date_pandas.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14513123262","text":"from django.http import JsonResponse\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom .models import Cochera, Servicio, TiempoAlquiler, Usuario\nimport mercadopago \nfrom django.http import JsonResponse\nfrom django.views import View\nfrom .models import Cochera\nfrom django.contrib.auth.views import LoginView\n\nfrom django.contrib.auth import authenticate, login\n#superusuario\nfrom django.contrib.auth.models import User\n\n\n\n\n#JWT \nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\nfrom rest_framework.authentication import TokenAuthentication\n\nfrom django.contrib.auth import get_user_model\n\n# -------------------------------------------------------------------------------------\n\n#CONFIG PARA BLOQUEAR ACCESO SI NO SE AUTENTICO\nclass CustomTokenView(TokenObtainPairView):\n # permission_classes = [AllowAny] para que no requiera autenticacion\n permission_classes = [IsAuthenticated]\n\n def post(self, request, *args, **kwargs):\n response = super().post(request, *args, **kwargs)\n # Agregar cualquier personalización adicional a la respuesta aquí\n return response\n \nclass RegisterView(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n \n def get(self, request, id=0):\n usuarios = Usuario.objects.all()\n data = {\"message\": \"Success\", \"usuarios\": []}\n for usuario in usuarios:\n usuario_data = {\n \"id_usuario\": usuario.id_usuario,\n \"nombre_usuario\": usuario.nombre_usuario,\n \"apellido_usuario\": usuario.apellido_usuario,\n \"correo_usuario\": usuario.correo_usuario,\n \"telefono_usuario\": usuario.telefono_usuario,\n \"contrasenia_usuario\": usuario.contrasenia_usuario,\n \"aceptar_terminos\": usuario.aceptar_terminos,\n \"rol\": usuario.rol,\n }\n data[\"usuarios\"].append(usuario_data)\n return JsonResponse(data)\n\n def post(self, request):\n jd = json.loads(request.body)\n # print(jd)\n nombre_usuario=jd[\"nombre_usuario\"]\n apellido_usuario=jd[\"apellido_usuario\"]\n correo_usuario=jd[\"correo_usuario\"]\n telefono_usuario=jd[\"telefono_usuario\"]\n contrasenia_usuario=jd[\"contrasenia_usuario\"]\n aceptar_terminos=jd[\"aceptar_terminos\"]\n rol=jd[\"rol\"]\n \n try:\n #si se selecciona el rol de Administrador\n if rol == \"Administrador\":\n #creo un super usuario\n #username deberia hacer referencia a correo_usuario. 
\n user = User.objects.create_superuser(username=nombre_usuario, password=contrasenia_usuario, email=correo_usuario)\n message = \"Superusuario creado\"\n\n #si se selecciona el rol de Usuario\n elif rol == \"Usuario\":\n #creo un usuario de lectura\n user = User.objects.create_user(username=nombre_usuario, password=contrasenia_usuario, email=correo_usuario)\n message = \"Usuario creado\"\n \n else:\n #si no se selecciona un rol\n data = {\"message\":\"Rol no valido\"}\n return JsonResponse(data)\n \n usuario = Usuario.objects.create(nombre_usuario=nombre_usuario, apellido_usuario=apellido_usuario, correo_usuario=correo_usuario, telefono_usuario=telefono_usuario, contrasenia_usuario=contrasenia_usuario, aceptar_terminos=aceptar_terminos, rol=rol)\n\n data = {\"message\": message, \"username\": user.username}\n except Exception as e:\n data = {\"message\":\"Error al crear el usuario\", \"error\": str(e)}\n return JsonResponse(data)\n\n def delete(self, request, id):\n usuarios = list(Usuario.objects.filter(id_usuario=id).values())\n if len(usuarios)>0:\n Usuario.objects.filter(id_usuario=id).delete()\n data = {\"message\":\"Usuario eliminado\"}\n else:\n data = {\"message\":\"Not Found...\"}\n return JsonResponse(data)\n\nclass LoginView(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n \n def authenticate(self, request, email=None, password=None, **kwargs):\n UserModel = get_user_model()\n try:\n user = UserModel.objects.get(email=email)\n if user.check_password(password):\n return user\n except UserModel.DoesNotExist:\n return None\n \n def post(self, request):\n jd = json.loads(request.body)\n email = jd[\"correo_usuario\"]\n password = jd[\"contrasenia_usuario\"]\n\n\n \n user = self.authenticate(request, email=email, password=password)\n print(user)\n \n if user is not None:\n login(request, user)\n refresh = RefreshToken.for_user(user)\n access_token = str(refresh.access_token)\n email = str(email)\n\n if user.is_superuser:\n rol = \"Administrador\"\n else:\n rol = \"Usuario\"\n \n data = {\"message\": \"Inicio de sesión exitoso\", \"access_token\": access_token, \"email\": email, \"rol\": rol}\n else:\n data = {\"message\": \"Credenciales inválidas\", \"rol\": \"\"}\n \n return JsonResponse(data)\n \n\n\n# class CocheraView(APIView):\nclass CocheraView(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n \n # authentication_classes = [TokenAuthentication]\n # permission_classes = [IsAuthenticated]\n\n def get(self, request, id=0):\n if id > 0:\n try:\n cochera = Cochera.objects.get(id_cochera=id)\n cochera_data = {\n \"id_cochera\": cochera.id_cochera,\n \"nombre_cochera\": cochera.nombre_cochera,\n \"img_cochera\": cochera.img_cochera,\n \"descripcion_cochera\": cochera.descripcion_cochera,\n \"tiempo_alquiler\": [],\n \"servicios\": [],\n }\n tiempo_alquiler = TiempoAlquiler.objects.filter(cochera=cochera)\n servicios = Servicio.objects.filter(cochera=cochera)\n for tiempo in tiempo_alquiler:\n cochera_data[\"tiempo_alquiler\"].append({\n \"tiempo\": tiempo.tiempo,\n \"precio\": tiempo.precio\n })\n \n for servicio in servicios:\n cochera_data[\"servicios\"].append({\n \"servicio\": servicio.servicio,\n \"precio\": servicio.precio\n })\n data = {\"message\": \"Success\", \"cochera\": cochera_data}\n return JsonResponse(data)\n except Cochera.DoesNotExist:\n data = {\"message\": \"Cochera not found\"}\n 
return JsonResponse(data, status=404)\n else:\n cocheras = Cochera.objects.all()\n data = {\"message\": \"Success\", \"cocheras\": []}\n for cochera in cocheras:\n cochera_data = {\n \"id_cochera\": cochera.id_cochera,\n \"nombre_cochera\": cochera.nombre_cochera,\n \"img_cochera\": cochera.img_cochera,\n \"descripcion_cochera\": cochera.descripcion_cochera,\n \"tiempo_alquiler\": [],\n \"servicios\": [],\n }\n tiempo_alquiler = TiempoAlquiler.objects.filter(cochera=cochera)\n servicios = Servicio.objects.filter(cochera=cochera)\n for tiempo in tiempo_alquiler:\n cochera_data[\"tiempo_alquiler\"].append({\n \"tiempo\": tiempo.tiempo,\n \"precio\": tiempo.precio\n })\n for servicio in servicios:\n cochera_data[\"servicios\"].append({\n \"servicio\": servicio.servicio,\n \"precio\": servicio.precio\n })\n data[\"cocheras\"].append(cochera_data)\n return JsonResponse(data)\n\n def post(self, request):\n jd = json.loads(request.body)\n # print(jd)\n Cochera.objects.create(nombre_cochera=jd[\"nombre_cochera\"], img_cochera=jd[\"img_cochera\"], descripcion_cochera=jd[\"descripcion_cochera\"])\n data = {\"message\":\"Cochera creada\"}\n return JsonResponse(data)\n\n def patch(self, request, id):\n jd=json.loads(request.body)\n cocheras = list(Cochera.objects.filter(id_cochera=id).values())\n if len(cocheras)>0:\n cochera = Cochera.objects.get(id_cochera=id)\n cochera.nombre_cochera=jd[\"nombre_cochera\"]\n cochera.img_cochera=jd[\"img_cochera\"]\n cochera.descripcion_cochera=jd[\"descripcion_cochera\"]\n cochera.save()\n data = {\"message\":\"Cochera modificada\"}\n else:\n data = {\"message\":\"Cochera no encontrada\"}\n \n return JsonResponse(data)\n\n def delete(self, request, id):\n cocheras = list(Cochera.objects.filter(id_cochera=id).values())\n if len(cocheras)>0:\n Cochera.objects.filter(id_cochera=id).delete()\n data = {\"message\":\"Cochera eliminada\"}\n else:\n data = {\"message\":\"Not Found...\"}\n return JsonResponse(data)\n\n\nclass AlquilarCocheraView(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs) \n \n def post(self, request):\n jd = json.loads(request.body)\n\n # Obtener los datos de la cochera seleccionada desde el cuerpo de la solicitud JSON\n cochera_id = jd.get(\"cochera_id\")\n tiempo_alquiler_id = jd.get(\"tiempo_alquiler_id\")\n servicio = jd.get(\"servicio\", [])\n\n\n try:\n # Obtengo la cochera seleccionada desde la base de datos\n cochera = Cochera.objects.get(id_cochera=cochera_id)\n # Obtengo el tiempo de alquiler seleccionado desde la base de datos\n tiempo_alquiler = TiempoAlquiler.objects.get(id=tiempo_alquiler_id)\n\n # Obtengo los servicios seleccionados desde la base de datos\n servicio_seleccionado = Servicio.objects.filter(id__in=servicio)\n\n # generar el enlace de pago utilizando el SDK de Mercado Pago\n ACCESS_TOKEN = \"TEST-865520782224511-052712-77c0791960cbab0081032c9906fc5539-1384417080\"\n sdk = mercadopago.SDK(ACCESS_TOKEN)\n\n preference_data = {\n \"items\": [\n {\n \"title\": cochera.nombre_cochera,\n \"quantity\": 1,\n \"unit_price\": tiempo_alquiler.precio + sum(servicio.precio for servicio in servicio_seleccionado), \n \"currency_id\": \"ARS\",\n }\n ],\n \"notification_url\": \"\",\n \"back_urls\": {\n \"success\": \"http://localhost:4200/\",\n \"failure\": \"\",\n \"pending\": \"\",\n },\n \"auto_return\": \"all\",\n }\n result = sdk.preference().create(preference_data)\n payment_url = result[\"response\"][\"init_point\"]\n data = {\"payment_url\": 
payment_url}\n return JsonResponse(data)\n except Cochera.DoesNotExist:\n data = {\"message\": \"Cochera not found\"}\n return JsonResponse(data, status=404)\n\n\n\n# tarjeta: 5031 7557 3453 0604\n# fecha: 11/25\n# cod: 123\n\n","repo_name":"valetommasini/TinderCar-ProyectoFinal","sub_path":"backend/backend1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12250,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"72793026607","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n\nimport os\nimport time\n\n\ndirname = os.path.dirname(__file__)\ninputfile = os.path.join(dirname, 'input.txt')\n\n\ndef part1(sums):\n print('part1:')\n\n print(max(sums))\n\n\ndef part2(sums):\n print('part2:')\n\n top3 = sorted(sums, reverse=True)[:3]\n print(sum(top3))\n\n\ndef main():\n currentDay = os.path.basename(__file__).split('.')[0]\n print(currentDay)\n with open(inputfile) as f:\n data = f.read()\n data = data.split(\"\\n\\n\")\n elves = []\n for idx, d in enumerate(data):\n elfData = d.split(\"\\n\")\n elfData = sum([int(food) for food in elfData])\n elves.append(elfData)\n\n part1(elves) # 72511\n part2(elves) # 212117\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n main()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"Levivig/AdventOfCode2022","sub_path":"day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2442057612","text":"#!/usr/bin/python\n\nimport sys\nimport json\nimport subprocess\nimport os\n\nfrom charmhelpers.fetch import (\n add_source,\n apt_install,\n apt_update,\n)\n\nfrom charmhelpers.core.hookenv import (\n Hooks,\n UnregisteredHookError,\n service_name,\n relation_set,\n relation_ids,\n log\n)\n\nfrom cinder_contexts import VNXSubordinateContext\nfrom charmhelpers.payload.execd import execd_preinstall\n\nPACKAGES = [\n 'sysfsutils'\n]\n\nhooks = Hooks()\n\ndef juju_log(msg):\n log('[cinder-vnx] %s' % msg)\n\n@hooks.hook('install')\ndef install():\n execd_preinstall()\n\n\n@hooks.hook('config-changed',\n 'upgrade-charm')\ndef upgrade_charm():\n for rid in relation_ids('storage-backend'):\n storage_backend(rid)\n\ndef config_get(attribute):\n cmd = [\n 'config-get',\n '--format',\n 'json']\n out = subprocess.check_output(cmd).strip()\n cfg = json.loads(out)\n\n try:\n return cfg[attribute]\n except KeyError:\n return None\n\ndef valid_source(source):\n try:\n return \\\n (source.startswith('https') or \\\n source.startswith('http') or \\\n source.startswith('ppa'))\n except Exception:\n juju_log('invalid source: %s' % source)\n return False\n\ndef valid_key(key):\n try:\n return (len(key) >= 8)\n except Exception:\n juju_log('invalid key (len < 8): %s' % key)\n return False\n\n\n@hooks.hook('storage-backend-relation-joined',\n 'storage-backend-relation-changed')\ndef storage_backend(rel_id=None):\n # REQUIRED: add navicli source and key\n navicli_source = config_get('navicli_source')\n navicli_key = config_get('navicli_source_key')\n juju_log('storage_backend: navicli_source=%s navicli_key=%s' % (navicli_source,\n navicli_key))\n if not valid_source(navicli_source) or not valid_key(navicli_key):\n raise\n # add_source(navicli_source, navicli_key)\n\n os.system('find /var/lib/juju -type d -name \"navicli_7.33.2.0.51-amd64.deb\" -exec sudo dpkg -i {} \\;')\n\n # update and install packages\n apt_update()\n dpkg_opts = [\n '--option', 'Dpkg::Options::=--force-confnew',\n '--option', 'Dpkg::Options::=--force-confdef',\n ]\n apt_install(packages=PACKAGES, options=dpkg_opts, fatal=True)\n relation_set(\n relation_id=rel_id,\n backend_name=service_name(),\n subordinate_configuration=json.dumps(VNXSubordinateContext()())\n )\n\n\nif __name__ == '__main__':\n try:\n hooks.execute(sys.argv)\n except UnregisteredHookError as e:\n juju_log('Unknown hook {} - skipping.'.format(e))\n","repo_name":"alefnode/cinder-vnx-fc","sub_path":"hooks/cinder_hooks.py","file_name":"cinder_hooks.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14862686071","text":"import random\nimport torch\nimport openai\nimport time\n\nfrom transformers import BertTokenizer, BertModel\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom app import app\nfrom app.model import *\n\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\nmodel = BertModel.from_pretrained(\"bert-base-uncased\").eval()\napp.config.from_object(\"config\")\nopenai.api_key = app.config[\"OPENAI_API_KEY\"]\nMODEL_ID = \"gpt-4\"\nMAX_CALL = 100\ncall_count = 0\ntopic = \"\"\n\n\ndef is_question_definitive(question, team, answer):\n response = chatgpt_conversation(\n \"Yes or No? Does the following question have a single, definitive answer? \"\n + question\n )\n\n if response == \"Yes\" or response == \"yes\":\n print(question + \": This Question Is Unique\")\n return True\n else:\n existing_vague_question = Vague.query.filter_by(question=question).first()\n if not existing_vague_question:\n row = Vague(question=question, answer=answer, team=team, topic=topic)\n db.session.add(row)\n db.session.commit()\n print(question + \": Question added to the vague table\")\n return False\n\n\ndef can_question_be_reworded(question):\n response = chatgpt_conversation(\n \"Yes or No? Can you reword the following question to have a single, definitive answer? \"\n + question\n )\n\n if response == \"Yes\" or response == \"yes\":\n print(question + \": This Question Can Be Reworded\")\n return True\n else:\n return False\n\n\ndef ask_again(question):\n reword_question = chatgpt_conversation(\n f\"Can you turn this into a question with only one possible answer?\"\n + question\n + f\" Ensure that the question is below 255 characters and each answer is no more than \"\n f\"7 words. The format of the response should be question \\n option1 \\n option2 \\n option3 \\n option4 \\n \"\n f\"answer. do not provide anything else in the response to distinguish what each line represents, only the \"\n f\"requested information. You must provide a question, 4 options, and an answer.\"\n )\n question_details = reword_question.split(\"\\n\")\n question = question_details[0].strip()\n options = [option.strip() for option in question_details[1:5]]\n correct_option = question_details[5].strip()\n return question, options, correct_option\n\n\ndef is_answer_correct(question, answer, team):\n verify_response = chatgpt_conversation(\n \"Yes or No? Is the answer to \" + question + \"Answer: \" + answer\n )\n if verify_response == \"Yes\" or verify_response == \"yes\":\n print(question + \": This Answer Has Been Verified\")\n return True\n else:\n existing_accuracy_question = Accuracy.query.filter_by(question=question).first()\n if not existing_accuracy_question:\n row = Accuracy(question=question, answer=answer, team=team, topic=topic)\n db.session.add(row)\n db.session.commit()\n print(\"Question added to the accuracy table\")\n return False\n\n\ndef chatgpt_prompt(question_type, summary, team):\n global topic\n\n if question_type == \"history\":\n difficulty, chosen_sub_topic = generate_history_question_topic()\n topic = chosen_sub_topic\n print(chosen_sub_topic)\n # See if the prompt can change to only ask definitive questions\n prompt = chatgpt_conversation(\n f\"Give me a unique {difficulty} level difficulty multiple choice quiz question about the {team}'s \"\n f\"{chosen_sub_topic}. Ensure that the question is below 255 characters and each answer is no more than \"\n f\"7 words. 
The format of the response should be question \\n option1 \\n option2 \\n option3 \\n option4 \\n \"\n f\"answer. do not provide anything else in the response to distinguish what each line represents, only the \"\n f\"requested information. You must provide a question, 4 options, and an answer.\"\n )\n print(prompt)\n return prompt\n if question_type == \"pbp_current\":\n prompt = chatgpt_conversation(\n f'Based on the following plays from this game: \"{summary}\", generate a unique multiple '\n f\"choice quiz question from big plays. Ensure that the question is below 255 characters and each answer is \"\n f\"no more than 7 words. Provide four options and the correct answer. Please provide as much detail as \"\n f\"possible including but not limited to which teams were playing, which team made the play, what type of \"\n f\"play it was, the quarter the play occured, time left in quarter, who made the play, \"\n f\"and if it resulted in a touchdown or firstdown. If known, give full player names as options.\"\n f\"The format of the response should be question \\n option1 \\n option2 \\n option3 \\n option4 \\n \"\n f\"answer. do not provide anything else in the response to distinguish what each line represents, only the \"\n f\"requested information. You must provide a question, 4 options, and an answer.\"\n )\n return prompt\n return None\n\n\ndef create_question_from_chatgpt(question_type, game_id, team):\n global call_count\n global MAX_CALL\n global nfl_fact\n\n if game_id is not None:\n game = Game.query.filter_by(id=game_id).first()\n if not game:\n return \"Game not found.\", 404\n\n plays = Play.query.filter_by(game_id=game_id).all()\n\n if not plays:\n return f\"No data found.\", 404\n\n summary = \". \".join(\n [f\"{play.timestamp} - {play.description}\" for play in plays]\n )\n nfl_fact = chatgpt_prompt(question_type, summary, team)\n else:\n nfl_fact = chatgpt_prompt(question_type, None, team)\n\n for _ in range(MAX_CALL):\n if call_count >= MAX_CALL:\n print(\"Reached Max Call Count: Cannot Generate New Question\")\n\n call_count += 1\n question_details = nfl_fact.split(\"\\n\")\n print(question_details)\n\n if len(question_details) <= 5:\n print(\"Length Escape\")\n break\n\n try:\n count = 0\n question = question_details[0].strip()\n options = [option.strip() for option in question_details[1:5]]\n correct_option = question_details[5].strip()\n if game_id is None:\n definitive = is_question_definitive(question, team, correct_option)\n if not definitive:\n reworded = can_question_be_reworded(question)\n if not reworded:\n print(\"Question Cannot Be Reworded Escape\")\n break\n while reworded and count < 5:\n question, options, correct_option = ask_again(question)\n count += 1\n definitive = is_question_definitive(\n question, team, correct_option\n )\n\n if count >= 5:\n print(\"Not Definitive Escape\")\n break\n\n correct = is_answer_correct(question, correct_option, team)\n\n if None in options:\n print(\"None Escape\")\n break\n\n existing_questions_for_team = (\n Question.query.filter_by(team=team)\n .with_entities(Question.question, Question.answer)\n .all()\n )\n\n is_similar = False\n\n for q_text, q_answer in existing_questions_for_team:\n if bert_similarity(question, q_text) > 0.90:\n if correct_option == q_answer:\n is_similar = True\n else:\n continue\n\n if not is_similar and definitive and correct:\n row = Question(\n question=question,\n counter=get_next_question_id_for_game(),\n option1=options[0],\n option2=options[1],\n option3=options[2],\n 
option4=options[3],\n answer=correct_option,\n team=team,\n )\n db.session.add(row)\n db.session.commit()\n call_count = 0\n break\n else:\n break\n\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n\ndef chatgpt_conversation(prompt):\n response = openai.ChatCompletion.create(\n model=MODEL_ID, messages=[{\"role\": \"user\", \"content\": prompt}]\n )\n\n return response[\"choices\"][0][\"message\"][\"content\"]\n\n\ndef generate_history_question_topic():\n difficulty = \"medium\"\n sub_topics = [\n \"Team History\",\n \"Legendary Players\",\n \"Championship Seasons\",\n \"Coaches and Management\",\n \"Stadium and Fan Culture\",\n \"Rivalries\",\n \"Record Breaking Performances\",\n \"Draft Picks\",\n \"Current Charity Organizations\",\n \"Individual player awards\",\n \"Founding Facts\",\n \"Previous Team Names\",\n \"Legendary Teams\",\n \"Stadium Facts\",\n \"Hall of Fame Inductees\",\n \"Memorable Playoff Games\",\n \"Team Scandals and Controversies\",\n \"Franchise Records\",\n \"Community Engagement\",\n \"Notable Trades and Acquisitions\",\n \"Behind-the-Scenes Personnel\",\n \"Media Coverage and Team Perception\",\n \"Fan Traditions\",\n \"Retired Jerseys and Team Honors\",\n ]\n chosen_sub_topic = random.choice(sub_topics)\n\n return difficulty, chosen_sub_topic\n\n\ndef get_bert_embedding(sentence):\n tokens = tokenizer(\n sentence, return_tensors=\"pt\", truncation=True, padding=True, max_length=512\n )\n with torch.no_grad():\n output = model(**tokens)\n return output.last_hidden_state[:, 0, :].squeeze().numpy()\n\n\ndef bert_similarity(sent1, sent2):\n emb1 = get_bert_embedding(sent1)\n emb2 = get_bert_embedding(sent2)\n return cosine_similarity([emb1], [emb2])[0][0]\n\n\ndef get_next_question_id_for_game():\n last_question = Question.query.order_by(Question.counter.desc()).first()\n if last_question:\n return last_question.counter + 1\n else:\n return 1\n","repo_name":"MarkKarels/capstone_project","sub_path":"app/chatGPT.py","file_name":"chatGPT.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"11758065699","text":"# shell : python3 /home/pi/skripts/prod/Log_Relais_Ini.py\n# Programm zum Initialisieren / reset der Text-Datei Log_Relais.txt\n\n#!/usr/bin/python\n\nfrom pathlib import Path\n\nimport time , GVS\n\nmy_dir = GVS.RelLogDir\nmy_file = GVS.RelLogFile\n\n## für Testzwecke :\n#my_dir = 'test' # für Test Verzeichnis existiert nicht\n#my_file = 'test.txt' # für Test Verzeichnis existiert nicht\n\nfile_name = my_dir +'/' + my_file\ntime_stamp = time.strftime(\"%Y.%m.%d %H:%M:%S\")\n\ntry: # Prüfen , ob Verzeichnis und Datei existieren\n \n datei = open(file_name, 'r') # öffnen zum lesen\n print ()\n \nexcept IOError as e :\n print ('IOError' , str(e))\n my_dir = Path(my_dir) \n if not my_dir.is_dir():\n print (time_stamp,' Verzeichnis existiert nicht')\n else :\n my_file = Path(file_name)\n if not my_file.is_file():\n print (time_stamp,' Datei existiert nicht')\n print ()\n \nelse: # Verarbeitung nur wenn Verzeichnis und Datei existieren\n # vorhandene Datei öffnen , Inhalt löschen und überschreiben\n datei = open(file_name,'w') # öffnen zum schreiben\n Text = time_stamp + ' Logdatei für Schaltvorgänge der Relais initialisiert'\n datei.write(Text)\n # vorhandene Datei öffnen , Inhalt in neuer Zeile anhängen\n datei = open(file_name,'a') # öffnen zum anhängen\n Text = '\\n' + time_stamp + ' ' + 83 * '-'\n datei.write(Text)\n # vorhandene Datei öffnen , Inhalt ausgeben\n datei = open(file_name,'r') # öffnen zum lesen\n print (time_stamp,' Datei ',file_name ,' initialisiert')\n print ()\n print ('Dateiinhalt nach Initialisierung , zeilenweise : ')\n print ()\n print(datei.read())\n #print(datei.readlines())\n print ()\n datei.close()\n \n\n","repo_name":"torstenkuhn77/Peter","sub_path":"skripts/prod/Log_Relais_Ini.py","file_name":"Log_Relais_Ini.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35798824343","text":"from copy import deepcopy\nfrom functools import partial\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom scipy.special import softmax\nfrom typing import Callable, Dict, Optional, Union, Tuple\nfrom alibi_detect.cd.base import BaseClassifierDrift\nfrom alibi_detect.models.pytorch.trainer import trainer\nfrom alibi_detect.utils.pytorch import get_device\nfrom alibi_detect.utils.pytorch.data import TorchDataset\nfrom alibi_detect.utils.pytorch.prediction import predict_batch\nfrom alibi_detect.utils.warnings import deprecated_alias\nfrom alibi_detect.utils.frameworks import Framework\nfrom alibi_detect.utils._types import TorchDeviceType\n\n\nclass ClassifierDriftTorch(BaseClassifierDrift):\n @deprecated_alias(preprocess_x_ref='preprocess_at_init')\n def __init__(\n self,\n x_ref: Union[np.ndarray, list],\n model: Union[nn.Module, nn.Sequential],\n p_val: float = .05,\n x_ref_preprocessed: bool = False,\n preprocess_at_init: bool = True,\n update_x_ref: Optional[Dict[str, int]] = None,\n preprocess_fn: Optional[Callable] = None,\n preds_type: str = 'probs',\n binarize_preds: bool = False,\n reg_loss_fn: Callable = (lambda model: 0),\n train_size: Optional[float] = .75,\n n_folds: Optional[int] = None,\n retrain_from_scratch: bool = True,\n seed: int = 0,\n optimizer: Callable = torch.optim.Adam,\n learning_rate: float = 1e-3,\n batch_size: int = 32,\n preprocess_batch_fn: Optional[Callable] = None,\n epochs: int = 3,\n verbose: int = 0,\n train_kwargs: Optional[dict] = None,\n device: TorchDeviceType = None,\n dataset: Callable = TorchDataset,\n dataloader: Callable = DataLoader,\n input_shape: Optional[tuple] = None,\n data_type: Optional[str] = None\n ) -> None:\n \"\"\"\n Classifier-based drift detector. The classifier is trained on a fraction of the combined\n reference and test data and drift is detected on the remaining data. To use all the data\n to detect drift, a stratified cross-validation scheme can be chosen.\n\n Parameters\n ----------\n x_ref\n Data used as reference distribution.\n model\n PyTorch classification model used for drift detection.\n p_val\n p-value used for the significance of the test.\n x_ref_preprocessed\n Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only\n the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference\n data will also be preprocessed.\n preprocess_at_init\n Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference\n data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.\n update_x_ref\n Reference data can optionally be updated to the last n instances seen by the detector\n or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while\n for reservoir sampling {'reservoir_sampling': n} is passed.\n preprocess_fn\n Function to preprocess the data before computing the data drift metrics.\n preds_type\n Whether the model outputs 'probs' or 'logits'\n binarize_preds\n Whether to test for discrepency on soft (e.g. 
probs/logits) model predictions directly\n with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.\n reg_loss_fn\n The regularisation term reg_loss_fn(model) is added to the loss function being optimized.\n train_size\n Optional fraction (float between 0 and 1) of the dataset used to train the classifier.\n The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.\n n_folds\n Optional number of stratified folds used for training. The model preds are then calculated\n on all the out-of-fold predictions. This allows to leverage all the reference and test data\n for drift detection at the expense of longer computation. If both `train_size` and `n_folds`\n are specified, `n_folds` is prioritized.\n retrain_from_scratch\n Whether the classifier should be retrained from scratch for each set of test data or whether\n it should instead continue training from where it left off on the previous set.\n seed\n Optional random seed for fold selection.\n optimizer\n Optimizer used during training of the classifier.\n learning_rate\n Learning rate used by optimizer.\n batch_size\n Batch size used during training of the classifier.\n preprocess_batch_fn\n Optional batch preprocessing function. For example to convert a list of objects to a batch which can be\n processed by the model.\n epochs\n Number of training epochs for the classifier for each (optional) fold.\n verbose\n Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.\n train_kwargs\n Optional additional kwargs when fitting the classifier.\n device\n Device type used. The default tries to use the GPU and falls back on CPU if needed.\n Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of\n ``torch.device``.\n dataset\n Dataset object used during training.\n dataloader\n Dataloader object used during training.\n input_shape\n Shape of input data.\n data_type\n Optionally specify the data type (tabular, image or time-series). 
Added to metadata.\n \"\"\"\n super().__init__(\n x_ref=x_ref,\n p_val=p_val,\n x_ref_preprocessed=x_ref_preprocessed,\n preprocess_at_init=preprocess_at_init,\n update_x_ref=update_x_ref,\n preprocess_fn=preprocess_fn,\n preds_type=preds_type,\n binarize_preds=binarize_preds,\n train_size=train_size,\n n_folds=n_folds,\n retrain_from_scratch=retrain_from_scratch,\n seed=seed,\n input_shape=input_shape,\n data_type=data_type\n )\n\n if preds_type not in ['probs', 'logits']:\n raise ValueError(\"'preds_type' should be 'probs' or 'logits'\")\n\n self.meta.update({'backend': Framework.PYTORCH.value})\n\n # set device, define model and training kwargs\n self.device = get_device(device)\n self.original_model = model\n self.model = deepcopy(model)\n\n # define kwargs for dataloader and trainer\n self.loss_fn = nn.CrossEntropyLoss() if (self.preds_type == 'logits') else nn.NLLLoss()\n self.dataset = dataset\n self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True)\n self.predict_fn = partial(predict_batch, device=self.device,\n preprocess_fn=preprocess_batch_fn, batch_size=batch_size)\n self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,\n 'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}\n if isinstance(train_kwargs, dict):\n self.train_kwargs.update(train_kwargs)\n\n def score(self, x: Union[np.ndarray, list]) \\\n -> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:\n \"\"\"\n Compute the out-of-fold drift metric such as the accuracy from a classifier\n trained to distinguish the reference data from the data to be tested.\n\n Parameters\n ----------\n x\n Batch of instances.\n\n Returns\n -------\n p-value, a notion of distance between the trained classifier's out-of-fold performance \\\n and that which we'd expect under the null assumption of no drift, \\\n and the out-of-fold classifier model prediction probabilities on the reference and test data \\\n as well as the associated reference and test instances of the out-of-fold predictions.\n \"\"\"\n x_ref, x = self.preprocess(x)\n x, y, splits = self.get_splits(x_ref, x) # type: ignore\n\n # iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions\n preds_oof_list, idx_oof_list = [], []\n for idx_tr, idx_te in splits:\n y_tr = y[idx_tr]\n if isinstance(x, np.ndarray):\n x_tr, x_te = x[idx_tr], x[idx_te]\n elif isinstance(x, list):\n x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]\n else:\n raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')\n ds_tr = self.dataset(x_tr, y_tr)\n dl_tr = self.dataloader(ds_tr)\n self.model = deepcopy(self.original_model) if self.retrain_from_scratch else self.model\n self.model = self.model.to(self.device)\n train_args = [self.model, self.loss_fn, dl_tr, self.device]\n trainer(*train_args, **self.train_kwargs) # type: ignore\n preds = self.predict_fn(x_te, self.model.eval())\n preds_oof_list.append(preds)\n idx_oof_list.append(idx_te)\n preds_oof = np.concatenate(preds_oof_list, axis=0)\n probs_oof = softmax(preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof\n idx_oof = np.concatenate(idx_oof_list, axis=0)\n y_oof = y[idx_oof]\n n_cur = y_oof.sum()\n n_ref = len(y_oof) - n_cur\n p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)\n idx_sort = np.argsort(idx_oof)\n probs_sort = probs_oof[idx_sort]\n if isinstance(x, np.ndarray):\n x_oof = x[idx_oof]\n x_sort = 
x_oof[idx_sort]\n else:\n x_oof = [x[_] for _ in idx_oof]\n x_sort = [x_oof[_] for _ in idx_sort]\n return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1], x_sort[:n_ref], x_sort[n_ref:]\n","repo_name":"SeldonIO/alibi-detect","sub_path":"alibi_detect/cd/pytorch/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":10473,"program_lang":"python","lang":"en","doc_type":"code","stars":1980,"dataset":"github-code","pt":"2"}
+{"seq_id":"12395141083","text":"import os\nimport PIL\nfrom PIL import Image\nimport numpy as np\nimport json\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport itertools\nfrom torchvision import datasets, transforms, models\nfrom shapley.transform import ThresholdTransform,AddNoise,DetachWhite\nfrom einops import rearrange\nfrom itertools import product\nimport math\nimport torchvision.models as models\nmodel=models.efficientnet_b1(pretrained=True,progress=False)\nmodel.classifier[1] = torch.nn.Linear(1280, 2)\nimport torchvision\n# model=torchvision.models.resnet18()\n# in_feat=model.fc.in_features\n# model.fc=torch.nn.Linear(in_feat,2)\ndata_path='/data/datasets/asd/All_5split/01/val/TD/'\n# data_path='/data/datasets/ai_hub_sketch_4way/01/val/m_w'\n# data_path='/data/datasets/ai_hub/ai_hub_sketch_mw/01/val/w/'\nimport random\nweight='/data/jong980812/project/mae/result_ver2/All_5split/binary_240/OUT/02/checkpoint-29.pth'\ncheckpoint = torch.load(weight, map_location='cpu')\nprint(\"Load pre-trained checkpoint from: %s\" % weight)\ncheckpoint_model = checkpoint['model']\nstate_dict = model.state_dict()\nmsg = model.load_state_dict(checkpoint_model, strict=False)\ndef set_conv_padding_mode(model, padding_mode='replicate'):\n for name, layer in model.named_modules():\n if isinstance(layer, torch.nn.Conv2d):\n layer.padding_mode = padding_mode\nset_conv_padding_mode(model,padding_mode='replicate')\nmodel.eval()\ndef get_shapley_matrix(all_ordered_pair, correct_output):\n shapley_values = torch.zeros_like(all_ordered_pair, dtype=torch.float32)\n\n # 각 ordered pair에 대한 값을 가져와 shapley_values에 저장\n for a,ordered_pairs in enumerate(all_ordered_pair):\n for i, ordered_pair in enumerate(ordered_pairs):\n # ordered_pair를 인덱스로 사용하여 correct_output에서 값을 가져옴\n indices = ordered_pair # ordered_pair를 텐서로 변환\n # print(indices)\n values1 = correct_output[int(indices[0])]\n values2 = correct_output[int(indices[1])] # correct_output에서 해당 위치의 값 가져오기\n # print(values1,values2)\n shapley_values[a,i] = torch.cat([values1.unsqueeze(0),values2.unsqueeze(0)],dim=0)\n return shapley_values\ndef binary_to_decimal(binary_tuple):\n decimal_value = 0\n binary_length = len(binary_tuple)\n\n for i, bit in enumerate(binary_tuple):\n decimal_value += bit * (2 ** (binary_length - i - 1))\n\n return decimal_value\ndef decimal_to_binary(decimal_value, num_bits):\n binary_tuple = []\n \n for i in range(num_bits):\n bit = (decimal_value >> (num_bits - i - 1)) & 1\n binary_tuple.append(bit)\n \n return tuple(binary_tuple)\ndef count_ones(binary_tuple):\n count = 0\n for bit in binary_tuple:\n if bit == 1:\n count += 1\n return count\ndef get_ordered_pair():\n\n n = 6 # digit의 개수\n digits = [0, 1] # 각 digit의 가능한 값\n\n # 경우의 수 생성\n part_combinations = list(product(digits, repeat=n))\n\n\n index_to_insert = 1 # 두 번째 위치에 추가하려면 인덱스 1을 사용합니다.\n all_ordered_pair=[]\n for index in range(7):\n ordered_pair=[] \n index_to_insert = index\n for combi in part_combinations:\n insert_value = [0,1]\n new_combi_0= combi[:index_to_insert] + (insert_value[0],) + combi[index_to_insert:]\n new_combi_1= combi[:index_to_insert] + (insert_value[1],) + combi[index_to_insert:]\n ordered_pair.append([binary_to_decimal(new_combi_0),binary_to_decimal(new_combi_1)])\n all_ordered_pair.append(ordered_pair)\n all_ordered_pair=torch.Tensor(all_ordered_pair)\n num_part = (all_ordered_pair.shape[0])\n num_case = (all_ordered_pair.shape[1])\n weights = torch.zeros((num_part,num_case))\n for i in range(num_part):\n for j in range(num_case):\n # 
all_ordered_pair의 값 가져오기\n value = int(all_ordered_pair[i, j, 1])\n \n # 이진수로 변환\n binary_value = decimal_to_binary(value, 7)\n \n # 1의 개수 세기\n num_ones = binary_value.count(1)\n \n # num * (7 combination num) 계산\n combination = math.comb(num_part,num_ones)\n weight = num_ones * combination\n \n # 결과를 weights에 저장\n weights[i, j] = weight\n return all_ordered_pair, weights\nclass shapley_part(Dataset):\n def __init__(self, data_folder, json_folder,part, binary_thresholding=None, transform=None):\n self.json_folder = json_folder\n self.data_folder = data_folder\n self.binary_thresholding=binary_thresholding\n self.transform = transform\n self.part = part\n self.num_part = len(part)\n self.image_paths = [os.path.join(data_folder, f) for f in os.listdir(data_folder) if f.endswith(('.png', '.jpg', '.jpeg', '.gif'))]\n self.json_paths = [image_path.split('/')[-1].split('.')[0] + \".json\" for image_path in self.image_paths] #! Get json path from image paths.\n print(self.image_paths)\n def get_part_json(self, json_file_path, part_name):\n '''\n Get part dictionary from json path\n '''\n part_json = {}\n \n for part in part_name:\n part_json[part] = []\n with open(json_file_path, 'r') as f:\n boxes = json.load(f)['shapes']\n for box in boxes:\n part_json[box[\"label\"]].append(box[\"points\"])\n \n for key in part_json:#! 빈 애들은 None으로 처리해서 없다고 판단.\n if not part_json[key]:\n part_json[key] = None\n\n return part_json\n def get_coords(self, part):\n extracted_coordinates = []\n if part is None:\n return None\n elif len(part) == 1:\n # print(part[0][0])\n xmin, ymin = list(map(int,part[0][0]))\n xmax, ymax = list(map(int,part[0][1]))\n return [[xmin,ymin,xmax,ymax]]#아래 2일경우와 통일하기 위해 이중 리스트로 \n elif len(part) == 2:\n #! Eye, Ear, hand, foot -> These have 2 part, return list\n for a in part: \n # print(a)\n xmin, ymin = list(map(int,a[0]))\n xmax, ymax = list(map(int,a[1]))\n extracted_coordinates.append([xmin,ymin,xmax,ymax])\n return extracted_coordinates\n else:\n exit(0)\n def get_white_image(self,size):\n return Image.new(\"RGB\", size, (255, 255, 255))\n # def get_empty_face(self,img, part_imgs, part_json):\n # '''\n # empty_face is face detached 'eye','nose','mouth','ear'\n # '''\n # head_json = part_json['head']\n # head_coords = self.get_coords(head_json)\n # head = part_imgs['head'][0]#!\n # white_image = self.get_white_image(img.size)\n # white_image.paste(head,head_coords[0])\n # for part in ['eye','nose','mouth','ear']:\n # if part_json[part] is not None:\n # part_coords= self.get_coords(part_json[part])\n # part_img = part_imgs[part]\n # if part in ['eye','ear']: \n # white_image.paste(self.get_white_image(part_img[0].size),part_coords[0])\n # white_image.paste(self.get_white_image(part_img[1].size),part_coords[1])\n # else:\n # white_image.paste(self.get_white_image(part_img[0].size),part_coords[0])\n \n # return white_image \n def get_empty_face(self,img, part_imgs, part_json):\n '''\n empty_face is face detached 'eye','nose','mouth','ear'\n '''\n head_json = part_json['head']\n head_coords = self.get_coords(head_json)\n head = part_imgs['head'][0]#!\n white_image = self.get_white_image(img.size)\n white_image.paste(head,head_coords[0])\n for part in ['eye','nose','mouth','ear']:\n if part_json[part] is not None:\n part_coords= self.get_coords(part_json[part])\n part_img = part_imgs[part]\n if part in ['eye','ear']: \n white_image.paste(self.get_white_image(part_img[0].size),part_coords[0])\n white_image.paste(self.get_white_image(part_img[1].size),part_coords[1])\n else:\n 
white_image.paste(self.get_white_image(part_img[0].size),part_coords[0])\n # white_image.show()\n return white_image\n def get_empty_lower_body(self,img, part_imgs, part_json):\n '''\n empty_lower_body detacched foot\n '''\n lower_body_json = part_json['lower_body']\n lower_body_coords = self.get_coords(lower_body_json)\n lower_body = part_imgs['lower_body'][0]#!\n white_image = self.get_white_image(img.size)\n white_image.paste(lower_body,lower_body_coords[0])\n if part_json[\"foot\"] is not None:\n part_coords= self.get_coords(part_json[\"foot\"])\n part_img = part_imgs[\"foot\"] \n white_image.paste(self.get_white_image(part_img[0].size),part_coords[0])\n white_image.paste(self.get_white_image(part_img[1].size),part_coords[1])\n \n return white_image.crop(lower_body_coords[0])\n def get_empty_upper_body(self,img, part_imgs, part_json):\n '''\n empty_lower_body detacched foot\n '''\n upper_body_json = part_json['upper_body']\n upper_body_coords = self.get_coords(upper_body_json)\n upper_body = part_imgs['upper_body'][0]#!\n white_image = self.get_white_image(img.size)\n white_image.paste(upper_body,upper_body_coords[0])\n if part_json[\"hand\"] is not None:\n part_coords= self.get_coords(part_json[\"hand\"])\n part_img = part_imgs[\"hand\"] \n white_image.paste(self.get_white_image(part_img[0].size),part_coords[0])\n white_image.paste(self.get_white_image(part_img[1].size),part_coords[1])\n # white_image.crop(upper_body_coords[0]).show()\n return white_image.crop(upper_body_coords[0])\n \n def create_new_images(self,img, binary_combination, part_imgs,part_json):\n #! Making New images\n original_img = img\n empty_face_active, eye_active, nose_active, ear_active, mouth_active, hand_active, foot_active = binary_combination\n # New white image\n\n new_image = self.get_white_image(original_img.size)\n if empty_face_active:\n new_image.paste(part_imgs[\"empty_face\"][0],(0,0))\n # print(part_json['lower_body'][0])\n # print(part_imgs[\"empty_lower_body\"][0].size,self.get_coords(part_json['lower_body'])[0] )\n new_image.paste(part_imgs[\"empty_lower_body\"][0], self.get_coords(part_json['lower_body'])[0]) # 원하는 위치에 붙임\n new_image.paste(part_imgs[\"empty_upper_body\"][0], self.get_coords(part_json['upper_body'])[0]) # 원하는 위치에 붙임\n # 각 파트 이미지를 읽어와서 새로운 이미지에 붙임\n if eye_active and (part_json[\"eye\"] is not None):\n new_image.paste(part_imgs[\"eye\"][0], self.get_coords(part_json['eye'])[0]) # 원하는 위치에 붙임\n new_image.paste(part_imgs[\"eye\"][1], self.get_coords(part_json['eye'])[1]) # 원하는 위치에 붙임 \n if nose_active and (part_json[\"nose\"] is not None):\n new_image.paste(part_imgs[\"nose\"][0], self.get_coords(part_json['nose'])[0]) # 원하는 위치에 붙임 \n if ear_active and (part_json[\"ear\"] is not None):\n new_image.paste(part_imgs[\"ear\"][0], self.get_coords(part_json['ear'])[0]) # 원하는 위치에 붙임 \n new_image.paste(part_imgs[\"ear\"][1], self.get_coords(part_json['ear'])[1]) # 원하는 위치에 붙임 \n if mouth_active and (part_json[\"mouth\"] is not None):\n new_image.paste(part_imgs[\"mouth\"][0], self.get_coords(part_json['mouth'])[0]) # 원하는 위치에 붙임 \n if hand_active and (part_json[\"hand\"] is not None):\n new_image.paste(part_imgs[\"hand\"][0], self.get_coords(part_json['hand'])[0]) # 원하는 위치에 붙임 \n new_image.paste(part_imgs[\"hand\"][1], self.get_coords(part_json['hand'])[1]) # 원하는 위치에 붙임 \n if foot_active and (part_json[\"foot\"] is not None):\n new_image.paste(part_imgs[\"foot\"][0], self.get_coords(part_json['foot'])[0]) # 원하는 위치에 붙임 \n new_image.paste(part_imgs[\"foot\"][1], 
self.get_coords(part_json['foot'])[1]) # 원하는 위치에 붙임 \n # 다른 파트들에 대해서도 같은 방식으로 처리\n return new_image\n def __len__(self):\n return len(self.image_paths)\n def __getitem__(self, idx):\n img_path = self.image_paths[idx]\n print(img_path)\n label = 0 if (img_path.split('/')[-1].split('.')[0].split('-')[0])=='A' else 1\n image = Image.open(img_path)\n part_name = self.part#[\"head\", \"eye\", \"nose\", \"ear\", \"mouth\", \"hand\", \"foot\", \"upper_body\", \"lower_body\"]\n # if self.binary_thresholding:\n # image = image.convert(\"L\")#! Convert grayscale\n # image = image.point(lambda p: p > self.binary_thresholding and 255)\n part_json = self.get_part_json(os.path.join(self.json_folder,self.json_paths[idx]),part_name=part_name)\n part_imgs = {}\n for part in part_name:#모든 part를 다시 dict으로 리턴하기위함.\n part_imgs[part]=[]\n # print(part)\n coords = self.get_coords(part_json[part])\n # print(coords)\n if coords is None:\n part_imgs[part].append(None) \n \n elif len(coords) ==1:\n part_imgs[part].append(image.crop(coords[0])) \n elif len(coords) == 2:\n part_imgs[part].append(image.crop(coords[0])) \n part_imgs[part].append(image.crop(coords[1])) \n empty_face = self.get_empty_face(image,part_imgs,part_json)\n # empty_face.show()\n empty_upper_body = self.get_empty_upper_body(image,part_imgs,part_json)\n empty_lower_body = self.get_empty_lower_body(image,part_imgs,part_json)\n part_imgs['empty_face']=[empty_face]\n part_imgs['empty_lower_body']=[empty_lower_body]\n part_imgs['empty_upper_body']=[empty_upper_body]\n part_combinations = list(itertools.product([0, 1], repeat=7))\n new_imgs = []\n for combination in part_combinations:\n # print(combination)\n new_img=self.create_new_images(img=image,binary_combination=combination, part_imgs=part_imgs,part_json=part_json)\n if self.transform:\n new_img=self.transform(new_img)\n new_imgs.append(new_img.unsqueeze(0))\n new_imgs = torch.cat(new_imgs,dim=0)\n image = self.transform(image)\n image_3ch = image.expand(3,-1,-1)\n return new_imgs,image_3ch,label \n \n \n\n\n\n\nif __name__==\"__main__\":\n transform= transforms.Compose([transforms.Resize((224,224)),transforms.ToTensor(),ThresholdTransform(240)])\n part_name = [\"head\", \"eye\", \"nose\", \"ear\", \"mouth\", \"hand\", \"foot\", \"upper_body\", \"lower_body\"]\n dataset = shapley_part('/data/jong980812/project/mae/util/shapley/TD','/data/jong980812/project/mae/util/shapley/TD',part_name,240,transform=transform)\n data_loader=DataLoader(dataset,5,num_workers=4)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n all_ordered_pair,weights = get_ordered_pair()\n part_number = all_ordered_pair.shape[0]\n part_count = {i: 0 for i in range(part_number)}\n num_correct = 0\n for new_imgs,original_image,label in data_loader:\n # print(new_imgs.shape)\n input_data = new_imgs\n # print('complete')\n batch_size = input_data.shape[0]\n input_data = rearrange(input_data, 'b t c h w -> (b t) c h w')\n \n \n model.to(device)\n input_data = input_data.to(device)\n original_image = original_image.to(device)\n label = label.to(device)\n model.eval()\n with torch.no_grad():\n prediction = model(original_image)\n output=model(input_data)\n output = rearrange(output, '(b t) o -> b t o', b=batch_size) # batch_size, 128, output(2)\n prediction = prediction.argmax(1)\n # print(output.shape)\n # print(label)\n \n for i in range(batch_size):\n if prediction[i] == label[i]:\n num_correct +=1\n correct_output = output[:,:,label[i]]# Take correct logits, (b, 128), 밖에서. 
\n shapley_matrix = get_shapley_matrix(all_ordered_pair,correct_output[i])\n shapley_contributions = shapley_matrix[:,:,1] - shapley_matrix[:,:,0] \n shapley_value = (shapley_contributions * 1/weights).sum(dim=1)\n max_part_number = (int(shapley_value.argmax()))\n part_count[max_part_number] += 1\n print(part_count)\n print(num_correct)\n \n","repo_name":"jong980812/Part_shapley","sub_path":"shapely_asd.py","file_name":"shapely_asd.py","file_ext":"py","file_size_in_byte":17112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74912154927","text":"#!/usr/bin/python3\n\nimport os\nimport re\nimport getopt\nimport sys\nimport json\nimport logging\n\nfrom typing import (\n Optional,\n Tuple,\n Any,\n List,\n Dict,\n)\n\nimport whois\n\n# import whoisdomain as whois # to be compatible with dannycork\n\nlog = logging.getLogger(__name__)\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n\n# if we are not running as test2.py run in a simplistic way\nSIMPLISTIC: bool = False\nWithRedacted: bool = False\n\nPrintJson: bool = False\nVerbose: bool = False\nPrintGetRawWhoisResult: bool = False\nRuleset: bool = False\n\nFailures: Dict[str, Any] = {}\nIgnoreReturncode: bool = False\nTestAllTld: bool = False\nTestRunOnly: bool = False\n\nWithPublicSuffix: bool = False\nWithExtractServers: bool = False\nWithStripHttpStatus: bool = False\n\n\nclass ResponseCleaner:\n data: str\n rDict: Dict[str, Any] = {}\n\n def __init__(\n self,\n pathToTestFile: str,\n ):\n self.data = self.readInputFile(pathToTestFile)\n\n def readInputFile(\n self,\n pathToTestFile: str,\n ) -> str:\n if not os.path.exists(pathToTestFile):\n return \"\"\n\n with open(pathToTestFile, mode=\"rb\") as f: # switch to binary mode as that is what Popen uses\n # make sure the data is treated exactly the same as the output of Popen\n return f.read().decode(errors=\"ignore\")\n\n def cleanSection(\n self,\n section: List[str],\n ) -> List[str]:\n # cleanup any beginning and ending empty lines from the section\n\n if len(section) == 0:\n return section\n\n rr = r\"^\\s*$\"\n n = 0 # remove empty lines from the start of section\n while re.match(rr, section[n]):\n section.pop(n)\n # n stays 0\n\n n = len(section) - 1 # remove empty lines from the end of the section\n while re.match(rr, section[n]):\n section.pop(n)\n n = len(section) - 1 # remove empty lines from the end of section\n\n return section\n\n def splitBodyInSections(\n self,\n body: List[str],\n ) -> List[str]:\n # split the body on empty line, cleanup all sections, remove empty sections\n # return list of body's\n\n sections: List[List[str]] = []\n n = 0\n sections.append([])\n for line in body:\n if re.match(r\"^\\s*$\", line):\n n += 1\n sections.append([])\n continue\n sections[n].append(line)\n\n m = 0\n while m < len(sections):\n sections[m] = self.cleanSection(sections[m])\n m += 1\n\n # now remove empty sections and return\n sections2: List[str] = []\n m = 0\n while m < len(sections):\n if len(sections[m]) > 0:\n sections2.append(\"\\n\".join(sections[m]))\n m += 1\n\n return sections2\n\n def cleanupWhoisResponse(\n self,\n verbose: bool = False,\n with_cleanup_results: bool = False,\n ) -> Tuple[str, Dict[Any, Any]]:\n result = whois.cleanupWhoisResponse(\n self.data,\n verbose,\n with_cleanup_results,\n )\n\n self.rDict: Dict[str, Any] = {\n \"BodyHasSections\": False, # if this is true the body is not a list of lines but a list of sections with lines\n \"Preamble\": [], # the lines telling what whois servers wwere contacted\n \"Percent\": [], # lines staring with %% , often not present but may contain hints\n \"Body\": [], # the body of the whois, may be in sections separated by empty lines\n \"Postamble\": [], # copyright and other not relevant info for actual parsing whois\n }\n body: List[str] = []\n\n rr: List[str] = []\n z = result.split(\"\\n\")\n preambleSeen = False\n postambleSeen = False\n percentSeen = False\n for line in z:\n if preambleSeen is False:\n if line.startswith(\"[\"):\n self.rDict[\"Preamble\"].append(line)\n line = \"PRE;\" + line\n 
continue\n preambleSeen = True\n\n if preambleSeen is True and percentSeen is False:\n if line.startswith(\"%\"):\n self.rDict[\"Percent\"].append(line)\n line = \"PERCENT;\" + line\n continue\n percentSeen = True\n\n if postambleSeen is False:\n if line.startswith(\"-- \") or line.startswith(\">>> \") or line.startswith(\"Copyright notice\"):\n postambleSeen = True\n\n if postambleSeen is True:\n self.rDict[\"Postamble\"].append(line)\n line = \"POST;\" + line\n continue\n\n body.append(line)\n\n if \"\\t\" in line:\n line = \"TAB;\" + line # mark lines having tabs\n\n if line.endswith(\"\\r\"):\n line = \"CR;\" + line # mark lines having CR (\\r)\n\n rr.append(line)\n\n body = self.cleanSection(body)\n self.rDict[\"Body\"] = self.splitBodyInSections(body)\n return \"\\n\".join(rr), self.rDict\n\n def printMe(self) -> None:\n zz = [\"Preamble\", \"Percent\", \"Postamble\"]\n for k in zz:\n n = 0\n for lines in self.rDict[k]:\n tab = \" [TAB] \" if \"\\t\" in lines else \"\" # tabs are present in this section\n cr = \" [CR] \" if \"\\r\" in lines else \"\" # \\r is present in this section\n print(k, cr, tab, lines)\n\n k = \"Body\"\n if self.rDict[k]:\n n = 0\n for lines in self.rDict[k]:\n ws = \" [WHITESPACE AT END] \" if re.search(r\"[ \\t]+\\r?\\n\", lines) else \"\"\n tab = \" [TAB] \" if \"\\t\" in lines else \"\" # tabs are present in this section\n cr = \" [CR] \" if \"\\r\" in lines else \"\" # \\r is present in this section\n print(f\"# --- {k} Section: {n} {cr}{tab}{ws}\")\n n += 1\n print(lines)\n\n\ndef prepItem(d: str) -> None:\n if PrintJson is False:\n print(\"\")\n print(f\"test domain: <<<<<<<<<< {d} >>>>>>>>>>>>>>>>>>>>\")\n\n\ndef xType(x: Any) -> str:\n s = f\"{type(x)}\"\n return s.split(\"'\")[1]\n\n\ndef testItem(\n d: str,\n printgetRawWhoisResult: bool = False,\n) -> None:\n global IgnoreReturncode\n global Verbose\n global PrintGetRawWhoisResult\n\n global SIMPLISTIC\n global TestAllTld\n global TestRunOnly\n\n global WithRedacted\n global WithPublicSuffix\n global WithExtractServers\n global WithStripHttpStatus\n\n pc = whois.ParameterContext(\n ignore_returncode=IgnoreReturncode,\n verbose=Verbose,\n internationalized=True,\n include_raw_whois_text=PrintGetRawWhoisResult,\n simplistic=SIMPLISTIC,\n withRedacted=WithRedacted,\n withPublicSuffix=WithPublicSuffix,\n extractServers=WithExtractServers,\n stripHttpStatus=WithStripHttpStatus,\n )\n\n # use the new query (can also simply use q2()\n w = whois.query(domain=d, pc=pc)\n\n if w is None:\n print(\"None\")\n print(\"\\n\", whois.get_last_raw_whois_data())\n return\n\n # the 3 date time items can be None if not present or a datetime string\n # dnssec is a bool\n # some strings are return as '' when empty (status)\n # statuses can be a array of one empty string if no data\n\n # not all values are always present it mainly depends on whet we see in the output of whois\n # if we return not None: the elements that ars always there ars domain_name , tld, dnssec\n\n wd = w.__dict__\n if PrintJson is True:\n for f in [\"creation_date\", \"expiration_date\", \"last_updated\"]:\n if f in wd:\n wd[f] = f\"{wd[f]}\"\n print(json.dumps(wd))\n return\n\n for k, v in wd.items():\n if SIMPLISTIC:\n ss = \"%-18s \"\n if isinstance(v, str):\n print((ss + \"'%s'\") % (k, v))\n else:\n print((ss + \"%s\") % (k, v))\n else:\n ss = \"%-18s %-17s \"\n if isinstance(v, str):\n print((ss + \"'%s'\") % (k, xType(v), v))\n else:\n print((ss + \"%s\") % (k, xType(v), v))\n\n # print(\"\\n\", whois.get_last_raw_whois_data())\n\n\ndef 
errorItem(d: str, e: Any, what: str = \"Generic\") -> None:\n if what not in Failures:\n Failures[what] = {}\n Failures[what][d] = e\n\n message = f\"Domain: {d}; Exception: {what}; Error: {e}\"\n print(message)\n\n\ndef testDomains(aList: List[str]) -> None:\n for d in aList:\n # skip empty lines\n if not d:\n continue\n\n if len(d.strip()) == 0:\n continue\n\n # skip comments\n if d.strip().startswith(\"#\"):\n continue\n\n # skip comments behind the domain\n d = d.split(\"#\")[0]\n d = d.strip()\n\n prepItem(d)\n try:\n testItem(d)\n except whois.UnknownTld as e:\n errorItem(d, e, what=\"UnknownTld\")\n except whois.FailedParsingWhoisOutput as e:\n errorItem(d, e, what=\"FailedParsingWhoisOutput\")\n except whois.UnknownDateFormat as e:\n errorItem(d, e, what=\"UnknownDateFormat\")\n except whois.WhoisCommandFailed as e:\n errorItem(d, e, what=\"WhoisCommandFailed\")\n except whois.WhoisQuotaExceeded as e:\n errorItem(d, e, what=\"WhoisQuotaExceeded\")\n except whois.WhoisPrivateRegistry as e:\n errorItem(d, e, what=\"WhoisPrivateRegistry\")\n except whois.WhoisCommandTimeout as e:\n errorItem(d, e, what=\"WhoisCommandTimeout\")\n # except Exception as e:\n # errorItem(d, e, what=\"Generic\")\n\n\ndef getTestFileOne(fPath: str, fileData: Dict[str, Any]) -> None:\n if not os.path.isfile(fPath): # only files\n return\n\n if not fPath.endswith(\".txt\"): # ending in .txt\n return\n\n bName = fPath[:-4]\n fileData[bName] = []\n xx = fileData[bName]\n\n with open(fPath, encoding=\"utf-8\") as f:\n for index, line in enumerate(f):\n line = line.strip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue\n\n aa = re.split(r\"\\s+\", line)\n if aa[0] not in xx:\n xx.append(aa[0])\n\n return\n\n\ndef getTestFilesAll(\n tDir: str,\n fileData: Dict[str, Any],\n) -> None:\n for item in os.listdir(tDir):\n fPath = f\"{tDir}/{item}\"\n getTestFileOne(fPath, fileData)\n\n\ndef getAllCurrentTld() -> List[str]:\n return whois.validTlds()\n\n\ndef appendHintOrMeta(\n rr: List[str],\n allRegex: Optional[str],\n tld: str,\n) -> None:\n global TestAllTld\n global TestRunOnly\n\n if TestAllTld is True:\n hint = whois.getTestHint(tld)\n hint = hint if hint else f\"meta.{tld}\"\n rr.append(f\"{hint}\")\n else:\n rr.append(f\"meta.{tld}\")\n\n\ndef appendHint(\n rr: List[str],\n allRegex: Optional[str],\n tld: str,\n) -> None:\n global TestAllTld\n global TestRunOnly\n\n if TestAllTld is True:\n hint = whois.getTestHint(tld)\n if hint:\n rr.append(f\"{hint}\")\n\n\ndef makeMetaAllCurrentTld(\n allHaving: Optional[str] = None,\n allRegex: Optional[str] = None,\n) -> List[str]:\n rr: List[str] = []\n for tld in getAllCurrentTld():\n if allRegex is None:\n appendHintOrMeta(rr, allRegex, tld)\n continue\n\n if re.search(allRegex, tld):\n appendHintOrMeta(rr, allRegex, tld)\n\n return rr\n\n\ndef makeTestAllCurrentTld(\n allRegex: Optional[str] = None,\n) -> List[str]:\n rr: List[str] = []\n for tld in getAllCurrentTld():\n if allRegex is None:\n appendHint(rr, allRegex, tld)\n continue\n if re.search(allRegex, tld):\n appendHint(rr, allRegex, tld)\n\n return rr\n\n\ndef showAllCurrentTld() -> None:\n print(\"Tld's currently supported\")\n for tld in getAllCurrentTld():\n print(tld)\n\n\ndef ShowRuleset(tld: str) -> None:\n rr = whois.get_TLD_RE()\n if tld in rr:\n for key in sorted(rr[tld].keys()):\n rule = f\"{rr[tld][key]}\"\n if \"re.compile\" in rule:\n rule = rule.split(\"re.compile(\")[1]\n rule = rule.split(\", re.IGNORECASE)\")[0]\n print(key, rule, \"IGNORECASE\")\n\n\ndef usage() -> None:\n name = 
os.path.basename(sys.argv[0])\n\n print(\n f\"\"\"\n{name}\n [ -h | --usage ]\n print this text and exit\n\n [ -V | --Version ]\n print the build version string\n and exit\n\n [ -S | --SupportedTld ]\n print all known top level domains\n and exit\n\n [ -a | --all]\n test all existing tld currently supported\n and exit\n\n [ -f | --file = \" ]\n use the named file to test all domains (one domain per line)\n lines starting with # or empty lines are skipped, anything after the domain is ignored\n the option can be repeated to specify more then one file\n exits after processing all the files\n\n [ -D | --Directory = \" ]\n use the named directory, ald use all files ending in .txt as files containing domains\n files are processed as in the -f option so comments and empty lines are skipped\n the option can be repeated to specify more then one directory\n exits after processing all the dirs\n\n [ -d | --domain = \" ]\n only analyze the given domains\n the option can be repeated to specify more domain's\n\n [ -v | --verbose ]\n set verbose to True,\n verbose output will be printed on stderr only\n\n [ -j | --json ]\n print each result as json\n\n [ -I | --IgnoreReturncode ]\n sets the IgnoreReturncode to True,\n\n [ -p | --print ]\n also print text containing the raw output of the cli whois\n\n [ -R | --Ruleset ]\n dump the ruleset for the requested tld and exit\n should be combined with -d to specify tld's\n\n [ -C | --Cleanup ]\n read the input file specified and run the same cleanup as in whois.query,\n then exit\n\n # test two domains with verbose and IgnoreReturncode\n example: {name} -v -I -d meta.org -d meta.com\n\n # test all supported tld's with verbose and IgnoreReturncode\n example: {name} -v -I -a\n\n # test one specific file with verbose and IgnoreReturncode\n example: {name} -v -I -f tests/ok-domains.txt\n\n # test one specific directory with verbose and IgnoreReturncode\n example: {name} -v -I -D tests\n\n\"\"\"\n )\n\n \"\"\"\n TODO\n --all --reg \n from all tld a regex match sub selection\n\n --all --having \n from all but only the ones haveing a certain field\n \"\"\"\n sys.exit(1)\n\n\ndef showFailures() -> None:\n if len(Failures):\n print(\"\\n# ========================\")\n for i in sorted(Failures.keys()):\n for j in sorted(Failures[i].keys()):\n print(i, j, Failures[i][j])\n\n\ndef main() -> None:\n global PrintJson\n global Verbose\n global IgnoreReturncode\n global PrintGetRawWhoisResult\n global Ruleset\n global SIMPLISTIC\n global WithRedacted\n global TestAllTld\n global TestRunOnly\n global WithPublicSuffix\n global WithExtractServers\n global WithStripHttpStatus\n\n name: str = os.path.basename(sys.argv[0])\n if name == \"test2.py\":\n SIMPLISTIC = False\n else:\n SIMPLISTIC = True\n\n try:\n opts, args = getopt.getopt(\n sys.argv[1:],\n \"TtjRSpvVIhaf:d:D:r:H:C:\",\n [\n \"Testing\",\n \"test\",\n \"json\",\n \"Ruleset\",\n \"SupportedTld\",\n \"print\",\n \"verbose\",\n \"Version\",\n \"IgnoreReturncode\",\n \"all\",\n \"file=\",\n \"Directory=\",\n \"domain=\",\n \"reg=\",\n \"having=\",\n \"Cleanup=\",\n \"withRedacted\",\n \"withPublicSuffix\",\n \"extractServers\",\n \"stripHttpStatus\",\n ],\n )\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n\n # TestAllTld: bool = False\n\n allHaving: Optional[str] = None # from all supported tld only process the ones having this :: TODO ::\n allRegex: Optional[str] = None # from all supported tld process only the ones matching this regex\n\n directory: Optional[str] = None\n dirs: List[str] = []\n\n filename: 
Optional[str] = None\n files: List[str] = []\n\n domain: Optional[str] = None\n domains: List[str] = []\n\n fileData: Dict[str, Any] = {}\n\n for opt, arg in opts:\n if opt in (\"-S\", \"SupportedTld\"):\n for tld in sorted(whois.validTlds()):\n print(tld)\n sys.exit(0)\n\n if opt in (\"-V\", \"Version\"):\n print(whois.getVersion())\n sys.exit(0)\n\n if opt == \"-h\":\n usage()\n sys.exit(0)\n\n if opt in (\"-a\", \"--all\"):\n TestAllTld = True\n\n if opt in (\"-H\", \"--having\"):\n TestAllTld = True\n allHaving = str(arg)\n\n if opt in (\"-r\", \"--reg\"):\n TestAllTld = True\n allRegex = str(arg)\n\n if opt in (\"-v\", \"--verbose\"):\n Verbose = True\n logging.basicConfig(level=\"DEBUG\")\n\n if opt in (\"-p\", \"--print\"):\n PrintGetRawWhoisResult = True\n\n if opt in (\"-j\", \"--json\"):\n PrintJson = True\n\n if opt in (\"-T\", \"--Testing\"):\n # print out all names of tld where we have _test\n TestAllTld = True\n rr = makeTestAllCurrentTld(None)\n for item in sorted(rr):\n print(item)\n sys.exit(0)\n\n if opt in (\"-t\", \"--test\"):\n # collect all _test entries defined and only run those,\n # o not run the default meta.tld\n TestAllTld = True\n TestRunOnly = True\n\n if opt in (\"-R\", \"--Ruleset\"):\n Ruleset = True\n\n if opt in (\"-D\", \"--Directory\"):\n directory = arg\n isDir = os.path.isdir(directory)\n if isDir is False:\n print(f\"{directory} cannot be found or is not a directory\", file=sys.stderr)\n sys.exit(101)\n\n if opt in (\"-C\", \"--Cleanup\"):\n inFile = arg\n isFile = os.path.isfile(arg)\n if isFile is False:\n print(f\"{inFile} cannot be found or is not a file\", file=sys.stderr)\n sys.exit(101)\n\n rc = ResponseCleaner(inFile)\n d1, rDict = rc.cleanupWhoisResponse()\n rc.printMe()\n sys.exit(0)\n\n if opt in (\"-f\", \"--file\"):\n filename = arg\n isFile = os.path.isfile(filename)\n if isFile is False:\n print(f\"{filename} cannot be found or is not a file\", file=sys.stderr)\n sys.exit(101)\n\n if filename not in files:\n files.append(filename)\n TestAllTld = False\n\n if opt in (\"-d\", \"--domain\"):\n domain = arg\n if domain not in domains:\n domains.append(domain)\n\n if opt in (\"--extractServers\"):\n WithExtractServers = True\n\n if opt in (\"--stripHttpStatus\"):\n WithStripHttpStatus = True\n\n if opt in (\"--withRedacted\"):\n WithRedacted = True\n\n if opt in (\"--withPublicSuffix\"):\n WithPublicSuffix = True\n\n msg = f\"{name} SIMPLISTIC: {SIMPLISTIC}\"\n log.debug(msg)\n\n if Ruleset is True and domains:\n for domain in domains:\n ShowRuleset(domain)\n sys.exit(0)\n\n if TestAllTld:\n if TestRunOnly is False:\n testDomains(makeMetaAllCurrentTld(allHaving, allRegex))\n else:\n testDomains(makeTestAllCurrentTld(allRegex))\n\n showFailures()\n sys.exit(0)\n\n if dirs:\n fileData = {}\n for dName in dirs:\n getTestFilesAll(dName, fileData)\n for testFile, x in fileData.items():\n testDomains(x)\n showFailures()\n sys.exit(0)\n\n if files:\n fileData = {}\n for testFile in files:\n getTestFileOne(testFile, fileData)\n for testFile, x in fileData.items():\n testDomains(x)\n showFailures()\n sys.exit(0)\n\n if domains:\n testDomains(domains)\n showFailures()\n sys.exit(0)\n\n usage()\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DannyCork/python-whois","sub_path":"whois/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20637,"program_lang":"python","lang":"en","doc_type":"code","stars":280,"dataset":"github-code","pt":"2"}
+{"seq_id":"20831607856","text":"def get_int(word):\n \"\"\"\n :param word: Приймає рядок який буде описувати для чого необхідно ввести число\n :return: Повертає число у форматі int\n \"\"\"\n while True:\n my_number = input(word)\n try:\n return int(my_number)\n except:\n print(\"Помилка, спробуй ще раз ввести число\")\n\n\ndef check_palindrome(num):\n \"\"\"\n Функція для перевірки чи є число паліндромом\n :param num: Число яке перевіряємо\n :return: True якщо число паліндром і False якщо ні\n \"\"\"\n num = str(num)\n left_bord = \"\"\n right_bord = \"\"\n centre = \"\"\n if int(num) < 10:\n return True\n elif len(num) % 2 == 0:\n for i in range(len(num)):\n if i < len(num) / 2:\n left_bord += num[i]\n else:\n right_bord += num[i]\n return left_bord == right_bord[::-1]\n else:\n for i in range(len(num)):\n if i < (len(num) / 2) - 1:\n left_bord += num[i]\n elif i > (len(num) / 2):\n right_bord += num[i]\n else:\n centre += num[i]\n return left_bord == right_bord[::-1]\n\nif __name__ == '__main__':\n pass","repo_name":"GlebOstapenko/homeWork","sub_path":"HM7/all_func_HM7.py","file_name":"all_func_HM7.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"25063206425","text":"from typing import Sequence\n\nimport torch\n\nfrom meddlr.metrics.metric import Metric\nfrom meddlr.ops import complex as cplx\nfrom meddlr.utils import env\n\nif env.package_available(\"lpips\"):\n from lpips import LPIPS as _LPIPS\n\n\n# TODO: Refactor SSFD Class to extract shared logic into parent class FeatureMetric\nclass LPIPS(Metric):\n \"\"\"\n Learned Perceptual Image Patch Similarity.\n\n LPIPS evaluates the feature distance between a pair of images from features extracted\n from a pre-trained neural network [1]. LPIPS has been shown to correspond well to\n perceived image quality on natural images.\n\n References:\n .. [1] R. Zhang, P. Isola, A. A. Efros, E. Shechtman, O. Wang.\n The Unreasonable Effectiveness of Deep Features as a Perceptual Metric.\n In CVPR, 2018 http://arxiv.org/abs/1801.03924\n \"\"\"\n\n is_differentiable = True\n higher_is_better = False\n\n def __init__(\n self,\n net_type: str = \"alex\",\n mode: str = \"grayscale\",\n lpips: bool = True,\n pretrained: bool = True,\n channel_names: Sequence[str] = None,\n reduction=\"none\",\n compute_on_step: bool = False,\n dist_sync_on_step: bool = False,\n process_group: bool = None,\n dist_sync_fn: bool = None,\n ):\n \"\"\"\n Args:\n net_type (str): The pre-trained network to use for extracting features. One of:\n * ``'alex'``: Alex-Net w/ feature extraction layers 'relu1' through 'relu5'\n * ``'vgg'``: VGG-16 w/ feature extration layers ['relu1_2', 'relu2_2',\n 'relu3_3', 'relu4_3', 'relu5_3']\n * ``'squeeze'``: Squeeze-Net w/ feature extration layers 'relu1' through 'relu7'\n mode (str): Determines how to interpret the channel dimension of the inputs. One of:\n * ``'grayscale'``: Each channel corresponds to a distinct grayscale input image.\n * ``'rgb'``: The 3 channel dimensions correspond to a single rgb image.\n Exception will be thrown if channel dimension != 3 dtype data is complex.\n lpips (bool): This flag determines if a linear layer is used on top of the\n extracted features.\n * ``True``: linear layers on top of base/trunk network.\n * ``False``: no linear layers; each layer is averaged together.\n pretrained (bool): This flag controls the linear layers, which are only in\n effect when lpips=True above.\n * ``True``: linear layers are calibrated with human perceptual judgments.\n * ``False``: linear layers are randomly initialized.\n \"\"\"\n\n if not env.package_available(\"lpips\"):\n raise ModuleNotFoundError(\n \"LPIPS metric requires that lpips is installed.\"\n \"Either install as `pip install meddlr[metrics]` or `pip install lpips`.\"\n )\n\n super().__init__(\n channel_names=channel_names,\n units=\"\",\n reduction=reduction,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n )\n\n valid_net_type = (\"vgg\", \"alex\", \"squeeze\")\n if net_type not in valid_net_type:\n raise ValueError(\n f\"Invalid `net_type` ('{net_type}'). Expected one of {valid_net_type}.\"\n )\n\n valid_modes = (\"grayscale\", \"rgb\")\n if mode not in valid_modes:\n raise ValueError(f\"Invalid `mode` ('{mode}'). 
Expected one of {valid_modes}.\")\n\n self.net = NoTrainLpips(net=net_type, lpips=lpips, verbose=False)\n self.mode = mode\n\n def func(self, preds: torch.Tensor, targets: torch.Tensor):\n\n if self.mode == \"grayscale\":\n loss_shape = (targets.shape[0], targets.shape[1])\n elif self.mode == \"rgb\":\n if targets.shape[1] != 3:\n raise ValueError(\n f\"Channel dimension must have size 3 for rgb mode,\\\n but got tensor of shape {targets.shape}.\"\n )\n\n is_complex = cplx.is_complex(targets) or cplx.is_complex_as_real(targets)\n if is_complex:\n raise TypeError(\n f\"Data type must be real when mode is {self.mode},\\\n but got data type {targets.dtype}\"\n )\n\n loss_shape = (targets.shape[0], 1)\n\n preds = self.preprocess_lpips(preds)\n targets = self.preprocess_lpips(targets)\n\n loss = self.net(preds, targets).squeeze()\n loss = loss.view(loss_shape)\n\n return loss\n\n def preprocess_lpips(self, img: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Preprocess image per LPIPS implementation.\n\n Converts images to magnitude images if complex and normalizes between [-1, 1].\n If self.mode is 'grayscale', then each channel dimension will be replicated 3 times.\n\n Args:\n img (torch.Tensor): Tensor to preprocess.\n\n Returns:\n img (torch.Tensor): Preprocessed tensor.\n \"\"\"\n\n is_complex = cplx.is_complex(img) or cplx.is_complex_as_real(img)\n if is_complex:\n img = cplx.abs(img)\n\n if self.mode == \"grayscale\":\n # normalize each image independently (channel dim. represents different images)\n shape = (img.shape[0], img.shape[1], -1)\n img_min = torch.amin(img.reshape(shape), dim=-1, keepdim=True).unsqueeze(-1)\n img_max = torch.amax(img.reshape(shape), dim=-1, keepdim=True).unsqueeze(-1)\n img = 2 * (img - img_min) / (img_max - img_min) - 1\n\n img = img.reshape(img.shape[0] * img.shape[1], 1, img.shape[2], img.shape[3])\n img = img.repeat(1, 3, 1, 1)\n elif self.mode == \"rgb\":\n # normalize each image independently (channel dim. represents the same image)\n shape = (img.shape[0], -1)\n img_min = (\n torch.amin(img.reshape(shape), dim=-1, keepdim=True).unsqueeze(-1).unsqueeze(-1)\n )\n img_max = (\n torch.amax(img.reshape(shape), dim=-1, keepdim=True).unsqueeze(-1).unsqueeze(-1)\n )\n img = 2 * (img - img_min) / (img_max - img_min) - 1\n\n return img\n\n\nif env.package_available(\"lpips\"):\n\n class NoTrainLpips(_LPIPS):\n def train(self, mode: bool) -> \"NoTrainLpips\":\n \"\"\"the network should not be able to be switched away from evaluation mode.\n Implementation adapted from torchmetrics LPIPS.\"\"\"\n return super().train(False)\n\nelse:\n NoTrainLpips = None\n","repo_name":"ad12/meddlr","sub_path":"meddlr/metrics/lpip.py","file_name":"lpip.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"2"}
+{"seq_id":"24276914216","text":"import sys\r\nimport pygame\r\nfrom scripts.utilities import load_image, load_images\r\nfrom scripts.entities import PhysicsEntity\r\nfrom scripts.tilemap import Tilemap\r\nfrom scripts.clouds import Clouds\r\n\r\nclass Game:\r\n \"\"\" Main Game class\r\n Attributes include loading in assets and setting up the screen\r\n \"\"\"\r\n def __init__(self):\r\n pygame.init()\r\n\r\n pygame.display.set_caption(\"Game\")\r\n self.screen = pygame.display.set_mode((640, 480))\r\n self.display = pygame.Surface((320, 240))\r\n\r\n self.clock = pygame.time.Clock()\r\n self.movement = [False, False]\r\n self.assets = {\r\n \"player\": load_image(\"entities/player.png\"),\r\n \"decor\": load_images(\"tiles/decor\"),\r\n \"grass\": load_images(\"tiles/grass\"),\r\n \"stone\": load_images(\"tiles/stone\"),\r\n \"large_decor\": load_images(\"tiles/large_decor\"),\r\n \"background\": load_image(\"background.png\"),\r\n \"clouds\": load_images(\"clouds\")\r\n } # dictionary for assets\r\n\r\n self.clouds = Clouds(self.assets[\"clouds\"], count = 16)\r\n\r\n self.player = PhysicsEntity(self, \"player\", (50, 50), (8, 15))\r\n\r\n self.tilemap = Tilemap(self, tile_size=16)\r\n self.scroll = [0, 0] # for the camera scrolling\r\n\r\n\r\n def run(self):\r\n while True:\r\n self.display.blit(self.assets[\"background\"], (0, 0))\r\n self.scroll[0] += (self.player.generate_rect().centerx - self.display.get_width() / 2 - self.scroll[0]) / 30\r\n self.scroll[1] += (self.player.generate_rect().centery - self.display.get_height() / 2 - self.scroll[1]) / 30\r\n render_scroll = (int(self.scroll[0]), int(self.scroll[1]))\r\n\r\n self.clouds.update()\r\n self.clouds.render(self.display, offset=render_scroll)\r\n\r\n self.tilemap.render(self.display, offset = render_scroll)\r\n\r\n self.player.update(self.tilemap, (self.movement[1] - self.movement[0], 0))\r\n self.player.render(self.display, offset = render_scroll)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\r\n self.movement[0] = True\r\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\r\n self.movement[1] = True\r\n if event.key == pygame.K_UP or event.key == pygame.K_w:\r\n self.player.velocity[1] = -3\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\r\n self.movement[0] = False\r\n if event.key == pygame.K_RIGHT or event.key == pygame.K_a:\r\n self.movement[1] = False\r\n\r\n self.screen.blit(pygame.transform.scale(self.display, self.screen.get_size()), (0, 0))\r\n pygame.display.update()\r\n self.clock.tick(60)\r\n\r\nGame().run()\r\n","repo_name":"KuromeMochi/Pikachu-Platformer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24788231928","text":"import PySimpleGUI as sg\r\n\r\nsg.theme('Dark Blue 17')\r\nlayout = [\r\n [sg.Text('I am Stennar',font=(\"Helvetica\", 25),background_color='#327ba2',relief=sg.RELIEF_RIDGE)],\r\n [sg.Text('Welcome', font=(\"Arial\", 20))],\r\n [sg.Text('Select a host', size=(15, 1))],\r\n [sg.Radio('Image! ', \"RADIO1\", default=True, size=(10,1)), sg.Radio('Audio!', \"RADIO1\")],\r\n [sg.Text('Choose a operation', size=(15, 1))],\r\n [sg.Radio('Encrypt ', \"RADIO2\", default=True, size=(10,1)), sg.Radio('Decrypt', \"RADIO2\")],\r\n [sg.Text('File 1', size=(8, 1)), sg.Input(), sg.FileBrowse()],\r\n [sg.Text('Your secret message(Text/image file', size=(15, 1))],\r\n [sg.Checkbox('Image?', size=(10,1))],\r\n [sg.Text('File 2', size=(8, 1)), sg.Input(), sg.FileBrowse()],\r\n [sg.Submit(), sg.Cancel()]\r\n]\r\n\r\nwindow = sg.Window('Math Assingment', layout)\r\nevent, values = window.read()\r\nwindow.close()\r\nprint(values[0], values[1], values[2],values[3],values[4],values[5])\r\n\r\n \r\nif (values[0]==True and values[2]==True):\r\n import textimageEncryption as tp\r\n import textreader as t\r\n tp.img=values[4]\r\n text=t.call(values[6])\r\n tp.data=text\r\n tp.encode()\r\n sg.Popup('Success!')\r\n \r\n\r\nelif(values[0]==True and values[3]==True):\r\n import textimageDecryption as dp\r\n dp.img=values[4]\r\n s=dp.call()\r\n sg.Popup('Your secret message was',s)\r\n\r\nelif(values[1]==True and values[2]==True):\r\n import au1encryption as ae\r\n import textreader as t\r\n ae.song1=values[4]\r\n text=t.call(values[6])\r\n ae.a1=text\r\n ae.audioen()\r\n sg.Popup('Success!')\r\n\r\nelif(values[1]==True and values[3]==True):\r\n import au1decryption as ad\r\n ad.song1=values[4]\r\n s=ad.call()\r\n sg.Popup('your secret message was',s)\r\n\r\nelif(values[0]==True and values[2]==True and values[5]==True):\r\n import imageimageEncryption as iie\r\n iie.img1=values[4]\r\n iie.img2=values[6]\r\n iie.merge()\r\n sg.Popup('Succes output.png') \r\n \r\nelif(values[0]==True and values[3]==True and values[5]==True):\r\n import imageimageDecryption as iid\r\n iid.img=values[4]\r\n iid.unmerge()\r\n sg.Image(\"SecretMesaage.PNG\")\r\n\r\n\r\n\r\n\r\n#while looop\r\n#image display.\r\n\r\n \r\n\r\n\r\n","repo_name":"CRUCIFIER0/Steganography","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"39143080767","text":"WEBHOOK_SCHEMA = {\n\t\"$schema\": \"http://json-schema.org/draft-04/schema#\",\n\t\"type\": \"object\",\n\t\"properties\": {\n\t \"ref\": {\"type\": \"string\"},\n\t \"ref_type\": {\"type\": \"string\", \"enum\": [\"branch\", \"tag\"] },\n\t \"commits\": {\n\t\t\"type\": \"array\",\n\t\t\"items\": [\n\t\t {\n\t\t\t\"type\": \"object\",\n\t\t\t\"properties\": {\n\t\t\t \"id\": {\"type\": \"string\"},\n\t\t\t \"message\": {\"type\": \"string\"},\n\t\t\t \"timestamp\": {\"type\": \"string\"}\n\t\t\t },\n\t\t\t\"required\": [\"id\", \"message\", \"timestamp\"]\n\t\t\t}\n\t\t ]\n\t\t},\n\t \"repository\": {\n\t\t\"type\": \"object\",\n\t\t\"properties\": {\n\t\t \"ssh_url\": {\"type\": \"string\"}\n\t\t },\n\t\t\"required\": [\"ssh_url\"]\n\t\t}\n\t },\n\t\"required\": [\"ref\", \"repository\"],\n \"anyOf\": [\n {\n \"required\": [\"ref_type\"],\n },\n {\n \"required\": [\"commits\"],\n },\n ],\n }\n","repo_name":"DavidVentura/docker-builder","sub_path":"src/builder/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7453850411","text":"\"\"\"\nMade by *Lirgo*\n\nThis project is devoted to calculating and\npredicting the shape of a parabola\nbased on a video with a thrown ball\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport time\n\npoints = []\n\ndef parabola(point_1, point_2, point_3):\n\n # we define coordinates to points\n\n x1, y1 = point_1\n x2, y2 = point_2\n x3, y3 = point_3\n\n \"\"\"\n the lines underneath are a result of a \n calculated parabola function using desmos\n that you can check out in this link:\n \n https://www.desmos.com/calculator/q5khflotcq?lang=en\n \"\"\"\n\n b = ((y1 - y2) * (x3 ** 2 - x1 ** 2) + (x1 ** 2 - x2 ** 2) * (y1 - y3)) / ((x3 - x1) * (x3 - x2) * (x1 - x2))\n a = ((y2 - y1) + b * (x1 - x2)) / (x2 ** 2 - x1 ** 2)\n c = y1 - a * x1 ** 2 - b * x1\n\n return a, b, c\n\ndef f(x, factors):\n a, b, c = factors\n return a * x ** 2 + b * x + c\n\ndef detect_circles(frame):\n blur = cv2.medianBlur(frame, 7)\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n lower_blue = np.array([10, 100, 100])\n upper_blue = np.array([40, 255, 255])\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n\n # convert image to grayscale image\n gray_image = cv2.cvtColor(cv2.bitwise_and(frame, frame, mask=mask), cv2.COLOR_BGR2GRAY)\n # convert the grayscale image to binary image\n ret, thresh = cv2.threshold(gray_image, 0, 255, 0)\n # calculate moments of binary image\n M = cv2.moments(thresh)\n # calculate x,y coordinate of center\n try:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n # put text and highlight the center\n cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)\n return cX, cY\n except:\n return None\n\ndef main():\n video = cv2.VideoCapture('parabola.mp4')\n while True:\n try:\n _, frame = video.read()\n point = detect_circles(frame)\n if point is not None:\n points.append(point)\n cv2.imshow('frame', frame)\n print(points)\n if cv2.waitKey(0) == 27:\n break\n except cv2.error as e:\n # print(e)\n break\n\n print('done')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"L1RG0/Ball-trajectory-prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"19344443959","text":"import os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nimport numpy as np\nimport warnings\nimport tabulate as tab\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs\nfrom data.data_manager import ImageDataManager\nfrom data.dataset_loader import read_image\nimport models\nfrom training.losses import SigmoidCrossEntropyLoss, HardnessPredictorLoss, DeepMARLoss, SplitSoftmaxCrossEntropyLoss\nfrom utils.iotools import check_isfile, save_checkpoint\nfrom utils.avgmeter import AverageMeter\nfrom utils.loggers import Logger, AccLogger\nfrom utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \\\n load_pretrained_weights, freeze_all_layers\nimport evaluation.metrics as metrics\nfrom training.optimizers import init_optimizer\nfrom training.lr_schedulers import init_lr_scheduler\nfrom utils.plot import plot_epoch_losses, show_img_grid\nfrom trainer import Trainer\nimport evaluation.rejectors as rejectors\nfrom evaluation.result_manager import ResultManager\nfrom training.calibrators import NoneCalibrator, LinearCalibrator\n\n\nclass RealisticPredictorTrainer(Trainer):\n \"\"\"\n Trainer for a baseline.\n \"\"\"\n def __init__(self, args):\n \"\"\"\n Run the trainer.\n :param args: Command line args.\n \"\"\"\n super().__init__(args)\n\n\n def init_model(self):\n print('Initializing main model: {}'.format(args.model))\n self.model_main = models.init_model(name=self.args.model, num_classes=self.dm.num_attributes,\n pretrained=not self.args.no_pretrained, use_gpu=self.use_gpu)\n print('Model size: {:.3f} M'.format(count_num_param(self.model_main)))\n\n print('Initializing HP model: {}'.format(args.hp_model))\n # Determine the size of the output vector for the HP-Net.\n num_hp_net_outputs = 1 if self.args.hp_net_simple else self.dm.num_attributes\n # Init the HP-Net\n self.model_hp = models.init_model(name=\"hp_net_\" + self.args.hp_model, num_classes=num_hp_net_outputs,\n pretrained=not self.args.no_pretrained)\n print('Model size: {:.3f} M'.format(count_num_param(self.model_hp)))\n\n if self.args.rejector == \"none\":\n self.rejector = rejectors.NoneRejector()\n elif self.args.rejector == 'macc':\n self.rejector = rejectors.MeanAccuracyRejector(self.args.max_rejection_quantile)\n elif self.args.rejector == \"median\":\n self.rejector = rejectors.MedianRejector(self.args.max_rejection_quantile)\n elif self.args.rejector == \"threshold\":\n self.rejector = rejectors.ThresholdRejector(self.args.rejection_threshold, self.args.max_rejection_quantile)\n elif self.args.rejector == \"quantile\":\n self.rejector = rejectors.QuantileRejector(self.args.max_rejection_quantile)\n elif self.args.rejector == 'f1':\n self.rejector = rejectors.F1Rejector(self.args.max_rejection_quantile)\n else:\n raise ValueError(\"Unsupported rejection strategy: '{}'\".format(self.args.rejector))\n print(\"Using rejection strategy '{}'\".format(self.args.rejector))\n\n if self.args.hp_calib == 'none':\n self.hp_calibrator = NoneCalibrator()\n elif self.args.hp_calib == 'linear':\n self.hp_calibrator = LinearCalibrator()\n else:\n raise ValueError(\"Unsupported calibrator: '{}'\".format(self.args.hp_calib))\n print(\"Using calibrator for HP-Loss '{}'\".format(self.args.hp_calib))\n\n\n # Load pretrained weights if specified in args.\n load_file = osp.join(args.save_experiment, args.load_weights)\n 
self.loaded_args = self.args\n if args.load_weights:\n if check_isfile(load_file):\n cp = load_pretrained_weights([self.model_main, self.model_hp], load_file)\n if \"args\" in cp:\n self.loaded_args = cp[\"args\"]\n else:\n print(\"WARNING: Could not load args. \")\n\n if \"result_dict\" in cp and cp[\"result_dict\"] is not None and self.args.evaluate:\n self.result_dict = cp[\"result_dict\"]\n self.result_manager = ResultManager(self.result_dict)\n print(\"Loaded result dict with keys: \")\n print(sorted(list(self.result_dict.keys())))\n if \"rejection_thresholds\" in self.result_dict:\n self.rejector.load_thresholds(self.result_dict[\"rejection_thresholds\"])\n if self.rejector.is_initialized():\n print(\"Loaded rejection thresholds. \")\n else:\n print(\"Loaded uninitialized (None) rejection thresholds. \")\n else:\n print(\"WARNING: Could not load rejection thresholds. \")\n else:\n print(\"WARNING: Could not load pretrained weights\")\n self.new_eval_split = self.args.eval_split != self.loaded_args.eval_split\n # Load model onto GPU if GPU is used.\n self.model_main = self.model_main.cuda() if self.use_gpu else self.model_main\n self.model = self.model_main\n self.model_hp = self.model_hp.cuda() if self.use_gpu else self.model_hp\n\n self.pos_ratio = self.dm.dataset.get_positive_attribute_ratio()\n # Select Loss function.\n # Select Loss function.\n if args.loss_func == \"deepmar\":\n\n self.criterion = DeepMARLoss(self.pos_ratio, args.train_batch_size, use_gpu=self.use_gpu,\n sigma=args.loss_func_param)\n elif args.loss_func == \"scel\":\n self.criterion = SigmoidCrossEntropyLoss(num_classes=self.dm.num_attributes, use_gpu=self.use_gpu)\n else:\n self.criterion = None\n\n self.criterion_main = self.criterion\n self.criterion_hp = HardnessPredictorLoss(self.args.use_deepmar_for_hp, self.pos_ratio, self.dm.num_attributes,\n use_gpu=self.use_gpu, sigma=self.args.hp_loss_param,\n use_visibility=self.args.use_bbs_gt,\n visibility_weight=self.args.hp_visibility_weight)\n self.f1_calibration_thresholds = None\n\n\n self.optimizer_main = init_optimizer(self.model_main, **optimizer_kwargs(args))\n self.scheduler_main = init_lr_scheduler(self.optimizer_main, **lr_scheduler_kwargs(args))\n\n self.optimizer = self.optimizer_main\n self.scheduler = self.scheduler_main\n\n op_args = optimizer_kwargs(args)\n sc_args = lr_scheduler_kwargs(args)\n op_args['lr'] *= self.args.hp_net_lr_multiplier\n self.optimizer_hp = init_optimizer(self.model_hp, **op_args)\n sc_args[\"stepsize\"] = [i + self.args.hp_epoch_offset for i in sc_args[\"stepsize\"]]\n self.scheduler_hp = init_lr_scheduler(self.optimizer_hp, **sc_args)\n\n\n self.model_main = nn.DataParallel(self.model_main) if self.use_gpu else self.model_main\n self.model = self.model_main\n self.model_hp = nn.DataParallel(self.model_hp) if self.use_gpu else self.model_hp\n\n if not self.args.evaluate:\n self.init_epochs()\n\n self.model_list = [self.model_main, self.model_hp]\n self.optimizer_list = [self.optimizer_main, self.optimizer_hp]\n self.scheduler_list = [self.scheduler_main, self.scheduler_hp]\n self.criterion_list = [self.criterion_main, self.criterion_hp]\n\n # if args.resume and check_isfile(args.resume):\n # args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=optimizer)\n\n def update_rejector_thresholds(self):\n split = self.args.rejector_thresholds_split\n self.init_f1_calibration_threshold()\n if self.result_manager.check_output_dict(split):\n labels, prediction_probs, predictions, _ = 
self.result_manager.get_outputs(split)\n else:\n print(\"Computing label predictions for training data. \")\n labels, prediction_probs, predictions = self.get_label_predictions(split)\n self.result_manager.update_outputs(split, prediction_probs=prediction_probs, labels=labels,\n predictions=predictions)\n if self.args.use_confidence:\n if self.args.f1_calib:\n decision_thresholds = self.f1_calibration_thresholds\n assert decision_thresholds is not None\n else:\n decision_thresholds = None\n hp_scores = 1 - metrics.get_confidence(prediction_probs, decision_thresholds)\n print(\"Using confidence scores as HP-scores. \")\n elif self.result_manager.check_output_dict(split):\n _, _, _, hp_scores = self.result_manager.get_outputs(split)\n else:\n print(\"Computing hardness scores for training data. \")\n hp_scores, _, _ = self.get_full_output(model=self.model_hp, criterion=self.criterion_hp, split=split)\n hp_scores = self.criterion_hp.broadcast(hp_scores)\n self.result_manager.update_outputs(split, hp_scores=hp_scores)\n print(\"Updating rejection thresholds based on training data. \")\n self.rejector.update_thresholds(labels, predictions, hp_scores)\n\n def update_hp_calibrator_thresholds(self, thresholds=None):\n if self.args.hp_calib == \"none\":\n return\n if self.args.hp_calib_thr == \"f1\":\n if self.hp_calibrator.is_initialized():\n return\n thresholds = self.get_baseline_f1_calibration_thresholds()\n elif self.args.hp_calib_thr == \"mean\":\n thresholds = 0.5 if thresholds is None else thresholds\n else:\n raise ValueError(\"Unsupported HP-Loss calibration threshold: '{}'\".format(self.args.hp_calib_thr))\n self.hp_calibrator.update_thresholds(thresholds)\n\n def init_epochs(self):\n # Initialize the epoch thresholds.\n if self.args.max_epoch < 0 and (self.args.main_net_train_epochs < 0 or self.args.hp_net_train_epochs < 0):\n raise ValueError(\"Neither max-epochs or not-train-epochs is defined. 
\")\n if self.args.main_net_train_epochs < 0:\n self.args.main_net_train_epochs = (self.args.max_epoch - self.args.hp_epoch_offset\n - self.args.main_net_finetuning_epochs)\n if self.args.hp_net_train_epochs < 0:\n self.args.hp_net_train_epochs = (self.args.max_epoch - self.args.hp_epoch_offset\n - self.args.main_net_finetuning_epochs)\n if self.args.max_epoch < 0:\n self.args.max_epoch = (max(self.args.main_net_train_epochs, self.args.hp_net_train_epochs\n + self.args.hp_epoch_offset) + self.args.main_net_finetuning_epochs)\n print(\"Training schedule: \")\n print(tab.tabulate([\n [\"Main-Net train epochs\", self.args.main_net_train_epochs],\n [\"HP-Net epoch offset\", self.args.hp_epoch_offset],\n [\"HP-Net train epochs\", self.args.hp_net_train_epochs],\n [\"Main-Net finetuning epochs\", self.args.main_net_finetuning_epochs],\n [\"Total epochs\", self.args.max_epoch]\n ]))\n\n def train(self, fixbase=False):\n \"\"\"\n Train the model for an epoch.\n :param fixbase: Is this a fixbase epoch?\n :return: Time of execution end.\n \"\"\"\n losses_main = AverageMeter()\n losses_hp = AverageMeter()\n train_main = not self.args.train_hp_only and self.epoch < self.args.main_net_train_epochs\n train_main_finetuning = (not self.args.train_hp_only and self.epoch >= self.args.max_epoch\n - self.args.main_net_finetuning_epochs)\n rejection_epoch = (not self.args.train_hp_only and self.epoch == self.args.max_epoch\n - self.args.main_net_finetuning_epochs)\n train_hp = (self.args.hp_epoch_offset <= self.epoch < self.args.hp_net_train_epochs\n + self.args.hp_epoch_offset)\n num_batch = len(self.trainloader)\n\n if rejection_epoch:\n self.update_rejector_thresholds()\n if self.args.hp_epoch_offset == self.epoch:\n self.update_hp_calibrator_thresholds()\n\n if train_main or train_main_finetuning:\n self.model_main.train()\n losses = losses_main\n else:\n self.model_main.eval()\n losses = losses_hp\n\n if train_hp:\n self.model_hp.train()\n else:\n self.model_hp.eval()\n\n # For saving results to compute mean calibration thresholds.\n positive_logits_sum = torch.zeros(self.dm.num_attributes)\n negative_logits_sum = torch.zeros(self.dm.num_attributes)\n positive_num = torch.zeros(self.dm.num_attributes)\n negative_num = torch.zeros(self.dm.num_attributes)\n if self.use_gpu:\n positive_logits_sum = positive_logits_sum.cuda()\n negative_logits_sum = negative_logits_sum.cuda()\n positive_num = positive_num.cuda()\n negative_num = negative_num.cuda()\n\n for batch_idx, (imgs, labels, _) in enumerate(self.trainloader):\n\n\n if self.use_gpu:\n imgs, labels = imgs.cuda(), labels.cuda()\n if self.use_bbs:\n visibility_labels = labels[:, self.dm.num_attributes:]\n labels = labels[:, :self.dm.num_attributes]\n assert labels.shape == visibility_labels.shape\n else:\n visibility_labels = None\n # Run the batch through both nets.\n label_prediciton_probs = self.model_main(imgs)\n label_predicitons_logits = self.criterion_main.logits(label_prediciton_probs.detach())\n\n labels_bool = labels > 0.5 # TODO: make nicer\n positive_logits_sum += label_predicitons_logits[labels_bool].sum(0)\n negative_logits_sum += label_predicitons_logits[~labels_bool].sum(0)\n positive_num += labels_bool.sum(0, dtype=torch.float)\n negative_num += (~labels_bool).sum(0, dtype=torch.float)\n\n if not self.args.use_confidence:\n hardness_predictions = self.model_hp(imgs)\n if train_main or train_main_finetuning:\n if not self.args.use_confidence:\n hardness_predictions_logits = self.criterion_hp.logits(hardness_predictions.detach())\n 
hardness_predictions_logits = self.criterion_hp.broadcast(hardness_predictions_logits)\n elif train_main_finetuning:\n if self.args.f1_calib:\n decision_thresholds = self.f1_calibration_thresholds\n else:\n decision_thresholds = None\n hardness_predictions_logits = 1 - metrics.get_confidence(label_predicitons_logits,\n decision_thresholds).detach()\n if self.args.no_hp_feedback or not train_hp:\n main_net_weights = label_prediciton_probs.new_ones(label_prediciton_probs.shape)\n else:\n # Make a detached version of the hp scores for computing the main loss.\n main_net_weights = hardness_predictions_logits\n if self.args.use_bbs_feedback:\n main_net_weights *= visibility_labels\n if train_main_finetuning:\n select = self.rejector(hardness_predictions_logits)\n main_net_weights = main_net_weights * select\n # Compute main loss, gradient and optimize main net.\n loss_main = self.criterion_main(label_prediciton_probs, labels, main_net_weights)\n self.optimizer_main.zero_grad()\n loss_main.backward()\n nn.utils.clip_grad_norm_(self.model_main.parameters(), max_norm=10.0)\n self.optimizer_main.step()\n\n losses_main.update(loss_main.item(), labels.size(0))\n\n if train_hp and not self.args.use_confidence:\n # Compute HP loss, gradient and optimize HP net.\n # The label predictions are calibrated.\n loss_hp = self.criterion_hp(hardness_predictions, self.hp_calibrator(label_predicitons_logits), labels, visibility_labels)\n self.optimizer_hp.zero_grad()\n loss_hp.backward()\n nn.utils.clip_grad_norm_(self.model_hp.parameters(), max_norm=10.0)\n self.optimizer_hp.step()\n\n losses_hp.update(loss_hp.item(), labels.size(0))\n # Print progress.\n if (batch_idx + 1) % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t' \n 'Main loss {loss.avg:.4f}\\t'\n 'HP-Net loss {hp_loss.avg:.4f}'.format(\n self.epoch + 1, batch_idx + 1, num_batch,\n loss=losses_main,\n hp_loss=losses_hp\n ))\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Main loss {loss.avg:.4f}\\t'\n 'HP-Net loss {hp_loss.avg:.4f}'.format(\n self.epoch + 1, batch_idx + 1, num_batch,\n loss=losses_main,\n hp_loss=losses_hp\n ))\n\n # Update HP calibrator thresholds (mean thresholds are only used if the option is selected in args)\n positive_logits_sum /= positive_num\n negative_logits_sum /= negative_num\n self.update_hp_calibrator_thresholds((positive_logits_sum + negative_logits_sum) / 2)\n\n return losses_main.avg, losses_hp.avg\n\n def test(self, predictions=None, ground_truth=None):\n split = self.args.eval_split\n if not self.rejector.is_initialized() or self.args.no_cache:\n self.update_rejector_thresholds()\n\n # Get Hardness scores.\n\n if self.args.use_confidence:\n labels, prediction_probs, predictions = self.get_label_predictions(split)\n if self.args.f1_calib:\n decision_thresholds = self.f1_calibration_thresholds\n else:\n decision_thresholds = None\n hp_scores = 1 - metrics.get_confidence(prediction_probs, decision_thresholds)\n self.result_manager.update_outputs(split, hp_scores=hp_scores)\n print(\"Using confidence scores as HP-scores. \")\n elif self.args.evaluate and self.result_manager.check_output_dict(split) and not self.args.no_cache:\n _, _, _, hp_scores = self.result_manager.get_outputs(split)\n else:\n print(\"Computing hardness scores for testing data. 
\")\n hp_scores, _, _ = self.get_full_output(model=self.model_hp, criterion=self.criterion_hp)\n hp_scores = self.criterion_hp.broadcast(hp_scores)\n self.result_manager.update_outputs(split, hp_scores=hp_scores)\n\n ignore = np.logical_not(self.rejector(hp_scores))\n print(\"Rejecting the {:.2%} hardest of testing examples. \".format(ignore.mean()))\n # Run the standard accuracy testing.\n super().test(ignore)\n labels, prediction_probs, predictions, _ = self.result_manager.get_outputs(split)\n\n\n print(\"HP-Net Hardness Scores: \")\n print(tab.tabulate([\n [\"Mean\", np.mean(hp_scores)],\n [\"Variance\", np.var(hp_scores)]\n ]))\n\n # Display the hardness scores for every attribute.\n print(\"-\" * 30)\n header = [\"Attribute\", \"Positivity Ratio\", \"Accuracy\", \"Hardness Score Mean\", \"Average Precision\", \"cAP\", \"Rejection Threshold\",\n \"Rejection Quantile\"]\n mean = hp_scores.mean(0)\n var = np.sqrt(hp_scores.var(0))\n average_precision = metrics.hp_average_precision(labels, predictions, hp_scores)\n baseline_average_precision = self.get_baseline_average_precision()\n if baseline_average_precision is None:\n baseline_average_precision = 0\n comparative_average_precision = (average_precision > baseline_average_precision).astype(\"int8\")\n # mean_average_precision = metrics.hp_mean_average_precision(labels, label_predictions, hp_scores)\n\n rejection_quantiles = ignore.mean(0).flatten()\n rejection_thresholds = self.rejector.attribute_thresholds\n if rejection_thresholds is None:\n rejection_thresholds = np.ones_like(rejection_quantiles)\n else:\n rejection_thresholds = rejection_thresholds.flatten()\n data = list(zip(self.dm.attributes, self.positivity_ratio, self.acc_atts, mean, average_precision,\n comparative_average_precision, rejection_thresholds, rejection_quantiles))\n data += [[\"Total\", self.positivity_ratio.mean(), self.acc_atts.mean(), mean.mean(),\n average_precision.mean(), comparative_average_precision.mean(), rejection_thresholds.mean(),\n rejection_quantiles.mean()]]\n table = tab.tabulate(data, floatfmt='.4f', headers=header)\n print(table)\n print(\"Mean average precision of hardness prediction over attributes: {:.2%}\".format(average_precision.mean()))\n print(\"Comparative mean average precision: {:.2%}\".format(comparative_average_precision.mean()))\n csv_path = osp.join(self.args.save_experiment, self.ts + \"rp_result_table.csv\")\n np.savetxt(csv_path, np.transpose(data), fmt=\"%s\", delimiter=\"\\t\")\n print(\"Saved Table at \" + csv_path)\n\n self.result_dict.update({\n \"rejection_thresholds\": self.rejector.attribute_thresholds,\n \"calibration_thresholds\": self.hp_calibrator.thresholds_np,\n \"ignored_test_samples\": ignore,\n \"average_precision\": average_precision\n })\n self.save_result_dict()\n\n hard_att_labels = None\n hard_att_pred = None\n if self.args.num_save_hard + self.args.num_save_easy > 0:\n # This part only gets executed if the corresponding arguments are passed at the terminal.\n if self.args.hard_att in self.dm.attributes:\n # If a valid attribute is given the labels for that attribute are selected.\n print(\"Looking at Hard attribute \" + self.args.hard_att)\n att_idx = self.dm.attributes.index(self.args.hard_att)\n hard_att_labels = labels[:, att_idx]\n hard_att_pred = prediction_probs[:, att_idx]\n if not self.loaded_args.hp_net_simple:\n # If a valid attribute is given, the hardness scores for that attribute are selected, else the mean\n # over all attributes is taken.\n if self.args.hard_att in self.dm.attributes:\n 
hp_scores = hp_scores[:, att_idx]\n else:\n hp_scores = hp_scores.mean(1)\n hp_scores = hp_scores.flatten()\n sorted_idxs = hp_scores.argsort()\n # Select easy and hard examples as specified in the terminal.\n hard_idxs = np.concatenate((sorted_idxs[:self.args.num_save_easy],\n sorted_idxs[-self.args.num_save_hard:]))\n filename = osp.join(self.args.save_experiment, self.ts + \"hard_images.png\")\n title = \"Examples by hardness for \" + (self.args.load_weights if self.args.load_weights else self.ts)\n if hard_att_labels is not None:\n hard_att_labels = hard_att_labels[hard_idxs]\n if hard_att_pred is not None:\n hard_att_pred = hard_att_pred[hard_idxs]\n # Display the image examples.\n show_img_grid(self.dm.split_dict[self.args.eval_split], hard_idxs, filename, title, self.args.hard_att,\n hard_att_labels, hp_scores[hard_idxs], hard_att_pred)\n\n return comparative_average_precision.mean()\n\n def get_baseline_average_precision(self):\n return self.get_baseline_data(self.args.ap_baseline, \"average_precision\", \"baseline average precision\")\n\n def get_baseline_f1_calibration_thresholds(self):\n return self.get_baseline_data(self.args.f1_baseline, \"f1_thresholds\", \"baseline F1 calibration thresholds\")\n\n def get_baseline_data(self, filename, key, name):\n load_file = osp.join(self.args.save_experiment, filename)\n if filename and check_isfile(load_file):\n checkpoint = torch.load(load_file)\n\n if \"result_dict\" in checkpoint and checkpoint[\"result_dict\"] is not None:\n result_dict = checkpoint[\"result_dict\"]\n if key in result_dict and result_dict[key] is not None:\n print(\"Loaded {} from file: {}\".format(name, filename))\n return result_dict[key]\n\n print(\"WARNING: Could not load {}. \".format(name))\n return None\n\n def clear_output_cache(self):\n super().clear_output_cache()\n self.rejector.reset()\n\n\nif __name__ == '__main__':\n # global variables\n parser = argument_parser()\n args = parser.parse_args()\n trainer = RealisticPredictorTrainer(args)\n","repo_name":"Lucas-Florin/hardness-predictor-for-par","sub_path":"realistic_predictor_trainer.py","file_name":"realistic_predictor_trainer.py","file_ext":"py","file_size_in_byte":25456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36800749817","text":"# -*- coding: utf-8 -*-\n\nimport importlib\nimport json\nimport os\nimport re\nimport vim\n\nfrom .compat import integer_types, to_bytes, to_unicode\n\ncurrent = None\n\n\ndef get_encoding():\n return to_unicode(vim.current.buffer.options['fileencoding'] or\n vim.options['encoding'] or 'utf-8', 'utf-8')\n\n\ndef _unicode(text):\n encoding = get_encoding()\n try:\n return to_unicode(text, encoding)\n except Exception:\n return text\n\n\ndef _read_args(path):\n try:\n with open(path) as f:\n return [l.strip() for l in f.readlines()]\n except Exception:\n return []\n\n\nclass Meta(type):\n def __init__(cls, name, bases, attrs):\n if name not in ('Completor', 'Base'):\n Completor._registry[to_unicode(cls.filetype, 'utf-8')] = cls()\n\n return super(Meta, cls).__init__(name, bases, attrs)\n\nBase = Meta('Base', (object,), {})\n\n\nclass Unusable(object):\n def __get__(self, inst, owner):\n raise RuntimeError('unusable')\n\n\nclass Completor(Base):\n _registry = {}\n\n filetype = Unusable()\n\n daemon = False\n sync = False\n trigger = None\n ident = re.compile(r'[^\\W\\d]\\w*', re.U)\n\n _type_map = {\n b'c': b'cpp'\n }\n\n _arg_cache = {}\n\n def __init__(self):\n self.input_data = ''\n self.ft = ''\n\n @property\n def current_directory(self):\n \"\"\"Return the directory of the file in current buffer\n\n :rtype: unicode\n \"\"\"\n return to_unicode(vim.Function('expand')('%:p:h'), 'utf-8')\n\n @property\n def tempname(self):\n \"\"\"Write buffer content to a temp file and return the file name\n\n :rtype: unicode\n \"\"\"\n return to_unicode(vim.Function('completor#utils#tempname')(), 'utf-8')\n\n @property\n def filename(self):\n \"\"\"Get the file name of current buffer\n\n :rtype: unicode\n \"\"\"\n return vim.current.buffer.name\n\n @property\n def cursor(self):\n line, _ = vim.current.window.cursor\n return line, len(self.input_data)\n\n # use cached property\n @property\n def filetype_map(self):\n m = self.get_option('completor_filetype_map') or {}\n self._type_map.update(m)\n return self._type_map\n\n @staticmethod\n def get_option(key):\n return vim.vars.get(key)\n\n @property\n def disabled(self):\n types = self.get_option('completor_disable_{}'.format(self.filetype))\n if isinstance(types, integer_types):\n return bool(types)\n if isinstance(types, (list, vim.List)):\n return to_bytes(self.ft) in types\n return False\n\n # input_data: unicode\n def match(self, input_data):\n if self.trigger is None:\n return True\n if isinstance(self.trigger, str):\n self.trigger = re.compile(self.trigger, re.X | re.U)\n\n return bool(self.trigger.search(input_data))\n\n def format_cmd(self):\n return ''\n\n # base: unicode or list\n def parse(self, base):\n return []\n\n # base: str or unicode or list\n def get_completions(self, base):\n if not isinstance(base, (list, vim.List)):\n base = _unicode(base)\n return self.parse(base)\n\n @staticmethod\n def find_config_file(file):\n cwd = os.getcwd()\n while True:\n path = os.path.join(cwd, file)\n if os.path.exists(path):\n return path\n if os.path.dirname(cwd) == cwd:\n break\n cwd = os.path.split(cwd)[0]\n\n def parse_config(self, file):\n key = \"{}-{}\".format(self.filetype, file)\n if key not in self._arg_cache:\n path = self.find_config_file(file)\n self._arg_cache[key] = [] if path is None else _read_args(path)\n return self._arg_cache[key]\n\n def ident_match(self, pat):\n if not self.input_data:\n return -1\n\n _, index = self.cursor\n for i in range(index):\n text = self.input_data[i:index]\n matched = 
pat.match(text)\n if matched and matched.end() == len(text):\n return len(to_bytes(self.input_data[:i], get_encoding()))\n return index\n\n def start_column(self):\n if not self.ident:\n return -1\n if isinstance(self.ident, str):\n self.ident = re.compile(self.ident, re.U | re.X)\n return self.ident_match(self.ident)\n\n def request(self):\n \"\"\"Generate daemon request arguments\n \"\"\"\n line, col = self.cursor\n return json.dumps({\n 'line': line - 1,\n 'col': col,\n 'filename': self.filename,\n 'content': '\\n'.join(vim.current.buffer[:])\n })\n\n def message_ended(self, msg):\n \"\"\"Test the end of daemon response\n\n :param msg: the message received from daemon (bytes)\n \"\"\"\n return True\n\n_completor = Completor()\n\n\n# ft: unicode\ndef _load(ft):\n if ft not in _completor._registry:\n try:\n importlib.import_module(\"completers.{}\".format(ft))\n except ImportError:\n return\n return _completor._registry.get(ft)\n\n\n# ft: bytes, input_data: bytes\ndef load_completer(ft, input_data):\n input_data = _unicode(input_data)\n\n if not ft or not input_data.strip():\n return\n ft = to_unicode(_completor.filetype_map.get(ft, ft), 'utf-8')\n\n if 'common' not in _completor._registry:\n import completers.common # noqa\n\n filename = get('filename')\n if filename.match(input_data) and not filename.disabled:\n c = filename\n else:\n c = _load(ft)\n if c is None:\n omni = get('omni')\n if omni.has_omnifunc(ft):\n c = omni\n if c is None or not c.match(input_data):\n c = get('common')\n c.input_data = input_data\n c.ft = ft\n return None if c.disabled else c\n\n\n# filetype: str, ft: bytes, input_data: bytes\ndef get(filetype, ft=None, input_data=None):\n completer = _completor._registry.get(filetype)\n if completer:\n if ft is not None:\n completer.ft = _unicode(ft)\n if input_data is not None:\n completer.input_data = _unicode(input_data)\n return completer\n","repo_name":"dNitro/dotfiles","sub_path":".vim/plugged-local/completor.vim/pythonx/completor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"32919195577","text":"c = 1\r\nn1 = int(input(\"Qual é o primeiro número? \"))\r\nn2 = int(input(\"Qual é o segundo número? \"))\r\nwhile c !=5:\r\n print('''Escolha uma das opções :\r\n [1] Adição\r\n [2] Multiplicação\r\n [3] Maior\r\n [4] Novos números\r\n [5] Desligar''')\r\n c = int(input(\"Qual é o número?\"))\r\n if c == 1:\r\n n = n1 + n2\r\n print(\"A soma vai ser {}\".format(n))\r\n elif c ==2:\r\n n = n1 * n2\r\n print(\"A multiplicação vai ser {}\".format(n))\r\n elif c == 3:\r\n if n1 > n2:\r\n maior = n1\r\n else:\r\n maior = n2\r\n print(\"O maior número será {}\".format(maior))\r\n elif c == 4:\r\n n1 = int(input('Escolha novamente! um número:'))\r\n n2 = int(input(\"Escolha novamente! outro número:\"))\r\n elif c == 5 :\r\n print(\"Acabo\")\r\n else:\r\n print(\"erro\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BrunoSilvaBR/PythonExercicio","sub_path":"ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"43336650801","text":"from django.db import models\nfrom accounts.models.profile import Profile,Follower,Following\nfrom base.models.basemodel import BaseModel\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\n\nclass Post(BaseModel):\n profile = models.ForeignKey(Profile,on_delete=models.CASCADE,related_name='profile')\n caption = models.CharField(max_length=1000,blank=True, null=True)\n likes_counts = models.IntegerField(default = 0)\n\n def __str__(self) -> str:\n return self.caption\n\n\nclass PostImages(BaseModel):\n post_ref = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='postimages')\n image = models.ImageField(upload_to='Uploads/Post')\n\n\nclass Likes(BaseModel):\n user = models.ManyToManyField(Profile,related_name='likes_by_users',blank=True) # who is liking\n image = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='postlikes')\n \n\n @receiver(post_save,sender = Post)\n def CreateLikesObj(sender,instance,created,*args, **kwargs):\n if created:\n try:\n likes = Likes.objects.create(image = instance)\n except Exception as e:\n print(e)\n\n\n def __str__(self) -> str:\n return self.image.caption\n\n\n\nclass CommentPost(BaseModel):\n post = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='comentpost')\n user = models.ForeignKey(Profile,on_delete=models.CASCADE,related_name='profilecommenting')\n comment = models.CharField(max_length=1000,blank=True, null=True)\n # likes_counts = models.IntegerField()\n\n # @receiver(post_save,sender = Post)\n # def CreateLikesObj(sender,instance,created,*args, **kwargs):\n # if created:\n # try:\n # likes = Likes.objects.create(image = instance ,like_counts=0)\n # except Exception as e:\n # print(e)\n \n\n","repo_name":"Mandalor-09/Clone","sub_path":"InstaClone/main/models/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"6659388680","text":"import argparse\nimport time\nimport torch\nfrom Models import get_model\nfrom Process import *\nimport torch.nn.functional as F\nfrom Optim import CosineWithRestarts\nfrom Batch import create_masks\nimport pdb\nimport dill as pickle\nimport argparse\nfrom Models import get_model\nfrom Beam import beam_search\nfrom nltk.corpus import wordnet\nfrom torch.autograd import Variable\nimport re\n\ndef get_synonym(word, SRC):\n syns = wordnet.synsets(word)\n for s in syns:\n for l in s.lemmas():\n if SRC.vocab.stoi[l.name()] != 0:\n return SRC.vocab.stoi[l.name()]\n \n return 0\n\ndef multiple_replace(dict, text):\n # Create a regular expression from the dictionary keys\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, dict.keys())))\n\n # For each match, look-up corresponding value in dictionary\n return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text) \n\ndef translate_sentence(sentence, model, opt, SRC, TRG):\n \n model.eval()\n indexed = []\n sentence = SRC.preprocess(sentence)\n for tok in sentence:\n if SRC.vocab.stoi[tok] != 0 or opt.floyd is True:\n indexed.append(SRC.vocab.stoi[tok])\n else:\n indexed.append(get_synonym(tok, SRC))\n sentence = Variable(torch.LongTensor([indexed]), device=opt.device)\n\n sentence = beam_search(sentence, model, SRC, TRG, opt)\n\n return multiple_replace({' ?': '?', ' !': '!', ' .': '.', '\\' ': '\\'', ' ,': ','}, sentence)\n\ndef translate(opt, model, SRC, TRG):\n sentences = opt.text.lower().split('.')\n translated = []\n\n for sentence in sentences:\n translated.append(translate_sentence(sentence + '.', model, opt, SRC, TRG).capitalize())\n\n return (' '.join(translated))\n\n\ndef main():\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-load_weights', required=True)\n parser.add_argument('-k', type=int, default=3)\n parser.add_argument('-max_len', type=int, default=80)\n parser.add_argument('-d_model', type=int, default=512)\n parser.add_argument('-n_layers', type=int, default=6)\n parser.add_argument('-src_lang', required=True)\n parser.add_argument('-trg_lang', required=True)\n parser.add_argument('-heads', type=int, default=8)\n parser.add_argument('-dropout', type=int, default=0.1)\n parser.add_argument('-no_cuda', action='store_true')\n parser.add_argument('-floyd', action='store_true')\n \n opt = parser.parse_args()\n\n opt.device = 'cuda' if opt.no_cuda is False else 'cpu'\n \n assert opt.k > 0\n assert opt.max_len > 10\n\n SRC, TRG = create_fields(opt)\n model = get_model(opt, len(SRC.vocab), len(TRG.vocab))\n \n while True:\n opt.text =input(\"Enter a sentence to translate (type 'f' to load from file, or 'q' to quit):\\n\")\n if opt.text==\"q\":\n break\n if opt.text=='f':\n fpath =input(\"Enter a sentence to translate (type 'f' to load from file, or 'q' to quit):\\n\")\n try:\n opt.text = ' '.join(open(opt.text, encoding='utf-8').read().split('\\n'))\n except:\n print(\"error opening or reading text file\")\n continue\n phrase = translate(opt, model, SRC, TRG)\n print('> '+ phrase + '\\n')\n\nif __name__ == '__main__':\n main()\n","repo_name":"SamLynnEvans/Transformer","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":1220,"dataset":"github-code","pt":"2"}
+{"seq_id":"26632482655","text":"from django.conf.urls import url\nfrom . import views\nurlpatterns = [\n\turl(r'^main$', views.index),\n\turl(r'^register$', views.register),\n\turl(r'^login$', views.login),\n\turl(r'^friends$', views.friends),\n\turl(r'^add_friend/(?P<id>\\d+)$', views.add_friend),\n\turl(r'^user/(?P<id>\\d+)$', views.user_profile),\n\turl(r'^remove_friend/(?P<id>\\d+)$', views.remove_friend),\n\turl(r'^logout$', views.logout)\n]","repo_name":"mariatrojo/pythonbelt_2","sub_path":"apps/friends/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2824314770","text":"import librosa, librosa.display\nimport math\nimport matplotlib.pyplot as plt\n\nSAMPLE_RATE = 22050\n\ndef load_sound(filename):\n # load the sound at the default sample rate 22050 HZ\n sound, sr = librosa.load(\n filename\n )\n assert sr == SAMPLE_RATE\n return sound, sr\n\n# Calculate the MFCCs over the segments, a.k.a. frames. Prepare the\n# parameters for calculating the MFCCs over the segments. In the video,\n# 10 frames per 30 sec was used, I have 5 sec, but let me use 5 frames.\nNUM_FRAMES = 10\n# 5 / 5 = 1 seconds per frame;\n# 22050 samples per frame.\nframe_length_in_samples = int(SAMPLE_RATE / NUM_FRAMES)\nprint(frame_length_in_samples)\n\ndef extract_mfccs_from_track(sound, sr):\n # calculate the MFCCs over the frames\n for i in range(NUM_FRAMES):\n start_sample = i * frame_length_in_samples\n end_sample = start_sample + frame_length_in_samples\n\n print(\"{}:{}\".format(start_sample, end_sample))\n\n frame = sound[start_sample:end_sample]\n\n mfcc = librosa.feature.mfcc(\n\n frame,\n sr,\n\n # may be increased to get more granular information, but 13 is\n # the minimum value\n n_mfcc = 13,\n\n # these are somewhat magic constants; IDK what they mean. It seems\n # redundant to me.\n n_fft = 2048,\n hop_length = 512,\n )\n\n num_mfcc_vectors_per_segment = math.ceil(\n frame_length_in_samples / 512\n )\n\n print(\"{}vs{}\".format(len(mfcc), num_mfcc_vectors_per_segment))\n # should always be the same, but in the video we\n # check if the length is not equal to expected length.\n\n # librosa.display.specshow(mfcc)\n # plt.show()\n\ndef prepare_data(root):\n data = {\n # a range of mfccs that represent the sound\n \"mfcc\": [],\n # labels\n \"label\": [],\n # filenames\n \"name\": []\n }\n","repo_name":"gevorgyana/tf_playground","sub_path":"other/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12311974272","text":"import csv\nimport sys\n\nif len(sys.argv) == 2:\n file_name = sys.argv[1]\nelse:\n file_name = \"spx-monthly.csv\"\n\nwith open(file_name,\"rb\") as f:\n reader = csv.reader(f)\n mylist = list(reader)\n\nmylist[0] = ['Apr. 30, 2018', '0.27']\n\nclean_list=[]\nfor item in mylist:\n x= item[0].split(',')\n x.append(item[1])\n x[0]=x[0][0:3]\n x[1] = int(x[1].strip())\n x[2] = float(x[2])\n clean_list.append(x)\nclean_list.reverse()\n\nmonth_list = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\nrange_list = [[] for y in range(12)] \n\nfor index, month in enumerate(month_list):\n start = False\n growth = 0.0\n count = 0\n for item in clean_list:\n if item[0] == month and start is False:\n start = True\n if start is True:\n if count % 12 == 0:\n growth = 0\n str1 = item[0] + \" \" + str(item[1])\n growth = growth + item[2]\n if count % 12 == 11:\n str1 = str1 + \"-\" + item[0] + \" \" + str(item[1])\n range_list[index].append([str1, round(growth,2)])\n count = count + 1\n\nmonth_dict = {}\n\nfor index, item in enumerate(range_list):\n #print (month_list[index], len(item))\n cap_val =11\n spread_val = 6\n cap_total = 0\n spread_total = 0\n spx_total = 0\n for sub_item in item:\n spx_total = spx_total + sub_item[1]\n if sub_item[1] <= 1:\n cap_total = cap_total + 1\n elif sub_item[1] > 1 and sub_item[1] <= cap_val:\n cap_total = cap_total + sub_item[1]\n elif sub_item[1] > cap_val:\n cap_total = cap_total + cap_val\n\n if sub_item[1] - spread_val <= 1:\n spread_total = spread_total + 1\n else:\n spread_total = spread_total + (sub_item[1] - spread_val)\n month_dict[month_list[index]]= [round(spx_total/len(item),2), round(cap_total/len(item),2),round(spread_total/len(item),2)]\n\nprint(\"Sorting by SPX\")\nfor key, value in sorted(month_dict.items(), key=lambda x: x[1][0], reverse=True):\n print (key, value)\nprint(\"Sorting by Penn 11 cap\")\nfor key, value in sorted(month_dict.items(), key=lambda x: x[1][1], reverse=True):\n print (key, value)\nprint(\"Sorting by Pen spread\")\nfor key, value in sorted(month_dict.items(), key=lambda x: x[1][2], reverse=True):\n print (key, value)\n\n","repo_name":"sbathina9/python","sub_path":"Data/Life_Insurance/spxmonthly.py","file_name":"spxmonthly.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74352270126","text":"#Creating the LinearSVC model\r\nmodel = LinearSVC()\r\n\r\n#Creating the train and test sets\r\nX_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(html_feat,\r\n labels,\r\n html_df.index,\r\n test_size = 0.33,\r\n random_state = 42)\r\n\r\n#Fitting the model\r\nmodel.fit(X_train, y_train)\r\n\r\ny_pred = model.predict(X_test)\r\n\r\n#Creating the LinearSVC confusion matrix\r\nconf_mat = confusion_matrix(y_test, y_pred)\r\nfig, ax = plt.subplots(figsize = (10, 10))\r\nsns.heatmap(conf_mat, annot = True, fmt = 'd', xticklabels = catid_df.Categories.values,\r\n yticklabels = catid_df.Categories.values)\r\nplt.ylabel('Actual')\r\nplt.xlabel('Predicted')\r\nplt.show()\r\n\r\n#Determine what caused misclassifications\r\nfrom IPython.display import display\r\n\r\nfor predicted in catid_df.CatID:\r\n for actual in catid_df.CatID:\r\n if predicted != actual and conf_mat[actual, predicted] >= 10:\r\n print(\"'{}' predicted as '{}' : {} examples.\".format(id_to_cat[actual], id_to_cat[predicted],\r\n conf_mat[actual, predicted]))\r\n display(html_df.loc[indices_test[(y_test == actual) & (y_pred == predicted)\r\n ]][['Categories', 'HTML']])\r\n print('')\r\n\r\n#Creating the LineaarSVC unigrams and bigrams\r\nmodel.fit(html_feat, labels)\r\n\r\nN = 2\r\nfor Categories, CatID in sorted(cat_to_id.items()):\r\n indices = np.argsort(model.coef_[CatID])\r\n feature_names = np.array(tfidf.get_feature_names())[indices]\r\n unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]\r\n bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]\r\n print(\"# '{}':\".format(Categories))\r\n print(\" . Top unigrams:\\n . {}\".format('\\n . '.join(unigrams)))\r\n print(\" . Top bigrams:\\n . {}\".format('\\n . '.join(bigrams)))\r\n\r\n#Accuracy report\r\nprint(metrics.classification_report(y_test, y_pred,\r\n target_names = html_df['Categories'].unique()))\r\n","repo_name":"chbrown626/College-Classifications","sub_path":"html_linear_svc.py","file_name":"html_linear_svc.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"11272501777","text":"#!/usr/bin/python\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom shapely.geometry import Polygon\nimport geopandas as gpd\nimport os.path\nfrom PIL import Image\nimport rasterio\n\n## global variables\nCLIP = False\nINTERSEC = True\n\n#set working directory\nos.chdir(\"/Users/aminaly/Box Sync/mountain_biodiversity\")\n\n## Read in all the files\nkba = gpd.read_file(os.getcwd() + \"/data/KBA/KBA2020/KBAsGlobal_2020_September_02_POL.shp\")\nwdpa = gpd.read_file(os.getcwd() + \"/data/WDPA/WDPA_Nov2020_Public_shp/WDPA_poly_Nov2020_filtered.gdb/\")\ngmba = gpd.read_file(os.getcwd() + \"/data/GMBA/Gmba_Mountain_Inventory_v2_broad_20210630/Gmba_Mountain_Inventory_v2_broad_20210630.shp\")\n\n#list of ISOs to use to clip kba & wdpa\nwrld_cntries = ['KEN', 'MNG', 'JPN', 'NPL', 'UGA']\n\n#clip kba and wdpa using the list of isos \nkba_c = kba[kba['ISO3'].isin(wrld_cntries)]\nwdpa = wdpa[wdpa['ISO3'].isin(wrld_cntries)]\n\n#gmba will be clipped a little differently. Doesn't have ISOs so we'll use a world shapefile\nworld = gpd.read_file(os.getcwd() + \"/data/World/world_shp/world.shp\")\nworld = world[world['CNTRY_NAME'].isin(kba_c['Country'].unique())] \ngmba_c = gpd.overlay(gmba, world, how=\"intersection\")\n#then we find a list of all the ranges included in the clip, and select those specifically from the main gmba\ngmba_c = gmba[gmba.GMBA_V2_ID.isin(gmba_c.GMBA_V2_ID)]\n\n#Once we've clipped them, save them out as shapefiles\nkba_c.to_file(os.getcwd() + \"/data/KBA/KBA2020/clipped_KBAsGlobal_2020_September_02_POL.shp\", \n driver='ESRI Shapefile')\n\nwdpa.to_file(os.getcwd() + \"/data/WDPA/WDPA_Nov2020_Public_shp/clipped_WDPA_Nov2020_Public_flattened.shp\",\n driver='ESRI Shapefile')\n\ngmba_c.to_file(os.getcwd() + \"/data/GMBA/Gmba_Mountain_Inventory_v2_broad_20210630/clipped_Gmba_Mountain_Inventory_v2_broad_20210630.shp\", \n driver='ESRI Shapefile')\n \n \n \n \n \n","repo_name":"aminaly/mountain_biodiversity","sub_path":"analysis/create_clipped_&_intersections.py","file_name":"create_clipped_&_intersections.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"6537991666","text":"from flask import Flask, jsonify, render_template, request\nimport func\nimport firebase_admin\nimport requests, json\n\n\ndefault_app = firebase_admin.initialize_app()\n\napp = Flask(__name__)\n\n\n\n@app.route('/')\ndef home():\n    movies = ()\n\n    carouselData = func.getCarouselItems()\n    popularMovies = func.getPopularMovies()\n    #watchlist = func.getWatchlistMovies()\n    return render_template('index.html', carouselData=carouselData, popularMovies=popularMovies)\n\n@app.route(\"/createUser\", methods=[\"GET\", \"PUT\"])\ndef createUsers():\n    url = \"https://w5xj3edx56.execute-api.ap-south-1.amazonaws.com/createUser\"\n    if request.method == 'PUT':\n        requestBody = {}\n        requestBody[\"Email\"] = request.json[\"emailValue\"]\n        res = func.getUser(request.json[\"emailValue\"])\n        print(res)\n        if(res == False):\n            requestBody[\"First_Name\"] = request.json[\"fnameValue\"]\n            requestBody[\"Last_Name\"] = request.json[\"lnameValue\"]\n            requestBody[\"Password\"] = request.json[\"pwdValue\"]\n            requestBody[\"User_Name\"] = request.json[\"unameValue\"]\n            response = requests.put(\n                url, data=json.dumps(requestBody),\n                headers={'Content-Type': 'application/json'}\n            )\n            return {\"status\": \"Account Created\"}\n        \n        return {\"status\": \"Already Exists\"}\n\n@app.route(\"/getUser\", methods=[\"GET\", \"POST\"])\ndef getUsers():\n    if request.method == \"POST\":\n        email = request.json['emailValue']\n        users = func.getUser(email)\n        if(users):\n            return (users)\n        else:\n            return 0\n    \n@app.route(\"/getMovie\", methods=[\"GET\", \"POST\"])\ndef getMovieById():\n    if request.method == \"POST\":\n        movie = request.json['movieId']\n        users = func.getMovie(movie)\n        if(users):\n            return (users)\n        else:\n            return 0\n\n@app.route(\"/getMovieWatchlist\", methods=[\"GET\", \"POST\"])\ndef getMovieWatchlist():\n    if request.method == \"POST\":\n        movie = request.json['emailValue']\n        watchlist = func.getWatchlistMovies(movie)\n        if(watchlist):\n            watchlist = map(dict, set(tuple(sorted(d.items())) for d in watchlist))\n            # list_set = set(watchlist)\n            # # convert the set to the list\n            # watchlist = (list(list_set))\n            return (list(watchlist))\n        else:\n            return 0 \n\n    \n@app.route(\"/addMovieWatchlist\", methods=[\"GET\", \"PUT\"])\ndef addMovie():\n    url = \"https://mv77u9kxij.execute-api.ap-south-1.amazonaws.com/addMovie\"\n    if request.method == 'PUT':\n        print(request.json)\n        response = requests.put(\n            url, data=json.dumps(request.json),\n            headers={'Content-Type': 'application/json'})\n        \n        return response.content\n@app.route(\"/getRecommendation/<name>\", methods=[\"GET\", \"PUT\"]) \ndef getRecommended(name):\n    movies = ()\n    if request.method == 'GET':\n        movies = func.recommend(name)\n        return list(movies)\n    return movies\n\nif __name__ == \"__main__\":\n    app.run(debug=True, port=3000)","repo_name":"Shrey-2019/MajorProject-MovieRecommendation_System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"30117958525","text":"import threading\nfrom typing import List\nfrom typing_extensions import Self\n\nfrom gui_components import ValueBar, Box\nfrom pygame import Surface, Rect\nimport source\nimport pygame\n\nfrom .deck import Deck\nfrom .player_effect import EffectTarget, PlayerEffect\nfrom .player_attribute import Attribute\n\n\nclass Player(Box, EffectTarget):\n _hp: ValueBar\n _max_hp: int\n _shield: ValueBar\n _max_shield: int\n _is_invert: bool\n _name: str\n _value_bar_spacing: int\n _deck: Deck\n _id: int\n _effect_list: List[int]\n\n def __init__(self, background: Surface, id: int, hp: int, shield: int, max_hp: int, max_shield: int, deck: Deck, rect: Rect = None, name: str = \"unknown\", value_bar_spacing: int = 5) -> None:\n super().__init__(background, rect)\n self._is_invert = False\n self._value_bar_spacing = value_bar_spacing\n self._id = id\n self.set_deck(deck)\n self._effect_list = []\n\n self._max_hp = max_hp\n self._hp = ValueBar(source.image.get_image(source.image.PLAYER_HP),\n source.image.get_image(source.image.PLAYER_HP_BG),\n initial_value=hp, max_value=self._max_hp, spacing=-20)\n self.add_component(self._hp)\n\n self._max_shield = max_shield\n self._shield = ValueBar(source.image.get_image(source.image.PLAYER_SHIELD),\n source.image.get_image(\n source.image.PLAYER_SHIELD_BG),\n initial_value=shield, max_value=self._max_shield, spacing=-20)\n self.add_component(self._shield)\n\n self._set_value_bar_pos()\n self.set_name(name)\n\n def set_deck(self, deck: Deck) -> None:\n self._deck = deck\n\n def get_id(self) -> int:\n return self._id\n\n def set_name(self, name: str) -> None:\n self._name = name\n\n def get_name(self) -> str:\n return self._name\n\n def set_rotate(self, is_invert: bool):\n if self._is_invert != is_invert:\n self.set_background(pygame.transform.rotate(self._surface, 180))\n self._is_invert = is_invert\n self._set_value_bar_pos()\n\n def _set_value_bar_pos(self):\n if self._is_invert:\n self._hp.set_center((self.get_width()//2,\n self._hp.get_height()//2))\n self._shield.set_center((self.get_width()//2,\n self._hp.get_height() + self._shield.get_height()//2 + self._value_bar_spacing))\n else:\n self._hp.set_center((self.get_width()//2,\n self.get_height() - self._hp.get_height()//2))\n self._shield.set_center((self.get_width()//2,\n self._hp.get_y() - self._shield.get_height()//2 - self._value_bar_spacing))\n\n def add_effect(self, effect: int) -> None:\n self._effect_list.append(effect)\n\n def remove_effect(self, effect: int) -> None:\n if effect in self._effect_list:\n self._effect_list.remove(effect)\n\n def get_effect(self):\n return self._effect_list.copy()\n\n # def is_contain_effect(self, timing: str):\n # for effect in self._effect_list:\n # if effect.is_contain_timing(timing):\n # return True\n # return False\n\n # def check_effect(self):\n # for effect in self._effect_list:\n # if effect.is_fail():\n # self.remove_effect(effect)\n\n # def do_effect(self, attribute: str, value: int, timing: str):\n # for effect in self._effect_list:\n # attribute, value = effect.do_effect(self, attribute, value, timing)\n # return attribute, value\n\n # def do_effect_with_delay(self, attribute: str, value: int, timing: str):\n # pygame.time.delay(1000)\n # self.do_effect(attribute, value, timing)\n\n # def modify_attributes(self, attribute: str, value: int):\n # attribute, value = self.do_effect(attribute, value,\n # PlayerEffect.BEFORE_MODIFY_VALUE)\n # if attribute == Attribute.HP:\n # self.add_hp(value)\n # elif attribute == 
Attribute.SHIELD:\n # self.add_shield(value)\n # threading.Thread(target=self.do_effect_with_delay, args=[\n # attribute, value, PlayerEffect.AFTER_MODIFY_VALUE]).start()\n\n def add_hp(self, value: int):\n if self._hp.get_value() + value > self._max_hp:\n self._hp.add_value(self._max_hp - self._hp.get_value())\n else:\n self._hp.add_value(value)\n\n def get_hp_value(self) -> int:\n return self._hp.get_value()\n\n def add_shield(self, value: int):\n shield_end_value = self._shield.get_value() + value\n if shield_end_value > self._max_shield:\n self._shield.add_value(self._max_shield - self._shield.get_value())\n elif shield_end_value >= 0:\n self._shield.add_value(value)\n else:\n self._shield.add_value(value - shield_end_value)\n self.add_hp(shield_end_value)\n\n def get_shield_value(self) -> int:\n return self._shield.get_value()\n\n def draw_card(self, num: int):\n return self._deck.draw_card(num)\n\n def get_deck(self) -> Deck:\n return self._deck.copy()\n\n def copy(self) -> Self:\n player = Player(self._surface, self.get_id(), self.get_hp_value(), self.get_shield_value(\n ), self._max_hp, self._max_shield, self._deck.copy(), name=self._name, value_bar_spacing=self._value_bar_spacing)\n for effect in self._effect_list:\n player.add_effect(effect)\n return player\n","repo_name":"Bryant-Tang/VtuberSmash","sub_path":"game_old_version/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"28864329931","text":"itemtable = [\n 0,\n (1, \"animal nest\"),\n (1, \"anvil\"),\n (1, \"ash\"),\n (1, \"backpack\"),\n (1, \"bellows\"),\n (1, \"belt\"),\n (1, \"bits of fur\"),\n (1, \"bits of wood\"),\n (1, \"blanket\"),\n (1, \"bloodstain\"),\n (1, \"bones\"),\n (1, \"books\"),\n (1, \"boots\"),\n (1, \"bottle\"),\n (1, \"box\"),\n (1, \"branding iron\"),\n (1, \"broken glass\"),\n (1, \"broken or rusty weapons\"),\n (1, \"bucket\"),\n (1, \"burned-out torch\"),\n (1, \"candelabra\"),\n (1, \"candle\"),\n (1, \"chains\"),\n (1, \"claw marks\"),\n (1, \"cleaver\"),\n (1, \"clothing\"),\n (1, \"cobwebs\"),\n (1, \"cold spot\"),\n (1, \"corpse\"),\n (1, \"dice\"),\n (1, \"dripping water\"),\n (1, \"drum\"),\n (1, \"dust\"),\n (1, \"empty scroll case\"),\n (1, \"engraving\"),\n (1, \"flask\"),\n (1, \"flint and tinder\"),\n (1, \"fungus\"),\n (1, \"graffiti\"),\n (1, \"grinder\"),\n (1, \"hook\"),\n (1, \"horn\"),\n (1, \"hourglass\"),\n (1, \"insects\"),\n (1, \"jar\"),\n (1, \"keg\"),\n (1, \"key\"),\n (1, \"lamp\"),\n (1, \"lantern\"),\n (1, \"markings\"),\n (1, \"mold\"),\n (1, \"mud\"),\n (1, \"mug\"),\n (1, \"musical instrument\"),\n (1, \"mysterious stain\"),\n (1, \"nonmagical scroll\"),\n (1, \"playing cards\"),\n (1, \"pole\"),\n (1, \"pot\"),\n (1, \"pottery, possibly broken\"),\n (1, \"pouch\"),\n (1, \"puddle of water\"),\n (1, \"rags\"),\n (1, \"razor\"),\n (1, \"rivulet\"),\n (1, \"ropes\"),\n (1, \"runes\"),\n (1, \"sack\"),\n (1, \"scattered stones\"),\n (1, \"scorch marks\"),\n (1, \"skull\"),\n (1, \"slime\"),\n (1, \"spices\"),\n (1, \"spike\"),\n (1, \"straw\"),\n (1, \"teeth\"),\n (1, \"tongs\"),\n (1, \"tools\"),\n (1, \"tray\"),\n (1, \"trophy\"),\n (1, \"twine\"),\n (1, \"urn\"),\n (1, \"utensils\"),\n (1, \"whetstone\"),\n]\n\n# end of file.\n","repo_name":"Solomoriah/Dungeoneer","sub_path":"Dungeoneer/Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"2"}
+{"seq_id":"2759905403","text":"def check_overlap(assignments: list[str]) -> int:\n first = [\n _\n for _ in range(\n int(assignments[0].split(\"-\")[0]), int(assignments[0].split(\"-\")[1]) + 1\n )\n ]\n second = [\n _\n for _ in range(\n int(assignments[1].split(\"-\")[0]), int(assignments[1].split(\"-\")[1]) + 1\n )\n ]\n\n if set(first).intersection(set(second)):\n return 1\n return 0\n\n\nprint(\n sum(\n list(\n map(\n lambda x: check_overlap(list(x)),\n list(\n map(\n lambda x: map(lambda x: x, x.split(\",\")),\n open(\"input.txt\", \"r\").read().split(\"\\n\"),\n )\n ),\n )\n )\n )\n)","repo_name":"asrvd/aoc2022","sub_path":"day04/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"40463973080","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom mobilenet import MobileNet\nfrom mobilenet_dih import MobileNetDih4\nfrom mobilenet_dih_r import MobileNetDR\nfrom keras.optimizers import SGD, Adam, Adadelta\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,CSVLogger\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import backend as K\nfrom keras import metrics\nfrom keras import losses\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nimport imageio\nfrom skimage.transform import resize as imgresize\n\nTRAIN_PATH = 'input/train.json'\nBATCH_SIZE = 32\n\ndef json2img_and_labels(df:pd.DataFrame):\n imgs = []\n y = []\n\n for i, row in df.iterrows():\n band_1 = np.array(row['band_1']).reshape(75,75)\n band_2 = np.array(row['band_2']).reshape(75,75)\n\n d1 = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())\n d2 = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())\n\n imgs.append(np.dstack([d1, d2]))\n y.append(row['is_iceberg'])\n\n return imgs, y\n\ndef get_data():\n print('Read Data')\n df = pd.read_json(TRAIN_PATH)\n imgs, y = json2img_and_labels(df)\n train_img, valid_img, train_y, valid_y = train_test_split(imgs,\n y,\n random_state=131,\n shuffle=True,\n stratify=y,\n train_size=0.75)\n return train_img, valid_img, train_y, valid_y\n\n\ndef get_callbacks(filepath, patience=1):\n mcp = ModelCheckpoint(filepath,\n monitor='val_loss',\n verbose=2,\n save_best_only=True,\n save_weights_only=False,\n mode='min',\n period=1)\n rlr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,\n patience=patience, min_lr=1e-16, verbose=1)\n csv_log = CSVLogger(filename=filepath+'.csv')\n return [mcp, rlr, csv_log]\n\ndef training_model(model_name='mobilenet'):\n train_img, valid_img, train_y, valid_y = get_data()\n callbacks = get_callbacks('mobilenet_10fulld01_b16', patience=2)\n if model_name == 'mobilenet':\n print('MobileNet')\n model = MobileNet(alpha=1.)\n model.summary()\n elif model_name =='mobilenet_dih':\n print('MobileNetDih')\n model = MobileNetDih4(alpha=1.)\n model.summary()\n elif model_name =='mobilenet_dih_r':\n print('MobileNetDihR')\n model = MobileNetDR(alpha=1.)\n model.summary()\n\n opt = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999)\n #opt = Adadelta(lr=1e-1, rho=0.95, decay=0.1)\n #opt = SGD(lr=1e-7, momentum=0.9, decay=0., nesterov=True)\n\n model.compile(optimizer=opt,\n loss='binary_crossentropy',\n metrics=['accuracy'])\n #model.load_weights('mobilenet_05shortd01_catcros_resize_b16.hdf5')\n gen = ImageDataGenerator(rotation_range=359,\n zoom_range=[0.5, 2],\n width_shift_range=0.1,\n height_shift_range=0.1,\n vertical_flip=True,\n horizontal_flip=True)\n\n model.fit_generator(gen.flow(np.array(train_img),\n np.array(train_y),\n batch_size=BATCH_SIZE),\n steps_per_epoch=16*len(train_y)//BATCH_SIZE,\n epochs=40,\n validation_data=[np.array(valid_img), np.array(valid_y)],\n verbose=1,\n callbacks=callbacks)\n# \"\"\"\n #opt = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999)\n #opt = Adadelta(lr=1e-1, rho=0.95, decay=0.1)\n opt = SGD(lr=0.05, momentum=0.9, decay=0., nesterov=True)\n model.load_weights('mobilenet_10shortd01_b16_sgd')\n model.fit_generator(gen.flow(np.array(train_img),\n np.array(train_y),\n batch_size=BATCH_SIZE),\n steps_per_epoch=16*len(train_y)//BATCH_SIZE,\n epochs=10,\n validation_data=[np.array(valid_img), np.array(valid_y)],\n verbose=1,\n 
callbacks=callbacks)\n#\"\"\"\n","repo_name":"MIklgr500/Statoil","sub_path":"tuning.py","file_name":"tuning.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70688484528","text":"from unittest import mock\nimport pytest\n\nfrom mitmproxy.contentviews import protobuf\nfrom mitmproxy.test import tutils\nfrom . import full_eval\n\n\ndef test_view_protobuf_request():\n v = full_eval(protobuf.ViewProtobuf())\n p = tutils.test_data.path(\"mitmproxy/data/protobuf01\")\n\n with mock.patch('mitmproxy.contentviews.protobuf.ViewProtobuf.is_available'):\n with mock.patch('subprocess.Popen') as n:\n m = mock.Mock()\n attrs = {'communicate.return_value': (b'1: \"3bbc333c-e61c-433b-819a-0b9a8cc103b8\"', True)}\n m.configure_mock(**attrs)\n n.return_value = m\n\n content_type, output = v(open(p, \"rb\").read())\n assert content_type == \"Protobuf\"\n assert output[0] == [('text', b'1: \"3bbc333c-e61c-433b-819a-0b9a8cc103b8\"')]\n\n m.communicate = mock.MagicMock()\n m.communicate.return_value = (None, None)\n with pytest.raises(ValueError, matches=\"Failed to parse input.\"):\n v(b'foobar')\n\n\ndef test_view_protobuf_availability():\n with mock.patch('subprocess.Popen') as n:\n m = mock.Mock()\n attrs = {'communicate.return_value': (b'libprotoc fake version', True)}\n m.configure_mock(**attrs)\n n.return_value = m\n assert protobuf.ViewProtobuf().is_available()\n\n m = mock.Mock()\n attrs = {'communicate.return_value': (b'command not found', True)}\n m.configure_mock(**attrs)\n n.return_value = m\n assert not protobuf.ViewProtobuf().is_available()\n\n\ndef test_view_protobuf_fallback():\n with mock.patch('subprocess.Popen.communicate') as m:\n m.side_effect = OSError()\n v = full_eval(protobuf.ViewProtobuf())\n with pytest.raises(NotImplementedError, matches='protoc not found'):\n v(b'foobar')\n","repo_name":"codebyravi/mproxy","sub_path":"test/mitmproxy/contentviews/test_protobuf.py","file_name":"test_protobuf.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"1401546784","text":"from tkinter import *\nfrom tkinter import messagebox\nimport mysql.connector\nfrom mysql.connector import Error\nimport os\nimport sys\npy = sys.executable\n\n#creating window\nclass Add(Tk):\n def __init__(self):\n super().__init__()\n self.iconbitmap(r'libico.ico')\n self.maxsize(480,360 )\n self.minsize(480,360)\n self.title('Ajouter un type de livre')\n self.canvas = Canvas(width=500, height=500, bg='#e5e5e5')\n self.canvas.pack()\n a = StringVar()\n b = StringVar()\n c = StringVar()\n #verifying Input\n def b_q():\n if len(b.get()) == 0:\n messagebox.showerror(\"Error\",\"Remplir tout les champs\")\n else:\n g = 'YES'\n try:\n self.conn = mysql.connector.connect(host='localhost',\n database='library',\n user='root',\n password='')\n self.myCursor = self.conn.cursor()\n self.myCursor.execute(\"Insert into categories(name) values (%s)\",[b.get()])\n self.conn.commit()\n messagebox.showinfo('Info', \"les données a été insérer avec succes\")\n ask = messagebox.askyesno(\"Confirm\", \"Voullez vous insérer une autre catégorie\")\n if ask:\n self.destroy()\n os.system('%s %s' % (py, 'Add_Category.py'))\n else:\n self.destroy()\n except Error:\n messagebox.showerror(\"Error\",\"Vérifier les données\")\n #creating input box and label\n Label(self, text='').pack()\n Label(self, text='Détails de Categorie',bg='#e5e5e5',fg='black',font=('Courier new', 20, 'bold')).place(x=45, y=70)\n Label(self, text='').pack()\n Label(self, text='Nom:',bg='#e5e5e5',fg='black', font=('Courier new', 10, 'bold')).place(x=60, y=180)\n Entry(self, textvariable=b, width=30).place(x=170, y=182)\n Button(self, text=\"Envoyer\", command=b_q).place(x=245, y=300)\nAdd().mainloop()","repo_name":"SDW-Soft/qergqrgeqrg","sub_path":"LIBRAY_MANAGEMENT/Add_Category.py","file_name":"Add_Category.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"43892273165","text":"# задача 3. Напишите программу, которая принимает на вход \n# координаты точки (X и Y), и выдаёт номер четверти плоскости, \n# в которой находится эта точка (или на какой оси она находится).\n\n# *Пример:*\n\n# - x=34; y=-30 -> 4\n# - x=2; y=4-> 1\n# - x=-34; y=-30 -> 3\n\n# print('Укажите координаты точки')\nx = int(input('Введите X = '))\ny = int(input('Введите Y = '))\nwhile x == 0 or y == 0:\n print('Координаты X и Y точки не могут быть равны 0. Повторите ввод')\n x = int(input('X = '))\n y = int(input('Y = '))\nif x > 0:\n if y > 0:\n print('Точка лежит в 1 четверти')\n else:\n print('Точка лежит в 4 четверти')\nelif x < 0:\n if y > 0:\n print('Точка лежит во 2 четверти')\n else:\n print('Точка лежит в 3 четверти')","repo_name":"Jull77/Python","sub_path":"Task03.py","file_name":"Task03.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40972651990","text":"class Solution(object) :\n def minAreaRect(self, points) :\n n = len(points)\n nx = len(set([x for x, y in points]))\n ny = len(set([y for x, y in points]))\n\n if n == nx or n == ny :\n return 0\n\n p = {}\n if nx < ny :\n for x, y in points :\n p[x] = p.get(x, set())\n p[x].add(y)\n else :\n for x, y in points :\n p[y] = p.get(y, set())\n p[y].add(x)\n\n pp = {}\n for x, ys in p.iteritems() :\n if len(ys) >= 2 :\n pp[x] = ys\n p = pp\n\n ans = float('inf')\n for x0, ys0 in p.iteritems() :\n for x1, ys1 in p.iteritems() :\n if x0 >= x1 :\n continue\n intersect = sorted(ys0.intersection(ys1))\n if len(intersect) < 2 :\n continue\n dis = intersect[1] - intersect[0]\n for k in xrange(2, len(intersect)) :\n dis = min(dis, intersect[k] - intersect[k - 1])\n\n ans = min(ans, dis * (x1 - x0))\n\n return ans if ans != float('inf') else 0\n\n","repo_name":"heroming/algorithm","sub_path":"leetcode/939_Minimum_Area_Rectangle(optimize).py","file_name":"939_Minimum_Area_Rectangle(optimize).py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"9432499096","text":"from adt import *\n\nclass A(Adt):\n Schema = (('A_i', int), ('A_s', str))\n \n# def __init__(self):\n# Adt.__init__(self)\n \nclass B(A):\n Schema = (('B_i', int), ('B_s', str))\n \n# def __init__(self):\n# A.__init__(self)\n \nclass C(Adt):\n Schema = (('C_i', int), ('C_AA', A))\n \nclass D(A,C):\n Schema = (('D_i', int),)\n \na = C()\na.C_AA = B()\n#b = B()\n#c = C()\n#d = D()\n\n\n#print (a.get_shema())\n##print (a.__class__)\nparser = ShemaParser(type(a))\n\nfor c in parser.Classes:\n print (c.__name__)\n\nprint(\"***\")\nfor at in parser.Attributes:\n print (at)\n \nprint(\"***\")\nfor n in parser.Names:\n print (n)\n\n#print(\"***\")\n#for t in parser.Types:\n# print (t)\n# \n#for n in parser.names():\n# print(n)\n# \n#print('***')\n#for t in parser.types():\n# print(t)\n# \n# \n#print('***')\n#for a in parser.elements():\n# print(a, type(a))\n##print (b.get_shema())\n##print (c.get_shema())\n##print (d.get_shema())\n#\n#for t,x in parser.Attributes:\n# print(t, x)\n# \nprint (\"****\") \nprint(a.__dict__)\n","repo_name":"hrvsaric/msppcore","sub_path":"pyms_core/ut_adt.py","file_name":"ut_adt.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40645596595","text":"from pfiscrape import getURLandWriteHTML\nfrom emailscript import readFileAndSend\n\nSEAurl = \"http://www.pfie.com/asia-pacific/southeast-asia/\"\nCHINAurl = \"http://www.pfie.com/asia-pacific/china-and-east-asia/\"\nINDIAurl = \"http://www.pfie.com/asia-pacific/india-and-south-asia/\"\nINDOurl = \"http://www.pfie.com/search?saddfilter|wvcategory=21098/21527/21536\"\n\nwith open(\"msg.html\", \"w\") as tf:\n tf.write(\"
SouthEast Asia
\")\n\ngetURLandWriteHTML(SEAurl, \"a\")\n\nwith open(\"msg.html\", \"a\") as tf:\n tf.write(\"
China
\")\n\ngetURLandWriteHTML(CHINAurl, \"a\")\n\nwith open(\"msg.html\", \"a\") as tf:\n tf.write(\"
India
\")\n\ngetURLandWriteHTML(INDIAurl, \"a\")\n\nreadFileAndSend()\n\n","repo_name":"kevinlee05/scrapescripts","sub_path":"pfi/dailyscript.py","file_name":"dailyscript.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5939173727","text":"from beecell.simple import jsonDumps\n\nfrom urllib.parse import quote\nimport requests\nimport json\nfrom logging import getLogger\n# from six.moves.urllib.parse import urlparse\nfrom beecell.simple import check_vault, truncate\nfrom requests.exceptions import ConnectionError, ConnectTimeout\nfrom urllib3 import disable_warnings, exceptions\n\ndisable_warnings(exceptions.InsecureRequestWarning)\n\n\nclass DataDomainError(Exception):\n def __init__(self, value, code=400):\n self.value = value\n self.code = code\n Exception.__init__(self, value, code)\n \n def __repr__(self):\n return 'DataDomainError: %s' % self.value\n \n def __str__(self):\n return 'DataDomainError: %s' % self.value\n\n\nclass DataDomainEntity(object):\n def __init__(self, manager):\n self.logger = getLogger(self.__class__.__module__ + '.' + self.__class__.__name__)\n\n self.manager = manager\n self.next = None\n\n @property\n def token(self):\n return self.manager.token\n\n @property\n def timeout(self):\n return self.manager.timeout\n\n @property\n def headers(self):\n headers = self.manager.dd_base_headers\n headers.update({'X-DD-AUTH-TOKEN': self.manager.get_token()})\n return headers\n\n def get_system_uri(self, oid):\n \"\"\"get datadomain base system uri\n\n :param oid: system id. uuid from system api\n :return: formatted uri\n \"\"\"\n oid = quote(oid)\n return '/dd-systems/%s' % oid\n\n def http_get(self, uri, **params):\n method = 'get'\n uri = self.manager.dd_base_uri + uri\n\n try:\n res = requests.get(uri, headers=self.headers, timeout=self.timeout, params=params, verify=False)\n output = res.json()\n if res.status_code in [400, 403, 404, 405]:\n error = output.get('details', '')\n raise Exception(error)\n self.logger.debug('datadomain http %s response: %s' % (method, truncate(output)))\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('datadomain http %s error: %s' % (method, ex))\n raise DataDomainError(ex)\n\n return output\n\n def http_post(self, uri, data={}):\n method = 'post'\n uri = self.manager.dd_base_uri + uri\n\n try:\n self.logger.debug('post data %s to dd' % data)\n res = requests.post(uri, headers=self.headers, timeout=self.timeout, data=jsonDumps(data), verify=False)\n output = res.json()\n if res.status_code in [400, 403, 404, 405]:\n error = output.get('detail', None)\n if error is None:\n error = output\n raise Exception(error)\n self.logger.debug('datadomain http %s response: %s' % (method, truncate(output)))\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('datadomain http %s error: %s' % (method, ex))\n raise DataDomainError(ex)\n\n return output\n\n def http_put(self, uri, data={}):\n method = 'put'\n uri = self.manager.dd_base_uri + uri\n\n try:\n self.logger.debug('put data %s to dd' % data)\n res = requests.put(uri, headers=self.headers, timeout=self.timeout, data=jsonDumps(data), verify=False)\n output = res.json()\n if res.status_code in [400, 403, 404, 405]:\n error = output.get('detail', None)\n if error is None:\n error = output\n raise Exception(error)\n 
self.logger.debug('datadomain http %s response: %s' % (method, truncate(output)))\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('datadomain http %s error: %s' % (method, ex))\n raise DataDomainError(ex)\n\n return output\n\n def http_delete(self, uri, data=None):\n method = 'delete'\n uri = self.manager.dd_base_uri + uri\n\n try:\n res = requests.delete(uri, headers=self.headers, timeout=self.timeout, verify=False)\n if res.status_code in [400, 403, 404, 405]:\n output = res.json()\n error = output['detail']\n raise Exception(error)\n self.logger.debug('datadomain http %s response: %s' % (method, True))\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('datadomain http %s error: %s' % (method, ex))\n raise DataDomainError(ex)\n\n\nclass DataDomainManager(object):\n def __init__(self, uri=None, proxy=None, timeout=60.0):\n self.logger = getLogger(self.__class__.__module__ + '.' + self.__class__.__name__)\n\n if uri is None:\n raise \n self.dd_base_uri = uri\n self.dd_base_headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n self.token = None\n self.token_expire = None\n self.timeout = timeout\n\n from .system import DataDomainSystem\n from .network import DataDomainNetwork\n from .mtree import DataDomainMtree\n from .protocol import DataDomainProtocol\n from .user import DataDomainUser\n from .trust import DataDomainTrust\n from .tenant import DataDomainTenant\n\n # initialize proxy objects\n self.system = DataDomainSystem(self)\n self.network = DataDomainNetwork(self)\n self.mtree = DataDomainMtree(self)\n self.protocol = DataDomainProtocol(self)\n self.user = DataDomainUser(self)\n self.trust = DataDomainTrust(self)\n self.tenant = DataDomainTenant(self)\n\n @property\n def headers(self):\n headers = self.dd_base_headers\n headers.update({'X-DD-AUTH-TOKEN': self.get_token()})\n return headers\n\n def set_timeout(self, timeout):\n self.timeout = timeout\n\n def ping(self):\n \"\"\"Ping dd\n\n :return: True or False\n \"\"\"\n res = False\n try:\n uri = self.dd_base_uri\n requests.get(uri, headers=self.dd_base_headers, timeout=self.timeout, verify=False)\n res = True\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n except Exception as ex:\n self.logger.error('datadomain http %s error: %s' % ('post', False))\n self.logger.debug('Ping dd server: %s' % res)\n\n return res\n\n def version(self):\n \"\"\"Get dd version\n\n :return: dd version\n \"\"\"\n try:\n # get token from identity service\n header = self.dd_base_headers\n uri = self.dd_base_uri + 'config/'\n res = requests.get(uri, headers=header, timeout=self.timeout, verify=False)\n output = res.json()\n if res.status_code in [400]:\n error = output['detail']\n raise Exception(error)\n version = {'version': output.get('version', None), 'ansible_version': output.get('ansible_version', None)}\n self.logger.debug('Get version: %s' % version)\n return version\n except ConnectTimeout as ex:\n 
self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('get version error: %s' % ex)\n raise DataDomainError(ex)\n\n def authorize(self, user=None, pwd=None, token=None, key=None):\n \"\"\"Get token\n\n :param user: user\n :param pwd: password\n :param token: token string\n :param key: [optional] fernet key used to decrypt encrypted password\n \"\"\"\n # check password is encrypted\n if pwd is not None:\n pwd = check_vault(pwd, key)\n\n # set token\n if token is not None:\n self.token = token\n else:\n try:\n # get token from identity service\n self.logger.debug('Try to get token for user %s' % user)\n data = {\n 'auth_info': {\n 'username': user,\n 'password': pwd\n }\n }\n uri = self.dd_base_uri + '/auth'\n res = requests.post(uri, headers=self.dd_base_headers, data=jsonDumps(data),\n timeout=self.timeout, verify=False)\n # output = res.json()\n if res.status_code in [400, 401]:\n raise Exception('')\n self.token = res.headers['X-DD-AUTH-TOKEN']\n self.logger.debug('Get token %s for user %s' % (self.token, user))\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('get token error: %s' % ex)\n raise DataDomainError(ex)\n\n def delete_token(self):\n try:\n uri = self.dd_base_uri + '/auth'\n res = requests.delete(uri, headers=self.headers, timeout=self.timeout, verify=False)\n if res.status_code != 200:\n return False\n except ConnectTimeout as ex:\n self.logger.error('datadomain connection timeout: %s' % ex)\n raise DataDomainError(ex)\n except ConnectionError as ex:\n self.logger.error('datadomain connection error: %s' % ex)\n raise DataDomainError(ex)\n except Exception as ex:\n self.logger.error('delete token error: %s' % ex)\n raise DataDomainError(ex)\n return True\n\n def get_token(self):\n return self.token\n","repo_name":"Nivola/beedrones","sub_path":"beedrones/datadomain/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4787635833","text":"n=int(input(\"Enter the number\"))\nfirst=n\nlast=0\nfor i in reversed(range(first,last,-1)):\n increment = (i * 2)\n for j in range(1,increment):\n print(\"*\",end=\" \")\n\n print()\n\n\n\n'''\nn=int(input())\nfor i in reversed(range(n,0,-1)):\n for j in range(i):\n print(\"*\",end=\" \")\n print()\nprint(reversed)\n'''","repo_name":"mahdis4092/Python-problem-solving-Basic","sub_path":"For and while loop 2/triangle_using_astericks.py","file_name":"triangle_using_astericks.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7976340152","text":"import os\nimport csv\nimport torch\nimport random\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms, utils\nimport torchvision.transforms.functional as tf\n\n\nclass GeoDataset(Dataset):\n def __init__(self, data_csvpath:str,random_flip:bool=True, random_crop:bool=True, crop_box:int=512, transform=None):\n self.random_flip = random_flip\n self.random_crop = random_crop\n self.crop_box = crop_box\n self.csv_path = data_csvpath\n self.dataset = [] # [haze_img_path, clear_img_path]\n self._init_dataset()\n self.transform = transform\n self.__init_transform()\n\n def _init_dataset(self):\n csv_file = open(self.csv_path, \"r\")\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n self.dataset.append([row[0], row[1]])\n csv_file.close()\n\n def __init_transform(self):\n if self.transform is None:\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)\n ])\n\n # def _random_rotate(self, haze, clear):\n # # 拿到角度的随机数。angle是一个-180到180之间的一个数\n # angle = transforms.RandomRotation.get_params([-180, 180])\n # # 对haze和clear图像做相同的旋转操作,保证他们都旋转angle角度\n # haze = haze.rotate(angle, expand=True)\n # clear = clear.rotate(angle, expand=True)\n # return haze, clear\n\n def _random_flip(self, haze, clear):\n # 50%的概率应用垂直,水平翻转。\n if random.random() > 0.5:\n haze = tf.hflip(haze)\n clear = tf.hflip(clear)\n if random.random() > 0.5:\n haze = tf.vflip(haze)\n clear = tf.vflip(clear)\n return haze, clear\n\n def _random_crop(self, haze, clear):\n # 50%的概率应用垂直,水平翻转。\n i,j,h,w = transforms.RandomCrop.get_params(haze, (self.crop_box, self.crop_box))\n haze = tf.crop(haze, i,j,h,w)\n clear = tf.crop(clear, i,j,h,w)\n return haze, clear\n\n def __getitem__(self, item):\n haze = Image.open(self.dataset[item][0]).convert('RGB')\n clear = Image.open(self.dataset[item][1]).convert('RGB')\n if self.random_flip:\n haze, clear = self._random_flip(haze, clear)\n if self.random_crop:\n haze, clear = self._random_crop(haze, clear)\n\n haze = self.transform(haze)\n clear = self.transform(clear)\n\n return haze, clear\n\n def __len__(self):\n return len(self.dataset)\n\n\nif __name__ == \"__main__\":\n gd = GeoDataset(\"D:\\Dataset\\Geographic image\\data_train.csv\", random_crop=True, random_flip=True)\n gd = GeoDataset(\"D:\\Dataset\\Geographic image\\data_train.csv\", random_crop=False, random_flip=False)\n haze, clear = gd[2]\n utils.save_image((clear + 1) / 2.0, 'clear.jpg')\n utils.save_image((haze + 1) / 2.0, 'haze.jpg')\n pass","repo_name":"mikuzip01/DOC-Net","sub_path":"dataloaders/Geo_dataloader.py","file_name":"Geo_dataloader.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"11924299896","text":"from json import dumps\n\n\nasync def extract_data(page, cls, term):\n # Check if grade distribution data is available\n first_chart_heading = await page.locator(\"h4\").first.inner_text()\n\n # Obtaining data and parsing it into a dictionary\n td = page.locator(\"table\").first.locator(\"td\")\n data = await td.all_inner_texts()\n\n keys = data[::2]\n values = map(int, data[1::2])\n\n title = await page.locator(\"h2\").inner_text()\n professor = await page.locator(\"h2 + h3\").inner_text()\n\n if first_chart_heading != \"Grade Data Unavailable\":\n write_data = {\n \"section\": cls,\n \"term\": term,\n \"courseTitle\": title,\n \"instructor\": professor,\n \"grades\": dict(zip(keys, values))\n }\n else:\n write_data = {\n \"section\": cls,\n \"term\": term,\n \"courseTitle\": title,\n \"instructor\": professor,\n \"grades\": {}\n }\n\n return write_data\n\n\nasync def nav_and_extract(page, nav, cls, term, f):\n await page.goto(nav)\n write_data = await extract_data(page, cls, term)\n f.write(dumps(write_data) + \"\\n\")","repo_name":"jiechenmc/Gradus","sub_path":"core/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"26879217796","text":"import math\n\ndef get_asteroid_angles( asteroids, current_asteroid ) :\n asteroid_angles = dict()\n \n for asteroid in asteroids :\n if asteroid != current_asteroid :\n slope = math.atan2( ( asteroid[0] - current_asteroid[0] ), ( asteroid[1] - current_asteroid[1] ) )\n if slope <= math.pi :\n slope = slope + 2 * math.pi\n try :\n asteroid_angles[slope].append( asteroid )\n except :\n asteroid_angles[slope] = [ asteroid ]\n\n return asteroid_angles\n\ndef vaporize_asteroids( asteroid_list, current_asteroid ) :\n asteroid_angles = list( asteroid_list.keys() )\n asteroid_angles.sort( reverse = True )\n finished_vaporize = False\n vaporize_count = 0\n \n while not finished_vaporize :\n asteroid_angles_copy = asteroid_angles[:]\n for angle in asteroid_angles :\n vaporize_count += 1\n \n if len( asteroid_list[angle] ) > 1 :\n asteroids_in_path = asteroid_list[angle][:]\n closest_distance = -1\n for asteroid in asteroids_in_path :\n asteroid_distance = abs( asteroid[1] - current_asteroid[1] ) + abs( asteroid[0] - current_asteroid[0] )\n if closest_distance == -1 or asteroid_distance < closest_distance :\n closest_distance = asteroid_distance\n closest_asteroid = asteroid\n if vaporize_count == 200 :\n print ( closest_asteroid )\n asteroid_list[angle].remove( closest_asteroid )\n else :\n if vaporize_count == 200 :\n print ( asteroid_list[angle][0] ) \n asteroid_angles_copy.remove( angle )\n del asteroid_list[angle]\n if len( asteroid_angles_copy ) < 1 :\n finished_vaporize = True\n else :\n asteroid_angles = asteroid_angles_copy\n\n# Reads the file input.txt to get the asteroid map\nasteroid_map = []\nasteroids = []\n\nwith open( \"input.txt\", \"r\" ) as fd:\n for line in fd:\n asteroid_map.append( line.strip() )\n\n# Fill the asteroids array with the location of all asteroids\nfor i in range( len( asteroid_map ) ) :\n for j in range( len( asteroid_map[i] ) ) :\n if asteroid_map[i][j] == '#':\n asteroids.append( ( j, i ) )\n\ncurrent_asteroid = ( 28, 29 )\n\nasteroid_list = get_asteroid_angles( asteroids, current_asteroid )\n\nvaporize_asteroids( asteroid_list, current_asteroid )\n","repo_name":"raygiang/advent-of-code-2019","sub_path":"day-10/day10-2.py","file_name":"day10-2.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"27448327478","text":"import unittest\nimport numpy as np\nimport gym\nfrom gym_jsbsim import utils\nfrom gym_jsbsim.agents import RandomAgent\nfrom gym_jsbsim.environment import JsbSimEnv\nfrom gym_jsbsim.tasks import HeadingControlTask\nimport gym_jsbsim.properties as prp\n\n\nclass AgentEnvInteractionTest(unittest.TestCase):\n \"\"\" Tests for agents interacting with env. \"\"\"\n\n def init_and_reset_env(self, env: JsbSimEnv):\n self.assertIsInstance(env.task, HeadingControlTask)\n\n # we interact at 5 Hz, so we expect the sim to run 12 timesteps per\n # interaction since it runs at 120 Hz\n self.assertEqual(12, env.sim_steps_per_agent_step)\n\n # we init a random agent with a seed\n agent = RandomAgent(action_space=env.action_space)\n self.assertEqual(env.action_space, agent.action_space)\n\n # this task has an action space of three controls: aileron, elevator, rudder\n expected_num_actions = 3\n self.assertEqual(expected_num_actions, len(agent.action_space.low))\n # we see that the action space has the correct low and high range of +-1.0\n expect_low = np.array([-1.0] * expected_num_actions)\n expect_high = np.array([1.0] * expected_num_actions)\n np.testing.assert_array_almost_equal(expect_high, env.action_space.high)\n np.testing.assert_array_almost_equal(expect_low, env.action_space.low)\n\n # we reset the env and receive the first state; the env is now ready\n state = env.reset()\n self.assertEqual(len(env.observation_space.low), len(state))\n\n # we close the env and JSBSim closes with it\n env.close()\n self.assertIsNone(env.sim.jsbsim)\n\n def take_step_with_random_agent(self, env: JsbSimEnv):\n agent = RandomAgent(action_space=env.action_space)\n\n # we set up for a loop through one episode\n first_state = env.reset()\n\n # we take a single step\n action = agent.act(first_state)\n state, reward, done, info = env.step(action)\n\n # we see the state has changed\n self.assertEqual(first_state.shape, state.shape)\n self.assertTrue(np.any(np.not_equal(first_state, state)),\n msg='state should have changed after simulation step')\n expected_time_step_size = env.sim_steps_per_agent_step / env.JSBSIM_DT_HZ\n self.assertAlmostEqual(expected_time_step_size, env.sim.get_sim_time())\n self.assertFalse(done, msg='episode is terminal after only a single step')\n\n # the aircraft engines are running, as per initial conditions\n self.assertNotAlmostEqual(env.sim[prp.engine_thrust_lbs], 0)\n\n env.close()\n\n def test_init_and_reset_all_envs(self):\n for env_id in utils.get_env_id_kwargs_map():\n env = gym.make(env_id)\n self.init_and_reset_env(env)\n\n def test_take_step_with_random_agent_all_envs(self):\n for env_id in utils.get_env_id_kwargs_map():\n env = gym.make(env_id)\n self.take_step_with_random_agent(env)\n","repo_name":"Gor-Ren/gym-jsbsim","sub_path":"gym_jsbsim/tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"3"}
+{"seq_id":"2731676819","text":"import unittest\n\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n # Base cases:\n # check to see if it has any alphanumeric chars\n contains_a_n = False\n for c in s:\n if self.isAlphaNumeric(c):\n contains_a_n = True\n break\n if not contains_a_n:\n return True\n\n front = 0\n back = -1\n\n # Compare front to back until pointers pass or match each other.\n # Any non matches, kill.\n while front <= len(s) + back:\n front = self.getNextAlphaLocation(s, front)\n back = self.getNextAlphaLocation(s, back)\n if s[front].lower() != s[back].lower():\n return False\n # next\n front += 1\n back -= 1\n # All match\n return True\n\n # Returns next valid location\n def getNextAlphaLocation(self, s: str, index: int) -> int:\n # Direction\n step = 1\n if index < 0:\n step = -1\n # Find Next\n while not self.isAlphaNumeric(s[index]):\n index += step\n return index\n\n @staticmethod\n def isAlphaNumeric(c: chr) -> bool:\n return (\n ord('0') <= ord(c) <= ord('9') or\n ord('a') <= ord(c) <= ord('z') or\n ord('A') <= ord(c) <= ord('Z')\n )\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.a = Solution()\n\n def test_isPalindrome(self):\n self.assertTrue(self.a.isPalindrome(\"A man, a plan, a canal: Panama\"))\n self.assertTrue(self.a.isPalindrome(\"arsttsra\"))\n self.assertTrue(self.a.isPalindrome(\"arst. tsra\"))\n self.assertTrue(self.a.isPalindrome(\"\"))\n self.assertTrue(self.a.isPalindrome(\"12345654321\"))\n\n self.assertFalse(self.a.isPalindrome(\"1234565432345234\"))\n self.assertFalse(self.a.isPalindrome(\"arst\"))\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"matthewcordaro/leet-code-python","sub_path":"finished/101-200/125. Valid Palindrome/valid-palindrome.py","file_name":"valid-palindrome.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42838527800","text":"\nclass oproperty(object):\n \"\"\"\n This class implements a property-like class that is designed to allow for\n easy overriding of a base class's property. This is especially useful for\n things like mixins, where you don't necessarily know what the base class's\n type is, and thus can't simply call BaseClass.prop.__set__. And, since\n super() doesn't proxy the __set__ function, it can be quite difficult to\n override a base class's property while still conforming to DRY.\n\n Usage:\n class BaseClass(object):\n @property\n def prop(self):\n return 1234\n\n @property_overriding\n class DerivedClass(object):\n @oproperty\n def prop(self, orig):\n return orig() + 1\n\n\n FAQ:\n Q: Why is this necessary?\n A: I like mixins, conceptually, but Python makes it a bit tricky to do\n stuff like overriding a base class's property setter without\n explicitly knowing what that class is. I wrote this to simplify\n things for me.\n\n Q: How does it work?\n A: In short, we pretend to be a property-like object, and instead of\n raising an error if the get/set/delete method doesn't exist, we call\n the next implementation found in a base class. If the method *does*\n exist, we call it, but also pass along a pointer to the original\n method, so our overriding method can make use of the original\n method, if it's necessary.\n\n Q: Are there any caveats?\n A: Maybe. I haven't properly tested this with things like abstract\n base classes (though I plan on doing so), and anything else that\n might rely on something actually being a property object. I'm\n thinking of making this class derive from property, but I'm not sure\n that's necessarily a great idea. Testing will continue :-)\n\n Q: What versions of Python does this work on?\n A: This should work on Python 2.6+, including Python 3.\n\n TODO:\n - Unsure what I want to do to deal with the case where the base\n property doesn't actually exist. Options are currently:\n - Make the orig parameter a kwarg, and only pass it if the base\n property actually exists\n - The orig lambda can return a None if the base prop doesn't exist\n - Verify when the class type is set that there is an attribute\n with the appropriate name somewhere in the __mro__. This would\n throw a RuntimeError(?) if we then try and override a property\n that doesn't already exist.\n \"\"\"\n\n def __init__(self, fget=None, fset=None, fdel=None, doc=None, name=None):\n self.fget = fget\n self.fset = fset\n self.fdel = fdel\n self.__doc__ = doc\n\n # If we're not explicitly given a name to override, we try and\n # determine it by inspecting the names of any given functions. If we\n # can't do this, we raise an error, since we don't know what to\n # override at all.\n if name is None:\n if fget is not None:\n name = fget.__name__\n elif fset is not None:\n name = fset.__name__\n elif fdel is not None:\n name = fdel.__name__\n else:\n raise RuntimeError(\"Can't create a property with no functions!\")\n\n self._prop_name = name\n self.__class_type = None\n\n def __get__(self, obj, type=None):\n # If we have no object, return ourself.\n if obj is None:\n return self\n\n # Get the superclass's attribute.\n super_attr = self._get_super_attribute(obj, self._prop_name)\n\n # If we have an attribute, call and return it. 
Otherwise, we simply\n # call the base property's __get__ function.\n if self.fget is not None:\n return self.fget(obj, lambda: super_attr.__get__(obj))\n else:\n return super_attr.__get__(obj)\n\n def __set__(self, obj, value):\n # Get the superclass's attribute.\n super_attr = self._get_super_attribute(obj, self._prop_name)\n\n # If we have an attribute, call and return it. Otherwise, we simply\n # call the base property's __set__ function.\n if self.fset is not None:\n return self.fset(obj, value, lambda val: super_attr.__set__(obj, val))\n else:\n return super_attr.__set__(obj, value)\n\n def __delete__(self, obj):\n # Get the superclass's attribute.\n super_attr = self._get_super_attribute(obj, self._prop_name)\n\n # If we have an attribute, call and return it. Otherwise, we simply\n # call the base property's __delete__ function.\n if self.fdel is not None:\n return self.fdel(obj, lambda: super_attr.__delete__(obj))\n else:\n return super_attr.__delete__(obj)\n\n def set_class_type(self, klass):\n \"\"\"\n This function is called by our decorator, below, to tell this class\n where to start searching in the __mro__ list.\n \"\"\"\n self.__class_type = klass\n\n def _handle_undecorated(self, an_object, mro):\n # We walk the MRO chain looking for ourself in the attributes\n # somewhere. This is helpful because we can try and pinpoint\n # exactly what class is undecorated.\n parent_klass = None\n\n for klass in mro:\n # Look through the dict.\n for name, val in klass.__dict__.items():\n # If this is ourself...\n if val is self:\n # Cool, we found it.\n parent_klass = klass\n break\n\n if parent_klass is not None:\n break\n\n if parent_klass:\n raise RuntimeError(\n \"You must decorate the class '{0}' with \" \\\n \"property_overriding!\".format(parent_klass.__name__)\n )\n else:\n raise RuntimeError(\n \"A class in the inheritance chain belonging to {0!r} hasn't \" \\\n \"been decorated with property_overriding.\".format(an_object)\n )\n\n def _get_super_attribute(self, obj, name):\n # Handle the None case.\n if obj is None:\n return None\n\n # Get the MRO for the object\n if isinstance(obj, type):\n mro = obj.__mro__\n else:\n mro = obj.__class__.__mro__\n\n if self.__class_type is None:\n self._handle_undecorated(obj, mro)\n\n # Find this class in the MRO.\n for pos in range(len(mro)):\n if mro[pos] == self.__class_type:\n break\n\n # Look through classes higher in the MRO for this attribute.\n for pos in range(pos + 1, len(mro)):\n tmp = mro[pos]\n\n if isinstance(tmp, type) and name in tmp.__dict__:\n return tmp.__dict__[name]\n\n return None\n\n def getter(self, fget):\n self.fget = fget\n return self\n\n def setter(self, fset):\n self.fset = fset\n return self\n\n def deleter(self, fdel):\n self.fdel = fdel\n return self\n\n @classmethod\n def override_setter(klass, fset, **kwargs):\n \"\"\"\n This is a convenience classmethod that lets you quickly override just\n the setter of a property, like so:\n class Derived(BaseClass):\n @oproperty.override_setter\n def prop(self, val, orig):\n # Do new setter work...\n pass\n \"\"\"\n return klass(fset=fset, **kwargs)\n\n @classmethod\n def override_deleter(klass, fdel, **kwargs):\n \"\"\"\n This is a convenience classmethod that lets you quickly override just\n the deleter of a property, like so:\n class Derived(BaseClass):\n @oproperty.override_deleter\n def prop(self, orig):\n # Do new deleter work...\n pass\n \"\"\"\n return klass(fdel=fdel, **kwargs)\n\n\n\ndef property_overriding(klass):\n for name, val in klass.__dict__.items():\n if 
isinstance(val, oproperty):\n val.set_class_type(klass)\n\n return klass\n\n","repo_name":"andrew-d/oproperty","sub_path":"oproperty.py","file_name":"oproperty.py","file_ext":"py","file_size_in_byte":8290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9183545061","text":"import pygame\nfrom const import *\n\ndef draw_window(win, bird, pipes, base, score):\n win.blit(BG_IMG, (0,0))\n\n for pipe in pipes:\n pipe.draw(win)\n\n text = STAT_FONT.render(f\"Score: {score}\", 1, (255, 255, 255))\n win.blit(text, (WIDTH - 10 - text.get_width(), 10))\n\n base.draw(win)\n bird.draw(win)\n\n pygame.display.update()\n\ndef blitRotateCenter(surf, image, topleft, angle):\n rotated_image = pygame.transform.rotate(image, angle)\n new_rect = rotated_image.get_rect(center = image.get_rect(topleft = topleft).center)\n\n surf.blit(rotated_image, new_rect.topleft)","repo_name":"AmirAbaskohi/Flappy-Bird-NEAT","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13629840520","text":"N = int(input())\nseconds = N%60\nif (seconds<60):\n\tseconds = seconds\nelse:\n\tseconds = seconds - 60\n\nminutes = int(N/60)\nhours = int(minutes/60)\n\nif(minutes < 60):\n\tminutes = minutes\nelse:\n\tminutes = minutes%60\nhours = str(hours)\nminutes = str(minutes)\nseconds = str(seconds)\nprint(hours+':'+minutes+':'+seconds)","repo_name":"rezoanahmed/uri_solutions_python","sub_path":"1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13205277986","text":"import cPickle\nimport copy\nimport datetime\nimport logging\n\nimport twisted.internet.reactor\n\nimport deluge.component\nimport deluge.configmanager\n\nimport labelplus.common\nimport labelplus.common.config\nimport labelplus.common.label\nimport labelplus.gtkui.config\nimport labelplus.gtkui.config.convert\nimport labelplus.gtkui.common.gtklib.dnd\n\n\nfrom twisted.python.failure import Failure\n\nfrom deluge.ui.client import client\nfrom deluge.ui.client import DelugeRPCError\nfrom deluge.plugins.pluginbase import GtkPluginBase\n\nfrom labelplus.common import LabelPlusError\nfrom labelplus.gtkui.common.label_store import LabelStore\nfrom labelplus.gtkui.extensions.add_torrent_ext import AddTorrentExt\nfrom labelplus.gtkui.extensions.preferences_ext import PreferencesExt\nfrom labelplus.gtkui.extensions.sidebar_ext import SidebarExt\nfrom labelplus.gtkui.extensions.status_bar_ext import StatusBarExt\nfrom labelplus.gtkui.extensions.torrent_view_ext import TorrentViewExt\n\nfrom labelplus.gtkui import RT\n\n\nfrom labelplus.common.literals import (\n STR_UPDATE, ERR_TIMED_OUT, ERR_MAX_RETRY,\n)\n\nGTKUI_CONFIG = \"%s_ui.conf\" % labelplus.common.MODULE_NAME\n\nINIT_POLLING_INTERVAL = 3.0\nUPDATE_INTERVAL = 1.0\n\nTHROTTLED_INTERVAL = 6.0\nMAX_TRIES = 10\n\nREQUEST_TIMEOUT = 10.0\n\nEXTENSIONS = (\n AddTorrentExt,\n PreferencesExt,\n SidebarExt,\n StatusBarExt,\n TorrentViewExt,\n)\n\n\nlog = logging.getLogger(__name__)\nlabelplus.gtkui.common.gtklib.dnd.log.setLevel(logging.INFO)\n\n\nclass GtkUI(GtkPluginBase):\n\n # Section: Initialization\n\n def __init__(self, plugin_name):\n\n RT.logger.setLevel(logging.INFO)\n if __debug__: RT.register(self)\n\n super(GtkUI, self).__init__(plugin_name)\n\n self.initialized = False\n\n self.config = None\n\n self.store = LabelStore()\n self.last_updated = None\n self._tries = 0\n self._calls = []\n\n self._extensions = []\n\n self._update_funcs = []\n self._cleanup_funcs = []\n\n\n def enable(self):\n\n log.info(\"Initializing %s...\", self.__class__.__name__)\n\n self._poll_init()\n\n\n def _poll_init(self):\n\n client.labelplus.is_initialized().addCallback(self._check_init)\n\n\n def _check_init(self, result):\n\n log.debug(\"Waiting for core to be initialized...\")\n\n if result == True:\n client.labelplus.get_label_updates().addCallback(self._finish_init)\n else:\n twisted.internet.reactor.callLater(INIT_POLLING_INTERVAL,\n self._poll_init)\n\n\n def _finish_init(self, result):\n\n log.debug(\"Resuming initialization...\")\n\n try:\n info = client.connection_info()\n self.daemon = \"%s@%s:%s\" % (info[2], info[0], info[1])\n\n self._load_config()\n self._update_store(result)\n\n self.initialized = True\n\n self._load_extensions()\n\n log.info(\"%s initialized\", self.__class__.__name__)\n except:\n log.error(\"Error initializing %s\", self.__class__.__name__)\n raise\n\n twisted.internet.reactor.callLater(0, self._update_loop)\n\n\n def _load_extensions(self):\n\n log.info(\"Loading extensions...\")\n\n for ext in EXTENSIONS:\n try:\n log.debug(\"Initializing %s\", ext.__name__)\n instance = ext(self)\n self._extensions.append(instance)\n if __debug__: RT.register(instance, ext.__name__)\n log.info(\"%s initialized\", ext.__name__)\n except:\n log.exception(\"Error initializing %s\", ext.__name__)\n\n\n # Section: Deinitialization\n\n def disable(self):\n\n log.info(\"Deinitializing %s...\", self.__class__.__name__)\n\n labelplus.common.cancel_calls(self._calls)\n\n self._run_cleanup_funcs()\n 
self._unload_extensions()\n self._update_funcs = []\n\n self._close_config()\n self._destroy_store()\n\n self.initialized = False\n\n if __debug__: RT.report()\n\n log.info(\"%s deinitialized\", self.__class__.__name__)\n\n\n def _run_cleanup_funcs(self):\n\n while self._cleanup_funcs:\n func = self._cleanup_funcs.pop()\n try:\n func()\n except:\n log.exception(\"Failed to run %s()\", func.func_name)\n\n\n def _unload_extensions(self):\n\n log.info(\"Unloading extensions...\")\n\n while self._extensions:\n ext = self._extensions.pop()\n try:\n ext.unload()\n log.info(\"%s deinitialized\", ext.__class__.__name__)\n except:\n log.exception(\"Error deinitializing %s\", ext.__class__.__name__)\n\n\n def _destroy_store(self):\n\n if self.store:\n self.store.destroy()\n self.store = None\n\n\n # Section: Public\n\n def get_extension(self, name):\n\n for ext in self._extensions:\n if ext.__class__.__name__ == name:\n return ext\n\n return None\n\n\n def register_update_func(self, func):\n\n if func not in self._update_funcs:\n self._update_funcs.append(func)\n\n\n def deregister_update_func(self, func):\n\n if func in self._update_funcs:\n self._update_funcs.remove(func)\n\n\n def register_cleanup_func(self, func):\n\n if func not in self._cleanup_funcs:\n self._cleanup_funcs.append(func)\n\n\n def deregister_cleanup_func(self, func):\n\n if func in self._cleanup_funcs:\n self._cleanup_funcs.remove(func)\n\n\n # Section: Config\n\n def _load_config(self):\n\n config = deluge.configmanager.ConfigManager(GTKUI_CONFIG)\n\n # Workaround for 0.2.19.x that didn't use header\n if config.config.get(\"version\") == 2:\n labelplus.common.config.set_version(config, 2)\n\n labelplus.common.config.init_config(config,\n labelplus.gtkui.config.CONFIG_DEFAULTS,\n labelplus.gtkui.config.CONFIG_VERSION,\n labelplus.gtkui.config.convert.CONFIG_SPECS)\n\n self._update_daemon_config(config)\n self._normalize_config(config)\n\n self.config = config\n\n\n def _close_config(self):\n\n if self.config:\n if self.initialized:\n self.config.save()\n\n deluge.configmanager.close(GTKUI_CONFIG)\n\n\n def _update_daemon_config(self, config):\n\n saved_daemons = deluge.component.get(\"ConnectionManager\").config[\"hosts\"]\n if not saved_daemons:\n config[\"daemon\"] = {}\n else:\n daemons = [\"%s@%s:%s\" % (x[3], x[1], x[2]) for x in saved_daemons]\n\n # Remove daemons from config if not in ConnectionManager hosts\n for daemon in config[\"daemon\"].keys():\n if \"@localhost:\" in daemon or \"@127.0.0.1:\" in daemon:\n continue\n\n if daemon not in daemons and daemon != self.daemon:\n del config[\"daemon\"][daemon]\n\n if self.daemon not in config[\"daemon\"]:\n config[\"daemon\"][self.daemon] = copy.deepcopy(\n labelplus.gtkui.config.DAEMON_DEFAULTS)\n\n\n def _normalize_config(self, config):\n\n labelplus.common.normalize_dict(config.config,\n labelplus.gtkui.config.CONFIG_DEFAULTS)\n\n labelplus.common.normalize_dict(config[\"common\"],\n labelplus.gtkui.config.CONFIG_DEFAULTS[\"common\"])\n\n for daemon in config[\"daemon\"]:\n labelplus.common.normalize_dict(config[\"daemon\"][daemon],\n labelplus.gtkui.config.DAEMON_DEFAULTS)\n\n\n # Section: Update\n\n def _update_loop(self):\n\n def on_timeout():\n\n log.error(\"%s: %s\", STR_UPDATE, LabelPlusError(ERR_TIMED_OUT))\n\n if self.initialized:\n self._tries += 1\n if self._tries < MAX_TRIES:\n self._calls.append(twisted.internet.reactor.callLater(\n THROTTLED_INTERVAL, self._update_loop))\n else:\n log.error(\"%s: %s\", STR_UPDATE, LabelPlusError(ERR_MAX_RETRY))\n\n\n def 
process_result(result):\n\n if isinstance(result, Failure):\n if (isinstance(result.value, DelugeRPCError) and\n result.value.exception_type == \"LabelPlusError\"):\n log.error(\"%s: %s\", STR_UPDATE,\n LabelPlusError(result.value.exception_msg))\n interval = THROTTLED_INTERVAL\n else:\n return result\n else:\n self._tries = 0\n interval = UPDATE_INTERVAL\n self._update_store(result)\n\n if self.initialized:\n self._calls.append(twisted.internet.reactor.callLater(interval,\n self._update_loop))\n\n\n labelplus.common.clean_calls(self._calls)\n\n if self.initialized:\n pickled_time = cPickle.dumps(self.last_updated)\n deferred = client.labelplus.get_label_updates(pickled_time)\n labelplus.common.deferred_timeout(deferred, REQUEST_TIMEOUT, on_timeout,\n process_result, process_result)\n\n\n def _update_store(self, result):\n\n if not result:\n return\n\n update = cPickle.loads(result)\n\n log.debug(\"Update: Type: %s, Timestamp: %s\", update.type,\n update.timestamp)\n\n self.last_updated = update.timestamp\n self.store.update(update.data)\n\n for func in list(self._update_funcs):\n try:\n func(self.store)\n except:\n log.exception(\"Failed to run %s()\", func.func_name)\n","repo_name":"ratanakvlun/deluge-labelplus","sub_path":"labelplus/gtkui/gtkui.py","file_name":"gtkui.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"3"}
+{"seq_id":"72497906320","text":"import dask\nimport numpy\n\nfrom aydin.it.normalisers.base import NormaliserBase\nfrom aydin.util.log.log import lsection, lprint\n\n\nclass MinMaxNormaliser(NormaliserBase):\n \"\"\"Min-Max Normaliser\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Constructs a normalisers\"\"\"\n super().__init__(**kwargs)\n\n def calibrate(self, array):\n \"\"\"Method to calibrate\n\n Parameters\n ----------\n array : numpy.ArrayLike\n\n \"\"\"\n with lsection(\"Calibrating array using minmax method\"):\n self.original_dtype = array.dtype\n\n if hasattr(array, '__dask_keys__'):\n self.rmin = dask.array.min(array.flatten()).compute()\n self.rmax = dask.array.max(array.flatten()).compute()\n else:\n self.rmin = numpy.min(array)\n self.rmax = numpy.max(array)\n\n lprint(f\"Range for normalisation: [{self.rmin}, {self.rmax}]\")\n\n return self.rmin, self.rmax\n","repo_name":"royerlab/aydin","sub_path":"aydin/it/normalisers/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"3"}
+{"seq_id":"21327504166","text":"import requests, time, re, html\nfrom tqdm import tqdm\n\nclass web_scraper:\n def __init__(self, initial_url):\n self.__base_dblp_url = initial_url\n self.__alt_urls = ['https://dblp.org','http://dblp.uni-trier.de', 'https://dblp2.uni-trier.de ', 'https://dblp.dagstuhl.de']\n self.__alt_urls.remove(initial_url)\n\n # Method to retrieve full title of conference series by unique id\n def _retrieve_conf_series_name(self, conf_id):\n url = f\"{self.__base_dblp_url}/db/conf/{conf_id}/index.html\"\n page = requests.get(url)\n if page.status_code == 200:\n re_string = '
([\\s\\S]*?)
'\n conf_series_name = re.search(re_string, page.text).group(1)\n\n # In event of a redirection, follow the link and retrieve from there\n redirect_texts = [\"Redirecting ...\", \"Redirect ...\", \"Redirection ...\"]\n if conf_series_name in redirect_texts:\n re_string=f'
\n\tregex_pb = re.compile('It i.+,$')\t\t\t# Exception 3 => start with
\n\twhile data:\n\t\tdata = data[:-1]\t\t\t\t# slicing the data\n\t\tl = len(data)\t\t\t\t\t# calculate the length of data\n\t\tif regex_h2_1.search(data) != None:\t\t# search for 'argument' in data\n\t\t\tfw.write(\"
%s
\\n\\n\" %data[:-1])\t# formatting with h2 tag\n\t\t\tdata = fr.readline()\t\t\t# flush the next line\n\t\telif regex_h2_2.search(data) != None:\t\t# search for 'roman number' in data\n\t\t\tfw.write(\"
%s
\\n\" %data[:-1])\t# formatting with h2 tag\n\t\telif regex_Tw.search(data) != None:\t\t# search for 'Twas right' or 'Twas night' in data\n\t\t\tfw.write(data + \"\\n\")\t\t\t# write on file\n\t\telif regex_pp.search(data) != None:\t\t# search for long string in data\n\t\t\tfw.write(\"
%s.
\\n\\n\" %data[:-1])\t# formatting with p tag\n\t\t\tdata = fr.readline()\t\t\t# flush the next line\n\t\telif regex_sp.search(data) != None:\t\t# handle exception\n\t\t\tfw.write(\"%s.
\\n\" %data[:-1])\t# add at the end of string\n\t\telif l == 0:\t\t\t\t\t# If there is no contents,\n\t\t\tfw.write(\" \\n\")\t\t\t# Just replace it with \n\t\telif regex_pb.search(data) != None:\t\t# handle exception\n\t\t\tfw.write(\"
%s \\n\" %data.lstrip())# add
at the start of string and at the end of string\n\t\telse:\n\t\t\tregex = re.compile('$')\t\t\t# If there is no matching,\n\t\t\tfw.write(regex.sub(\" \", data) + \"\\n\")# add at the end of string\n\t\tdata = fr.readline()\t\t\t\t# read next line\n\ndef footer(file):\t\t\t\t\t\t# Make HTML Footer\n\tfile.write(\" \\n\")\n\tfile.write(\"\\n\\n\")\n\nif __name__ == '__main__':\n\tfile_read = 'rime.txt'\t\t\t\t\t# input file\n\tfile_write = 'rime.html'\t\t\t\t# output file\n\twith open('convert/' + file_read, 'r') as fr:\t\t# open input file\n\t\twith open('convert/' + file_write, 'w') as fw:\t# oen output file\n\t\t\theader(fw, fr.readline()[:-1])\t\t# maek header and make title\n\t\t\tbody(fr, fw)\t\t\t\t# make body\n\t\t\tfooter(fw)\t\t\t\t# make footer\n","repo_name":"merrym4n/txt2html","sub_path":"rime.py","file_name":"rime.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33898347611","text":"import numpy as np\nimport skimage\nimport skimage.color\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport string\nfrom model_experiment import model_experiment\n\nletters = list(string.ascii_uppercase)\nbaseline, image = model_experiment()\n\n\n# RGB\ndiff = skimage.color.rgb2hsv(image.img) - skimage.color.rgb2hsv(baseline.img)\ndiff = -diff # to comply with the darsia definition\n\n# Regularize\nsmooth = skimage.restoration.denoise_tv_bregman(\n diff, weight=0.025, eps=1e-4, max_num_iter=100, isotropic=True\n)\n\nsamples = [\n (slice(50, 150), slice(100, 200)),\n (slice(50, 150), slice(1600, 1700)),\n]\nconcentrations = np.array([1, 0.9])\n\n# visualise patches\nfig, ax = plt.subplots()\nax.imshow(smooth) # visualise abs colours, because relative cols are neg\nax.set_xlabel(\"horizontal pixel\")\nax.set_ylabel(\"vertical pixel\")\n\n# double check number of patches\nn = np.shape(samples)[0] # number of patches\nprint(\"number of support patches: \" + str(n))\n\n# init colour vector\ncolours = np.zeros((n, 3))\n# enumerate through all patches\nfor i, p in enumerate(samples):\n # visualise patches on image\n rect = patches.Rectangle(\n (p[1].start, p[0].start),\n p[1].stop - p[1].start,\n p[0].stop - p[0].start,\n linewidth=1,\n edgecolor=\"w\",\n facecolor=\"none\",\n )\n ax.text(p[1].start + 130, p[0].start + 100, letters[i], fontsize=15, color=\"white\")\n ax.add_patch(rect)\n\n # histo analysis\n patch = smooth[p]\n # patch = skimage.color.rgb2hsv(patch)\n vals = patch[:, :, 0]\n h_hist, bins = np.histogram(vals, bins=100, range=(-1, 1))\n plt.figure(\"h\" + letters[i])\n plt.stairs(h_hist, bins)\n\n\nfig, axes = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(6, 2))\nfig.add_subplot(111, frameon=False)\nplt.tick_params(\n labelcolor=\"none\", which=\"both\", top=False, bottom=False, left=False, right=False\n)\nplt.ylabel(\"vertical pixel\")\nplt.xlabel(\"horizontal pixel\")\n\n# SIGNAL split\n# reduction blue: B\n# hsv = skimage.color.rgb2hsv(smooth)\nhsv = np.copy(smooth)\nscalar_blue = hsv[:, :, 2]\nmask_hue = np.logical_and(\n hsv[:, :, 0] > -0.5,\n hsv[:, :, 0] < -0.4,\n)\nscalar_blue[~mask_hue] = 0\n# ax1 = fig.add_subplot(211)\naxes[0].imshow(scalar_blue, vmin=0, vmax=1)\n\n# reduction green A\n# hsv = skimage.color.rgb2hsv(smooth)\nhsv = np.copy(smooth)\nscalar_green = hsv[:, :, 2]\nmask_hue = np.logical_and(\n hsv[:, :, 0] > -0.08,\n hsv[:, :, 0] < -0.04,\n)\nscalar_green[~mask_hue] = 0\n# ax2 = fig.add_subplot(212)\naxes[1].imshow(scalar_green, vmin=0, vmax=1)\n\n\nfig, axes = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(6, 2))\nfig.add_subplot(111, frameon=False)\nplt.tick_params(\n labelcolor=\"none\", which=\"both\", top=False, bottom=False, left=False, right=False\n)\nplt.ylabel(\"vertical pixel\")\nplt.xlabel(\"horizontal pixel\")\n\naxes[0].imshow(scalar_blue + scalar_green, vmin=0, vmax=1)\n\n# scale and weight scalar signals\nweighted_signal = (\n scalar_blue / np.max(scalar_blue) * 0.9 + scalar_green / np.max(scalar_green) * 1\n)\naxes[1].imshow(weighted_signal, vmin=0, vmax=1)\n\nplt.figure()\nplt.imshow(scalar_blue + scalar_green, vmin=0, vmax=1)\nplt.xlabel(\"horizontal pixel\")\nplt.ylabel(\"vertical pixel\")\n\nplt.figure(\"cut ph val\")\nplt.plot(np.average(weighted_signal, axis=0))\nplt.xlabel(\"horizontal pixel\")\nplt.ylabel(\"signal value\")\n\n\n# plt.figure(\"cut ph val\")\n# plt.plot(np.average(weighted_signal, axis=0))\n# plt.xlabel(\"horizontal pixel\")\n# plt.ylabel(\"average 
concentration\")\n# plt.figure(\"cut ph val\")\n# plt.plot(np.average(scalar_blue + scalar_green, axis=0))\nplt.show()\n","repo_name":"moritzmarquardt/CA","sub_path":"plots_hsv_reduction.py","file_name":"plots_hsv_reduction.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"44329683409","text":"import checks\nimport errors\n\nclass Node():\n # Node types: normal, if, elif, else, def, for, while\n def __init__(self, code, node_type = 'normal', conditions_to_reach = [], node_id = 0):\n self.id = node_id\n self.code = code\n self.node_type = node_type\n self.sources = [] # Since that this is a graph (not a tree) so a node can have more than one source.\n self.conditions_to_reach = conditions_to_reach\n self.children = dict() # contains {Node: Edge}\n\n def __str__(self):\n return str(self.id) + \": \" + self.code\n # return str(self.id) + \": \" + self.code + \" | Type: \" + self.node_type + \" | CTR: \" + str(self.conditions_to_reach)\n\n def extract_condition(self, edge):\n \"\"\"\n Extracts the condition of the if, elif, for or while lines.\n \"\"\"\n x = self.conditions_to_reach.copy()\n if self.node_type in ['for', 'while', 'if', 'elif']:\n x.append(f'{str(edge)[0]} - {self.id}: ' + self.code[len(self.node_type)+1:-1])\n return x\n \n def findCommonConditions(self, condition1):\n new = []\n for i in range(min(len(condition1), len(self.conditions_to_reach))):\n if condition1[i] == self.conditions_to_reach[i]:\n new.append(condition1[i])\n else: break\n return new\n\n def addNodeChild(self, child_node, edge, called_from_add_child = False):\n \"\"\"\n Used to add a node as a child to another node instead of creating a new node.\n \"\"\"\n if child_node in self.children: return child_node\n self.children[child_node] = edge\n child_node.sources.append(self)\n # print(self.conditions_to_reach, child_node.conditions_to_reach)\n if len(self.conditions_to_reach) < len(child_node.conditions_to_reach) and not called_from_add_child:\n child_node.conditions_to_reach = self.conditions_to_reach.copy()\n # print(\"REDUCING\", child_node.code, child_node.node_type, self.node_type)\n if child_node.node_type in ['elif', 'else'] and self.node_type in ['if', 'elif']:\n child_node.conditions_to_reach.append(f'{str(edge)[0]}{self.id}: ' + self.code[len(self.node_type)+1:-1])\n elif not called_from_add_child:\n child_node.conditions_to_reach = self.findCommonConditions(child_node.conditions_to_reach)\n # print(\"REDUCING\", child_node.code)\n\n return child_node\n \n def addChild(self, child, node_type, edge, node_id):\n \"\"\"\n Creates a new node and adds it as a child for this node.\n \"\"\"\n child_node = Node(child, node_type, self.extract_condition(edge), node_id=node_id)\n return self.addNodeChild(child_node, edge, True)\n\nclass Edge:\n \"\"\"\n Represents the edge between 2 nodes in the graph.\n self.edge values: None, True, False\n If the edge is directed from a normal typed node, edge is None.\n If it is directed from if, elif, for or while, then it is either True or False.\n \"\"\"\n def __init__(self, edge, visited = 0):\n self.edge = edge\n self.visited = visited\n \n def __str__(self) -> str:\n return str(self.edge)\n \nclass Func:\n def __init__(self, scope, cfg):\n self.scope = scope\n self.cfg = cfg\n\nclass CFG():\n \"\"\"\n Represents a control flow graph.\n \"\"\"\n def __init__(self, code, indentation, parent = None, debug = False):\n self.parent = parent\n self.debug = debug\n self.root = Node('Start', node_id = 0)\n self.size = 1\n self.indentation = indentation # What indentation is used in the code\n self.constructed = False # indicates whether construct_graph was called on this graph or not\n self.code_lines = code.split('\\n')\n self.code_lines.append('End')\n self.defs = dict()\n self.child_graphs = dict() # contains graphs of all functions 
defined in the code.\n # if this graph contains a function, it may have another functions defined\n # in it, so they will also be child graphs for this function graph.\n\n def printCFG(self):\n if not self.constructed: return\n queue = []\n current = self.root\n visited_set_to = list(current.children.values())[0].visited\n queue.append(current)\n while len(queue) > 0:\n current = queue.pop(0)\n print(current, f\" | Parents: {[i.id for i in current.sources]}\")\n for i in current.children.keys():\n if current.children[i].visited == visited_set_to:\n current.children[i].visited += 1\n current.children[i].visited %= 2\n queue.append(i)\n\n def extractAllDefs(self):\n current_indent = 0 # How much is the previous line indented. (number of tabs)\n def_code = None # The code in the defined function\n def_name = None # The function's name\n in_def = [False, None] # If we are currently in a function and how much is it indented.\n to_remove = list() # We add lines in the function to this list to be removed from the code.\n\n for line in self.code_lines:\n if checks.checkUnwantedLine(line): continue\n\n line_indent = (len(line) - len(line.lstrip())) // self.indentation\n if line_indent < current_indent: # if we get out of def block\n new_cfg = CFG(def_code[:-1], self.indentation, self, debug=self.debug)\n self.child_graphs[def_name.split('(')[0]] = [new_cfg, def_name.split('(')[1][:-1], False] # Add function graph to the children of this graph.\n current_indent = line_indent\n in_def = [False, None]\n \n if line.strip()[:4] == 'def ' and not in_def[0]:\n current_indent = line_indent + 1\n in_def = [True, len(line) - len(line.lstrip())]\n def_name = line.strip()[4:-1]\n if checks.checkReservedKeyword(def_name.split('(')[0]):\n raise errors.InvalidUseOfReservedKeywordException(f\"Cannot use {def_name.split('(')[0]} as function name.\")\n def_code = ''\n elif not in_def[0]: continue\n else:\n def_code += line[in_def[1] + self.indentation:] + '\\n'\n to_remove.append(line)\n \n for i in to_remove:\n self.code_lines.remove(i)\n return len(self.child_graphs)\n\n def construct_graph(self):\n if self.debug: print(self.root.code, '\\t\\t', self.root.node_type)\n self.extractAllDefs()\n self.constructed = True\n if self.parent is not None: self.defs.update(self.parent.defs)\n current = self.root # Current node pointer\n indents = dict() # Dictionary of the lines that causes indentation\n the_if_list = [] # List of nodes containing if and elif statements\n current_indent = 0 # How much is the current block indented. (number of tabs)\n prev_line_indent = 0 # How much is the previous line indented. 
(number of tabs).\n last = dict() # contains nodes from if or elif blocks that are waiting for the elif or else\n # blocks to be closed so they can point to the following line\n added_indent = 0 # if 1 this indicates a new indentation has happened,\n # if -1 this indicates an indentation was closed\n returns = []\n\n for line in self.code_lines:\n if checks.checkUnwantedLine(line): continue\n\n common_child = None # If a node was created but is going to be added to the children list of\n # more than one node, we save it to not create it again\n\n # Determine the node type to insert this line into\n if line.strip()[:4] == 'for ':\n n_type = 'for'\n elif line.strip()[:4] == 'def ':\n n_type = 'def'\n def_indent = len(line) - len(line.lstrip())\n line = ' ' * def_indent + line.strip()[4:-1]\n elif line.strip()[:3] == 'if ':\n n_type = 'if'\n elif line.strip()[:5] == 'elif ':\n n_type = 'elif'\n elif line.strip()[:5] == 'else:':\n n_type = 'else'\n elif line.strip()[:6] == 'while ':\n n_type = 'while'\n else:\n n_type = 'normal'\n \n # If the user initialized a block with anything other than (if, elif, else, while, for, def)\n if line[-1] == ':' and n_type == 'normal':\n x = line.split(' ')[0]\n err = f'Invalid block intializer: \"{x}\".'\n for i in ['for', 'while', 'if', 'elif', 'else', 'def']:\n if line.startswith(i):\n err += '\\nDid you mean: \"' + i + ' ' + line[len(i):] + '\" ?'\n raise errors.InValidBlockException(err)\n \n # How much is the current line indented. (number of tabs)\n line_indent = (len(line) - len(line.lstrip())) // self.indentation\n return_continue_flag = False\n\n # if this line indentation is less than the previous line indentation\n # used when a block or more are closed to correctly put edges between nodes\n # READ this block after you read the rest of the code to UNDERSTAND IT.\n if line_indent < current_indent:\n if current.node_type in ['for', 'while', 'if', 'elif', 'else']:\n raise IndentationError(f'\"{line}\" is not properly indented after \"{current.code}\"')\n return_continue_flag = True\n different_indent = current_indent - line_indent # How many blocks were closed\n for i in range(different_indent): # For each block that was closed\n # indents[current_indent - i]: most recent block\n if indents[current_indent - i].node_type == 'else':\n the_if_list.append((indents.pop(current_indent - i), current_indent - i))\n elif indents[current_indent - i].node_type in ['for', 'while']:\n if current.code[:7] != 'return ' and current.code != 'return':\n current.addNodeChild(indents[current_indent - i], Edge(False))\n else: returns.append(current)\n # If there were \"if\" indentations that are closed in the same line with the while\n # or for loops, so they should point back to the loop statement.\n for j in range(len(the_if_list)):\n if the_if_list[j][0].node_type != 'else':\n the_if_list[j][0].addNodeChild(indents[current_indent - i], Edge(False))\n the_if_list = []\n current = indents.pop(current_indent - i)\n else:\n if len(the_if_list) != 0:\n # we don't want else statements in the_if_list because they always point to the next line\n if the_if_list[-1][0].node_type == 'else': the_if_list.pop(-1)\n the_if_list.append((indents.pop(current_indent - i), current_indent - i))\n if n_type == 'elif' or n_type == 'else':\n # if the line that gets us out of an if or elif blocks is an elif or an else, we don't want\n # the last line in the if or elif blocks to point at it, so we save it in last list.\n if last.get(line_indent) is None:\n last[line_indent] = []\n if 
current.code[:7] != 'return ' and current.code not in ['continue', 'break', 'return']:\n last[line_indent].append(current)\n x = line_indent + 1\n while last.get(x) is not None:\n for i in last.pop(x):\n last[line_indent].append(i)\n x += 1\n if len(the_if_list) > 1:\n for i in the_if_list[:-1]:\n last[line_indent].append(i[0])\n x = line_indent + 1\n while last.get(x) is not None:\n for j in last.pop(x):\n last[line_indent].append(j)\n x += 1\n the_if_list = the_if_list[-1:] \n elif len([i for i in last.keys() if i >= line_indent]) != 0:\n lasts = [last[i] for i in last.keys() if i >= line_indent]\n lasts2 = []\n for i in lasts:\n lasts2.extend(i)\n \n if the_if_list:\n if the_if_list[-1][0].node_type == 'else': the_if_list.pop(-1)\n\n if current.node_type in ['for', 'while']: common_child = current\n for i in lasts2:\n if common_child is None:\n common_child = i.addChild(line.strip(), n_type, Edge(None), node_id = self.size)\n self.size += 1\n else: i.addNodeChild(common_child, Edge(None))\n if current.node_type in ['for', 'while']: common_child = None\n \n added_indent = -1 # Since an indentation block was closed\n current_indent -= different_indent\n elif line_indent != prev_line_indent and current.node_type not in ['for', 'while', 'if', 'elif', 'else']:\n raise IndentationError(f'Line \"{line}\" not indented properly.')\n\n # Determine edge type\n if current.node_type in ['normal', 'def']:\n edge = Edge(None)\n elif current.node_type in ['for', 'while', 'if', 'elif']:\n # A node can have 2 children, one if the condition is true and other if false.\n # We traverse the true path first, so that's why when added_indent is 1, edge is true edge\n if added_indent > 0: edge = Edge(True)\n elif added_indent < 0: edge = Edge(False)\n\n for i in range(len(the_if_list)):\n if common_child is None:\n common_child = the_if_list[i][0].addChild(line.strip(), n_type, Edge(False), node_id = self.size)\n self.size += 1\n else: the_if_list[i][0].addNodeChild(common_child, Edge(False))\n the_if_list.clear()\n\n if self.debug: print(line, '\\t\\t', n_type)\n if (current.code[:7] == 'return ' or current.code in ['continue', 'break', 'return']) and return_continue_flag:\n current = common_child\n elif n_type in ['for', 'while', 'if', 'elif', 'else']:\n current_indent += 1 # We entered a new block\n added_indent = 1 # and therefore added indentation\n \n # We add the indentation of this block as a key to the indents dictionary, with a value of\n # node containing the line that caused the indentation (for, while, if or elif)\n if common_child is None:\n indents[current_indent] = current.addChild(line.strip(), n_type, edge, node_id = self.size)\n self.size += 1\n else:\n if last.get(line_indent) is not None:\n indents[current_indent] = common_child\n else: indents[current_indent] = current.addNodeChild(common_child, edge)\n current = indents[current_indent] # Let the current pointer point to the newly added node\n else:\n # Add this line as a child to the current node and set the pointer to point at the newly added node\n # If the node already exists, use it instead of making a new one.\n if common_child is None:\n current = current.addChild(line.strip(), n_type, edge, node_id = self.size)\n self.size += 1\n else:\n current = current.addNodeChild(common_child, edge)\n \n prev_line_indent = line_indent\n if current.node_type in ['elif', 'else'] and not [1 for i in current.sources if i.node_type in ['elif', 'if']]:\n raise errors.InValidBlockException(f'Cannot use {current.node_type} statement without an if 
statement.')\n for i in returns:\n i.addNodeChild(current, Edge(None))","repo_name":"mohamedmoataz-oacc/Code-Bugs-Scanner","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":16854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"2379130792","text":"import theano\nimport theano.tensor as T\nimport numpy as np\nimport lasagne\n\n# compute vector average\nclass AverageWordLayer(lasagne.layers.MergeLayer):\n def __init__(self, incomings, **kwargs):\n super(AverageWordLayer, self).__init__(incomings, **kwargs)\n\n #embedding layer is batch_size x max_post_length x max_sentence_length x d\n #mask layer is batch_size x max_post_length x max_sentence_length \n def get_output_for(self, inputs, **kwargs):\n emb_sums = T.sum(inputs[0] * inputs[1][:, :, :, None], axis=2)\n\n mask_sums = T.sum(inputs[1], axis=2)\n\n #need to avoid dividing by zero\n mask_sums += T.eq(mask_sums, T.as_tensor_variable(0))\n \n return emb_sums / mask_sums[:,:,None]\n\n # output is batch_size x max_post_length x d\n def get_output_shape_for(self, input_shapes):\n \n return (None,input_shapes[0][1],input_shapes[0][-1])\n\nclass AverageSentenceLayer(lasagne.layers.MergeLayer):\n def __init__(self, incomings, **kwargs):\n super(AverageSentenceLayer, self).__init__(incomings, **kwargs)\n\n #sentence layer is batch_size x max_post_length x d\n #mask layer is batch_size x max_post_length \n def get_output_for(self, inputs, **kwargs):\n emb_sums = T.sum(inputs[0] * inputs[1][:, :, None], axis=1)\n mask_sums = T.sum(inputs[1], axis=1)\n\n return emb_sums / mask_sums[:,None]\n\n # output is batch_size x d\n def get_output_shape_for(self, input_shapes):\n \n return (None,input_shapes[0][-1])\n\nclass AttentionWordLayer(lasagne.layers.MergeLayer):\n #uses either a fixed \"query\" for the important words or another layer\n #this returns weights that can be used in the averaging layer in place of the mask\n def __init__(self, incomings, d, W_w=lasagne.init.Normal(),\n u_w=lasagne.init.Normal(), b_w=lasagne.init.Normal(),\n custom_query=None, normalized=True, **kwargs):\n super(AttentionWordLayer, self).__init__(incomings, **kwargs)\n self.W_w = self.add_param(W_w, (incomings[0].output_shape[-1],d))\n self.b_w = self.add_param(b_w, (d,))\n self.normalized = normalized\n\n self.fixed_query = True\n if custom_query is not None:\n self.fixed_query = False\n self.u_w = lasagne.layers.get_output(custom_query)\n else:\n self.u_w = self.add_param(u_w, (d,)) \n \n def get_output_for(self, inputs, **kwargs):\n #u = T.sum(inputs[0], axis=-1)\n if self.fixed_query:\n u = T.dot(T.tanh(T.dot(inputs[0], self.W_w) + self.b_w), self.u_w)\n else:\n u = T.batched_dot(T.tanh(T.dot(inputs[0], self.W_w) + self.b_w), self.u_w)\n \n # set masked positions to large negative value\n u = u*inputs[1] - (1-inputs[1])*10000\n \n #now batch_size x post_length x sentence_length x 1 but need to normalize via softmax\n #over 2nd axis, and also multiply by the sentence mask\n\n # normalize over sentence_length (->large negative values = 0)\n if not self.normalized:\n return T.reshape(u, (inputs[0].shape[0], inputs[0].shape[1], inputs[0].shape[2]))\n u = T.reshape(u, (inputs[0].shape[0]*inputs[0].shape[1], inputs[0].shape[2]))\n alpha = T.nnet.softmax(u)\n alpha = T.reshape(alpha, (inputs[0].shape[0], inputs[0].shape[1], inputs[0].shape[2]))\n\n #now return the weighted sum\n #return T.sum(inputs[0] * alpha[:,:,:, None], axis=2)\n\n return alpha\n \n def get_output_shape_for(self, input_shapes):\n \n #return (None,input_shapes[0][1],input_shapes[0][-1])\n return (None,input_shapes[0][1],input_shapes[0][2])\n\nclass AttentionSentenceLayer(lasagne.layers.MergeLayer):\n #uses either a fixed \"query\" for the important words or another layer\n #this returns weights that can be used in the 
averaging layer in place of the mask\n def __init__(self, incomings, d, W_s=lasagne.init.Normal(),\n u_s=lasagne.init.Normal(), b_s=lasagne.init.Normal(),\n custom_query=None, nonlinearity=T.tanh,\n hidden_layers=1, **kwargs):\n super(AttentionSentenceLayer, self).__init__(incomings, **kwargs)\n self.W_s = [self.add_param(W_s, (incomings[0].output_shape[-1], d)) for i in range(hidden_layers)]\n self.b_s = [self.add_param(b_s, (d,)) for i in range(hidden_layers)]\n \n self.fixed_query = True\n if custom_query is not None:\n self.fixed_query = False\n self.u_s = lasagne.layers.get_output(custom_query)\n else:\n self.u_s = self.add_param(u_s, (d,)) \n self.nonlinearity = nonlinearity\n self.hidden_layers = hidden_layers\n \n def get_output_for(self, inputs, **kwargs):\n #u = T.sum(inputs[0], axis=-1)\n tmp = inputs[0]\n for i in range(self.hidden_layers):\n tmp = self.nonlinearity(T.dot(tmp, self.W_s[i]) + self.b_s[i][None, None, :])\n \n if self.fixed_query:\n u = T.dot(tmp, self.u_s) \n else:\n u = T.batched_dot(tmp, self.u_s)\n \n # set masked positions to large negative value\n if len(inputs) > 1:\n u = u*inputs[1] - (1-inputs[1])*10000\n \n #now batch_size x post_length x 1 but need to normalize via softmax\n\n # normalize over post_length (->large negative values = 0)\n u = T.reshape(u, (inputs[0].shape[0], inputs[0].shape[1]))\n alpha = T.nnet.softmax(u)\n\n #now return the weights\n\n return alpha\n \n def get_output_shape_for(self, input_shapes):\n \n #return (None,input_shapes[0][1],input_shapes[0][-1])\n return (None,input_shapes[0][-1])\n \nclass WeightedAverageWordLayer(lasagne.layers.MergeLayer):\n def __init__(self, incomings, **kwargs):\n super(WeightedAverageWordLayer, self).__init__(incomings, **kwargs)\n\n def get_output_for(self, inputs, **kwargs):\n return T.sum(inputs[0] * inputs[1][:,:,:,None], axis=2)\n\n def get_output_shape_for(self, input_shapes):\n return (None, input_shapes[0][1], input_shapes[0][-1])\n\nclass WeightedAverageSentenceLayer(lasagne.layers.MergeLayer):\n def __init__(self, incomings, **kwargs):\n super(WeightedAverageSentenceLayer, self).__init__(incomings, **kwargs)\n \n def get_output_for(self, inputs, **kwargs):\n return T.sum(inputs[0] * inputs[1][:,:,None], axis=1)\n\n def get_output_shape_for(self, input_shapes):\n return (None, input_shapes[0][-1])\n \nclass HighwayLayer(lasagne.layers.Layer):\n def __init__(self, incoming, num_units, W_h=lasagne.init.GlorotUniform(),\n b_h=lasagne.init.Constant(0.), W_t=lasagne.init.GlorotUniform(),\n b_t=lasagne.init.Constant(-2.),\n nonlinearity=lasagne.nonlinearities.rectify,\n num_leading_axes=1, **kwargs):\n \n super(HighwayLayer, self).__init__(incoming, **kwargs)\n self.nonlinearity = (nonlinearities.identity if nonlinearity is None else nonlinearity)\n\n self.num_units = num_units\n\n if num_leading_axes >= len(self.input_shape):\n raise ValueError(\n \"Got num_leading_axes=%d for a %d-dimensional input, \"\n \"leaving no trailing axes for the dot product.\" %\n (num_leading_axes, len(self.input_shape)))\n elif num_leading_axes < -len(self.input_shape):\n raise ValueError(\n \"Got num_leading_axes=%d for a %d-dimensional input, \"\n \"requesting more trailing axes than there are input \"\n \"dimensions.\" % (num_leading_axes, len(self.input_shape)))\n self.num_leading_axes = num_leading_axes\n\n if any(s is None for s in self.input_shape[num_leading_axes:]):\n raise ValueError(\n \"A DenseLayer requires a fixed input shape (except for \"\n \"the leading axes). 
Got %r for num_leading_axes=%d.\" %\n (self.input_shape, self.num_leading_axes))\n num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))\n\n assert(num_inputs == num_units)\n \n self.W_h = self.add_param(W_h, (num_inputs, num_units), name=\"W_h\")\n if b_h is None:\n self.b_h = None\n else:\n self.b_h = self.add_param(b_h, (num_units,), name=\"b_h\",\n regularizable=False)\n\n self.W_t = self.add_param(W_t, (num_inputs, num_units), name=\"W_t\")\n if b_t is None:\n self.b_t = None\n else:\n self.b_t = self.add_param(b_t, (num_units,), name=\"b_t\",\n regularizable=False)\n \n def get_output_shape_for(self, input_shape):\n return input_shape[:self.num_leading_axes] + (self.num_units,)\n\n def get_output_for(self, input, **kwargs):\n num_leading_axes = self.num_leading_axes\n if num_leading_axes < 0:\n num_leading_axes += input.ndim\n if input.ndim > num_leading_axes + 1:\n # flatten trailing axes (into (n+1)-tensor for num_leading_axes=n)\n input = input.flatten(num_leading_axes + 1)\n\n t = lasagne.nonlinearities.sigmoid(T.dot(input, self.W_t) + self.b_t)\n g = self.nonlinearity(T.dot(input, self.W_h) + self.b_h)\n\n return T.mul(t,g) + T.mul(1-t, input)\n\nclass MemoryLayer(lasagne.layers.MergeLayer):\n def __init__(self, incomings, W_r=lasagne.init.GlorotUniform(),\n hops=3, q=lasagne.init.Normal(), query=None, **kwargs):\n \n super(MemoryLayer, self).__init__(incomings, **kwargs)\n\n d = incomings[0].output_shape[-1]\n self.W_r = self.add_param(W_r, (d, d), name=\"W_r\")\n self.hops = hops\n self.d = d\n \n self.fixed_query = True\n if query is not None:\n self.fixed_query = False\n self.q = lasagne.layers.get_output(query)\n else:\n self.q = self.add_param(q, (d,)) \n\n def get_output_shape_for(self, input_shape):\n #B x D\n return (None, self.d)\n \n def get_output_for(self, inputs, **kwargs):\n q = self.q\n for i in range(self.hops):\n if self.fixed_query and not i:\n u = T.dot(inputs[0], q) \n else:\n u = T.batched_dot(inputs[0], q)\n\n # set masked positions to large negative value\n if len(inputs) > 1:\n u = u*inputs[1] - (1-inputs[1])*10000\n\n #now batch_size x post_length x 1 but need to normalize via softmax\n\n # normalize over post_length (->large negative values = 0)\n u = T.reshape(u, (inputs[0].shape[0], inputs[0].shape[1]))\n alpha = T.nnet.softmax(u)\n\n #now B x S\n o = T.dot(T.sum(inputs[0] * alpha[:,:,None], axis=1), self.W_r)\n if self.fixed_query:\n q = q + o\n else:\n q = q + o\n\n return q\n\n\nclass MyConcatLayer(lasagne.layers.MergeLayer):\n '''\n for concatenating a MxN tensor and an MxNxO tensor\n '''\n def __init__(self, incomings, **kwargs):\n super(MyConcatLayer, self).__init__(incomings, **kwargs) # MergeLayer constructor requires list of incoming layers\n \n def get_output_shape_for(self, input_shapes):\n lstm_shape, other_shape = input_shapes\n return (lstm_shape[0], lstm_shape[1], lstm_shape[2] + other_shape[-1])\n \n def get_output_for(self, inputs, **kwargs):\n lstm_input, other_input = inputs\n other_input = T.repeat(other_input.dimshuffle(0, 'x', 1), lstm_input.shape[1], axis=1) # repeat along time dimension\n return T.concatenate((lstm_input, other_input), axis=-1)\n\n","repo_name":"chridey/cmv","sub_path":"cmv/rnn/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":11854,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"38694512864","text":"from django.shortcuts import render\nfrom twitteruser.models import TwitterUser\nfrom tweet.models import Tweet\n\n# Create your views here.\n \ndef user_details(request, user_id):\n user = TwitterUser.objects.filter(id=user_id).first()\n return render(request, \"user.html\", {\"user\": user})\n \n \ndef following(request, follow_id):\n logged_in_user = request.user\n followed = TwitterUser.objects.filter(id=follow_id).first()\n logged_in_user.following.add(followed)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\ndef unfollowing(request, unfollow_id):\n logged_in_user = request.user\n followed = TwitterUser.objects.filter(id=unfollow_id).first()\n logged_in_user.following.remove(followed)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))","repo_name":"Paulracisz/twitterclone","sub_path":"twitteruser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"2485641274","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\nfrom .models import CustomUser\n\n\nclass CustomUserCreationForm(UserCreationForm):\n class Meta:\n model = CustomUser\n fields = (\"username\", \"email\")\n\n def __init__(self, *args, **kwargs):\n super(CustomUserCreationForm, self).__init__(*args, **kwargs)\n\n self.fields[\"username\"].widget.attrs[\"placeholder\"] = \"Username\"\n self.fields[\"username\"].widget.attrs[\"style\"] = \"text-align:center;\"\n\n self.fields[\"email\"].widget.attrs[\"placeholder\"] = \"Email\"\n self.fields[\"email\"].widget.attrs[\"style\"] = \"text-align:center;\"\n\n self.fields[\"password1\"].widget.attrs[\"placeholder\"] = \"Password\"\n self.fields[\"password1\"].widget.attrs[\"style\"] = \"text-align:center;\"\n\n self.fields[\"password2\"].widget.attrs[\"placeholder\"] = \"Confirm Password\"\n self.fields[\"password2\"].widget.attrs[\"style\"] = \"text-align:center;\"\n\n\nclass CustomUserChangeForm(UserChangeForm):\n class Meta:\n model = CustomUser\n fields = (\"username\", \"email\")\n\n\nCLEANING_FREQUENCIES = [\n (\"one-off\", \"One-off\"),\n (\"weekly\", \"Weekly\"),\n (\"fortnightly\", \"Fortnightly\"),\n (\"monthly\", \"Monthly\"),\n]\nCLEANING_TYPE = [\n (\"house\", \"House\"),\n (\"office\", \"Office\"),\n (\"presale\", \"Pre-sale clean\"),\n (\"spring\", \"Spring Clean\"),\n (\"bond/exit\", \"Bond or Exit Clean\"),\n (\"mum\", \"Cleaning for First-time Mums\"),\n (\"oven\", \"Oven Clean\"),\n (\"declutter\", \"Declutter and Organise\"),\n (\"airbnb\", \"Air BnB Clean\"),\n (\"outside\", \"Outdoor Patio Clean\"),\n (\"other\", \"Other (specify below)\"),\n]\nHOME_SIZE = [\n (\"n/a\", \"N/A\"),\n (\"1-2 bed\", \"1-2 bedrooms\"),\n (\"3-5 bed\", \"3-5 bedrooms\"),\n (\">5 bed\", \"More than 5 bedrooms\"),\n]\nOFFICE_SIZE = [\n (\"n/a\", \"N/A\"),\n (\"5-10 people\", \"5-10 people\"),\n (\"11-15 people\", \"11-15 people\"),\n (\">15 people\", \"More than 15 people\"),\n]\nOTHER_SERVICES = [\n (\"yoga\", \"Private Yoga\"),\n (\"pet sit\", \"Pet Sitting\"),\n (\"meal prep\", \"Meal Preparation\"),\n (\"other\", \"Other (specify below)\"),\n]\n\n\nclass ContactForm(forms.Form):\n first_name = forms.CharField(max_length=100)\n last_name = forms.CharField(max_length=100)\n email = forms.EmailField()\n mobile = forms.CharField(max_length=100)\n street_address = forms.CharField(\n label=\"Street Address of the property\",\n max_length=250,\n )\n post_code = forms.CharField(max_length=10)\n clean_type = forms.ChoiceField(\n label=\"What type of clean are you after?\",\n choices=CLEANING_TYPE,\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n required=False,\n )\n clean_type = forms.MultipleChoiceField(\n choices=CLEANING_TYPE,\n label=\"What type of clean are you after?\",\n required=False,\n widget=forms.CheckboxSelectMultiple,\n )\n clean_frequency = forms.MultipleChoiceField(\n label=\"How often should we come?\",\n choices=CLEANING_FREQUENCIES,\n widget=forms.CheckboxSelectMultiple,\n required=False,\n )\n home_size = forms.ChoiceField(\n required=False,\n label=\"How many bedrooms in the home?\",\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n choices=HOME_SIZE,\n )\n office_size = forms.ChoiceField(\n required=False,\n label=\"How many people use the office?\",\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n choices=OFFICE_SIZE,\n )\n other_services = forms.MultipleChoiceField(\n label=\"Are you interested in any of our 
other services?\",\n choices=OTHER_SERVICES,\n widget=forms.CheckboxSelectMultiple,\n required=False,\n )\n\n additonal_information = forms.CharField(widget=forms.Textarea, required=False)\n\n\nFEEDBACK_CATEGORIES = [\n (\"cleaning\", \"Cleaning\"),\n (\"business_coaching\", \"Business Coaching\"),\n (\"website\", \"Website\"),\n (\"other\", \"Other\"),\n]\nSATISFACTION_RATINGS = [\n (\"\",\"\"),\n (\"very satisfied\", \"Very satisfied\"),\n (\"satisfied\", \"Satisfied\"),\n (\"neutral\", \"Neutral\"),\n (\"dissatisfied\", \"Dissatisfied\"),\n (\"very dissatisfied\", \"Very dissatisfied\"),\n]\nCLEANLINESS_RATINGS = [\n (\"\",\"\"),\n (\"excellent\", \"Excellent\"),\n (\"good\", \"Good\"),\n (\"average\", \"Average\"),\n (\"poor\", \"Poor\"),\n (\"very poor\", \"Very poor\"),\n]\nINSTRUCTIONS_FOLLOWED = [\n (\"\",\"\"),\n (\"yes\", \"Yes, completely\"),\n (\"partially\", \"Yes, but partially\"),\n (\"no\", \"No, not at all\"),\n]\nAREAS = [\n (\"\",\"\"),\n (\"dusting/vaccumming\" ,\"Dusting and vacuuming\"),\n (\"kitchen/bedroom\",\"Kitchen & Bathrooms\"),\n (\"extra/rotational\",\"Extras & rotational items\"),\n ('helpful/friendly',\"Helpful & friendly manner\"),\n ]\nRECCOMMENDATION_CHOICES = [\n (\"\",\"\"),\n (\"yes\",\"Yes, definitley\"),\n (\"maybe\",\"Yes, maybe\"),\n (\"probably not\",\"No, probably not\"),\n (\"no\",\"No, defnitley not\"),\n ]\n\n\n\nclass FeedbackForm(forms.Form):\n name = forms.CharField(max_length=100)\n email = forms.EmailField(required=False, help_text=\"(Optional)\")\n feedback_category = forms.ChoiceField(\n label=\"What is your feedback about?\",\n choices=FEEDBACK_CATEGORIES,\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n )\n feedback = forms.CharField(widget=forms.Textarea, required=False)\n satisfaction_rating = forms.ChoiceField(\n label=\"How satisfied were you with the overall cleaning service provided?\",\n choices=SATISFACTION_RATINGS,\n required=False,\n )\n cleanliness_rating = forms.ChoiceField(\n label=\"How would you rate the cleanliness of your space after the cleaning?\",\n choices=CLEANLINESS_RATINGS,\n required=False,\n )\n instructions_followed = forms.ChoiceField(\n label=\"Did the cleaner follow any specific instructions or requests you had given?\",\n choices=INSTRUCTIONS_FOLLOWED,\n required=False,\n )\n areas_for_improvement = forms.ChoiceField(\n label=\"Areas for improvement\",\n choices=AREAS,\n required=False,\n )\n areas_of_satisfaction = forms.ChoiceField(\n label=\"Areas of satisfaction\",\n choices=AREAS,\n required=False,\n )\n would_reccommend = forms.ChoiceField(\n label=\"Would you recommend our cleaning services to others?\",\n choices=RECCOMMENDATION_CHOICES,\n required=False,\n )\n additional_information = forms.CharField(\n label=\"Is there anything else you would like to share about your experience with our cleaning service?\",\n widget=forms.Textarea,\n required=False,\n )\n contact_information = forms.CharField(\n label=\"Please provide your contact information if you would like us to follow up with you regarding your feedback\",\n widget=forms.Textarea,\n required=False,\n help_text=\"Thank you for taking the time to complete this feedback form. We appreciate your input and look forward to serving you again in the future.\"\n )","repo_name":"phyxphysio/holistic-hincher","sub_path":"holistic_hincher/free_pages/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37204731519","text":"\"\"\"\r\n## Function written to match MATLAB function graddetmf_ext()\r\n\r\n\r\n## Author: Caiya Zhang, Yuchen Zheng\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nfrom project.cell import cell\r\nfrom project.size import size\r\nfrom project.zeros import zeros\r\nfrom project.mftot import mftot\r\nfrom matpy.matrix import Matrix\r\nfrom project.get_fim_size import get_fim_size\r\nfrom project.update_designinlist import update_designinlist\r\n\r\n\r\ndef graddetmf_ext(model_switch,aX,groupsize,ni,xt,x,a,bpop,d,sigma,docc,poped_db,lndet=False,gradxt=False):\r\n \r\n n = get_fim_size(poped_db)\r\n m = size(ni)[0]\r\n if gradxt is False:\r\n gdmf = Matrix(1, (m, size(a)[1]))\r\n G_X = poped_db[\"design_space\"][\"G_a\"]\r\n else:\r\n gdmf = Matrix(1, (m, size(xt)[1]))\r\n G_X = poped_db[\"design_space\"][\"G_xt\"]\r\n \r\n \r\n iParallelN = (poped_db[\"settings\"][\"parallel\"][\"bParallelSG\"] == 1) + 1 #1 if no parallel, 2 if parallel\r\n \r\n if iParallelN == 2:\r\n designsin = cell(1,0)\r\n it = 1\r\n \r\n for p in range(0, iParallelN):\r\n if p == 2:\r\n #Execute parallel designs\r\n designout = designsin\r\n raise Exception(\"Parallel execution not yet implemented in PopED for R\")\r\n #designout = execute_parallel(designsin,poped_db)\r\n if iParallelN == 1:\r\n returnArgs = mftot(model_switch,groupsize,ni,xt,x,a,bpop,d,sigma,docc,poped_db) \r\n mft = returnArgs[0]\r\n poped_db = returnArgs[1]\r\n else:\r\n if p == 1:\r\n designsin = update_designinlist(designsin,groupsize,ni,xt,x,a,-1,0)\r\n else:\r\n designout = designsin\r\n mft = designout[it][\"FIM\"]\r\n it = it + 1\r\n\r\n\r\n if iParallelN == 1 or p == 2:\r\n #If we have a prior\r\n if all(size(poped_db[\"settings\"][\"prior_fim\"]) == size(mft)):\r\n mft = mft + poped_db[\"settings\"][\"prior_fim\"]\r\n \r\n imft = np.linalg.inv(mft)\r\n if imft[1,1] is np.inf:\r\n imft = zeros(size(mft))\r\n \r\n \r\n a0 = a\r\n xt0 = xt\r\n for k in range(0, max(max(max(G_X)), 0)):\r\n inters = (G_X == k)\r\n if sum(sum(inters)) !=0 : #If we have a covariate or time-point defined here (accord. to G_X)\r\n if gradxt is False:\r\n a = a0 + poped_db[\"settings\"][\"hgd\"]*inters\r\n else:\r\n xt = xt0 + poped_db[\"settings\"][\"hgd\"]*inters\r\n \r\n if iParallelN == 1:\r\n returnArgs = mftot(model_switch,groupsize,ni,xt,x,a,bpop,d,sigma,docc,poped_db) \r\n mf_plus = returnArgs[0]\r\n poped_db = returnArgs[1]\r\n else:\r\n if p == 1:\r\n designsin = update_designinlist(designsin,groupsize,ni,xt,x,a,-1,0)\r\n else:\r\n mf_plus = designout[it][\"FIM\"]\r\n it = it + 1\r\n \r\n if iParallelN == 1 or p == 2:\r\n #If we have a prior\r\n if all(size(poped_db[\"settings\"][\"prior_fim\"]) == size(mft)):\r\n mf_plus = mf_plus + poped_db[\"settings\"][\"prior_fim\"]\r\n \r\n ir = (mf_plus-mft)/poped_db[\"settings\"][\"hgd\"]\r\n \r\n s = 0 #Calc the tr(A^-1 * dA/dX) for some X\r\n for ct2 in range(0, n):\r\n s = s + np.matmul(imft[ct2,:], ir[:,ct2])\r\n \r\n \r\n if s == 0: #The model doesn't depend on a or xt, e$g. 
PD is only dependent on time and not dose, fix the a-gradient to a small value or PD with Placebo dose, fix the xt-gradient to a small value\r\n s = 1e-12\r\n \r\n gdmf[(inters==1) & (aX!=0)] = s\r\n # for(i in 1:size(a,1)){\r\n # for(j in 1:size(a,2)){\r\n # if((inters[i,j]==1 && aX[i,j]!=0)){\r\n # gdmf[i,j]=s\r\n # }\r\n # }\r\n # }\r\n \r\n \r\n if lndet is False:\r\n ret = gdmf*np.linalg.det(mft)\r\n else:\r\n ret = gdmf\r\n \r\n return {\"ret\": ret, \"poped_db\": poped_db}\r\n\r\n\r\n\r\n","repo_name":"felixzheng02/pypkpd","sub_path":"project/graddetmf_ext.py","file_name":"graddetmf_ext.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"}
+{"seq_id":"15933134262","text":"import os\nimport openai\nimport functools\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\ndef display(func):\n @functools.wraps(func)\n def wrapper_display_stats(*args, **kwargs):\n response = func(*args, **kwargs)\n print(f\"Time Taken: {response.response_ms / 1000.0}s\")\n print(f\"Token Usage: {response.usage['total_tokens']} total, {response.usage['completion_tokens']} completion, {response.usage['prompt_tokens']} prompt\")\n print(f\"Response:\\n{response.choices[0].message['content']}\")\n return response.choices[0].message[\"content\"]\n return wrapper_display_stats\n\n\n@display\ndef get_completion_from_prompt(prompt, model=\"gpt-3.5-turbo\", temperature=0, presence_penalty=0):\n messages = [{\"role\": \"user\", \"content\": prompt}]\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature,\n presence_penalty=presence_penalty,\n )\n return response\n\n\n@display\ndef get_completion_from_messages(messages, model=\"gpt-3.5-turbo\", temperature=0):\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature,\n )\n return response\n","repo_name":"Daan4/ChatGPT-experiments","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43972190553","text":"\n\ndef crawlnode(progmap, p, visited):\n #print('called for '+str(p))\n progs = progmap[p]\n for pr in progs:\n crawl = False\n if pr not in visited:\n crawl = True\n visited.add(pr)\n if crawl:\n crawlnode(progmap, pr, visited)\n\ndef run(lines):\n MAGIC_PROGRAM = 0\n progmap = {}\n for line in lines:\n items = line.split()\n cncprogs = []\n for prog in items[2:]:\n cncprogs.append(int(prog.strip(',')))\n progmap[int(items[0])] = cncprogs\n\n visited = set()\n crawlnode(progmap, MAGIC_PROGRAM, visited)\n return len(visited)\n\ndef run2(lines):\n progmap = {}\n for line in lines:\n items = line.split()\n cncprogs = []\n for prog in items[2:]:\n cncprogs.append(int(prog.strip(',')))\n progmap[int(items[0])] = cncprogs\n\n allsets = []\n for p in progmap.keys():\n visited = set()\n s = crawlnode(progmap, p, visited)\n if visited not in allsets:\n allsets.append(visited)\n #print allsets\n return len(allsets)\n\nif __name__ == '__main__':\n print('--- Advent of Code 2017: Day 12 ---')\n fname = 'input1.txt'\n import sys\n if len(sys.argv) == 2:\n fname = sys.argv[1]\n f = open(fname)\n lines = f.readlines()\n f.close()\n ans = run(lines)\n print('(part 1) ans: '+str(ans))\n ans2 = run2(lines)\n print('(part 2) ans: '+str(ans2))\n","repo_name":"seefdogg/adventOfCode2017","sub_path":"day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"73197908241","text":"import webob.exc\n\nfrom neutron.tests.unit.plugins.ml2 import test_plugin\nfrom neutron.tests.unit import testlib_api\n\n# BGPVPN Table metadata should be imported before\n# sqlalchemy metadata.create_all call else tables\n# will not be created.\nfrom networking_bgpvpn.neutron.db import bgpvpn_db # noqa\nfrom networking_bgpvpn.tests.unit.services import test_plugin as bgpvpn_plugin\n\nfrom networking_odl.common import constants as odl_const\nfrom networking_odl.tests.functional import base\n\n\nclass _TestBGPVPNBase(base.OdlTestsBase):\n rds = ['100:1']\n\n def setUp(self, plugin=None, service_plugins=None,\n ext_mgr=None):\n provider = {\n 'service_type': 'BGPVPN',\n 'name': 'OpenDaylight',\n 'driver': 'networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver',\n 'default': True\n }\n self.service_providers.return_value = [provider]\n self.plugin_arg = plugin\n self.service_plugin_arg = service_plugins\n self.ext_mgr_arg = ext_mgr\n super(_TestBGPVPNBase, self).setUp()\n\n def get_ext_managers(self):\n return self.ext_mgr_arg\n\n def get_plugins(self):\n return self.plugin_arg\n\n def get_additional_service_plugins(self):\n return self.service_plugin_arg\n\n def _assert_networks_associated(self, net_ids, bgpvpn):\n response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)\n self.assertItemsEqual(net_ids,\n response[odl_const.ODL_BGPVPN]['networks'])\n\n def _assert_routers_associated(self, router_ids, bgpvpn):\n response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)\n self.assertItemsEqual(router_ids,\n response[odl_const.ODL_BGPVPN]['routers'])\n\n def test_bgpvpn_create(self):\n with self.bgpvpn() as bgpvpn:\n self.assert_resource_created(odl_const.ODL_BGPVPN, bgpvpn)\n\n def test_bgpvpn_create_with_rds(self):\n with self.bgpvpn(route_distinguishers=self.rds) as bgpvpn:\n response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)\n self.assertItemsEqual(self.rds,\n response[odl_const.ODL_BGPVPN]\n ['route_distinguishers'])\n\n def test_bgpvpn_delete(self):\n with self.bgpvpn(do_delete=False) as bgpvpn:\n self._delete('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id'])\n self.assertIsNone(\n self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn))\n\n def test_associate_dissociate_net(self):\n with (self.network()) as net1, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:\n net_id = net1['network']['id']\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n with self.assoc_net(bgpvpn_id, net_id):\n self._assert_networks_associated([net_id], bgpvpn)\n self._assert_networks_associated([], bgpvpn)\n\n def test_associate_multiple_networks(self):\n with (self.network()) as net1, (self.network()) as net2, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:\n net_id1 = net1['network']['id']\n net_id2 = net2['network']['id']\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n with self.assoc_net(bgpvpn_id, net_id1), \\\n self.assoc_net(bgpvpn_id, net_id2):\n self._assert_networks_associated([net_id1, net_id2], bgpvpn)\n\n def test_assoc_multiple_networks_dissoc_one(self):\n with (self.network()) as net1, (self.network()) as net2, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:\n net_id1 = net1['network']['id']\n net_id2 = net2['network']['id']\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n with self.assoc_net(bgpvpn_id, net_id1):\n with self.assoc_net(bgpvpn_id, net_id2):\n self._assert_networks_associated([net_id1, net_id2],\n bgpvpn)\n self._assert_networks_associated([net_id1], bgpvpn)\n\n def test_associate_dissociate_router(self):\n with 
(self.router(tenant_id=self._tenant_id)) as router, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:\n router_id = router['router']['id']\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n with self.assoc_router(bgpvpn_id, router_id):\n self._assert_routers_associated([router_id], bgpvpn)\n self._assert_routers_associated([], bgpvpn)\n\n def test_associate_multiple_routers(self):\n with (self.router(tenant_id=self._tenant_id, name='r1')) as r1, (\n self.router(tenant_id=self._tenant_id, name='r2')) as r2, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:\n router_id1 = r1['router']['id']\n router_id2 = r2['router']['id']\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n with self.assoc_router(bgpvpn_id, router_id1):\n self._assert_routers_associated([router_id1], bgpvpn)\n with testlib_api.ExpectedException(\n webob.exc.HTTPClientError) as ctx_manager:\n with self.assoc_router(bgpvpn_id, router_id2):\n pass\n self.assertEqual(webob.exc.HTTPBadRequest.code,\n ctx_manager.exception.code)\n self._assert_routers_associated([router_id1], bgpvpn)\n\n def test_assoc_router_multiple_bgpvpns(self):\n with (self.router(tenant_id=self._tenant_id, name='r1')) as router, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn1, (\n self.bgpvpn()) as bgpvpn2:\n router_id = router['router']['id']\n bgpvpn_id_1 = bgpvpn1['bgpvpn']['id']\n bgpvpn_id_2 = bgpvpn2['bgpvpn']['id']\n with (self.assoc_router(bgpvpn_id_1, router_id)), (\n self.assoc_router(bgpvpn_id_2, router_id)):\n self._assert_routers_associated([router_id], bgpvpn1)\n self._assert_routers_associated([router_id], bgpvpn2)\n\n def test_associate_router_network(self):\n with (self.router(tenant_id=self._tenant_id)) as router, (\n self.network()) as net1, (\n self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:\n router_id = router['router']['id']\n net_id = net1['network']['id']\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n with self.assoc_router(bgpvpn_id, router_id), \\\n self.assoc_net(bgpvpn_id, net_id):\n response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)\n self.assertItemsEqual([router_id],\n response[odl_const.ODL_BGPVPN]\n ['routers'])\n self.assertItemsEqual([net_id],\n response[odl_const.ODL_BGPVPN]\n ['networks'])\n\n\nclass TestBGPVPNV2Driver(base.V2DriverAdjustment,\n bgpvpn_plugin.BgpvpnTestCaseMixin,\n _TestBGPVPNBase, test_plugin.Ml2PluginV2TestCase):\n _mechanism_drivers = ['opendaylight_v2']\n","repo_name":"Jordonchen/kolla-ansible-openstack","sub_path":"networking_odl/tests/functional/test_bgpvpn.py","file_name":"test_bgpvpn.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"32601549718","text":"## 2D UNet for 21cm Observation De-Noising\r\n## by TLM\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import Conv2D, BatchNormalization, Conv2DTranspose, concatenate, MaxPool2D, Activation\r\n\r\nclass unet2D(): \r\n \"\"\"\r\n General class for building fully connected 2D convolutional UNet\r\n Parameters: `n_filters`: starting filter size\r\n `conv_width`: how many convolutions to be performed in residual block\r\n `network_depth`: how many layers deep your network goes \r\n (limit: growth_factor^network_depth =< x_dim)\r\n `growth_factor`: 2 (how to divide feature size)\r\n `n_channels`: how many image (or frequency) channels to put in\r\n `x_dim`: image input size (x_dim x x_dim)\r\n `batchnorm`: bool (usually True) to reduce internal covariance shift\r\n `momentum`: batchnorm param, set to 0.1 for outputs, (default=0.9)\r\n `epsilon`: batchnorm param (default = 0.001) \r\n `activation`: activation function for outputs (default = 'relu')\r\n `maxpool`: whether or not to use MaxPool feature to downsamplee (default = True)\r\n \"\"\"\r\n \r\n def __init__(self,n_filters = 16, conv_width=1, \r\n network_depth = 4,\r\n n_channels=32, x_dim=32, dropout = 0.0, \r\n growth_factor=2, batchnorm = True, \r\n momentum=0.9, epsilon=0.001,\r\n activation='relu'\r\n ):\r\n \r\n self.n_filters = n_filters\r\n self.n_channels = n_channels\r\n self.conv_width = conv_width\r\n self.network_depth = network_depth\r\n self.x_dim = x_dim\r\n self.dropout = dropout\r\n self.growth_factor = growth_factor\r\n self.batchnorm = batchnorm\r\n self.momentum = momentum\r\n self.epsilon = epsilon\r\n self.activation = activation\r\n \r\n # define all layers\r\n \r\n def conv_block(self, input_tensor, n_filters, n_layers=1, strides=1, kernel_size=3, \\\r\n momentum=0.9, maxpool=False, batchnorm=True, layer_num=None):\r\n \"\"\"Function to add n_blocks convolutional layers with the parameters passed to it\"\"\"\r\n if layer_num is not None:\r\n if strides > 1:\r\n name = 'downsample_{}'.format(layer_num)\r\n else:\r\n name = None\r\n \r\n x = input_tensor \r\n \r\n for _ in range(n_layers): \r\n identity = x\r\n x = Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),\\\r\n padding = 'same', strides=strides, name=name)(x)\r\n\r\n if batchnorm:\r\n x = BatchNormalization(momentum=momentum)(x) \r\n x = Activation(self.activation)(x)\r\n # if l > 0:\r\n # x = Add()([x, identity])\r\n # x = Activation(self.activation)(x) \r\n return x \r\n \r\n \r\n def build_model(self):\r\n \"\"\"\r\n Function to build network with specified architecture parameters\r\n \"\"\"\r\n network_depth = self.network_depth\r\n n_filters = self.n_filters\r\n growth_factor = self.growth_factor\r\n momentum = self.momentum\r\n\r\n ## Start with inputs\r\n inputs = keras.layers.Input(shape=(self.x_dim, self.x_dim, self.n_channels),name=\"image_input\")\r\n x = inputs\r\n concat_down = []\r\n \r\n for h in range(network_depth):\r\n x = self.conv_block(x, n_filters, n_layers=self.conv_width,strides=1) \r\n concat_down.append(x)\r\n n_filters *= growth_factor\r\n x = self.conv_block(x, n_filters, n_layers=1, batchnorm=True, strides=2, \r\n maxpool=self.maxpool, layer_num=h+1)\r\n \r\n # reverse order of down layers\r\n concat_down = concat_down[::-1] \r\n # middle\r\n x = self.conv_block(x, n_filters, n_layers=self.conv_width, strides=1)\r\n \r\n # expansive path\r\n n_filters //= growth_factor\r\n for h in range(network_depth):\r\n n_filters //= 
growth_factor\r\n x = Conv2DTranspose(n_filters, kernel_size=3, strides=2, padding='same')(x)\r\n x = BatchNormalization(momentum=momentum, epsilon=self.epsilon)(x)\r\n x = Activation(self.activation)(x)\r\n x = concatenate([x, concat_down[h]])\r\n x = self.conv_block(x, n_filters, n_layers=self.conv_width, kernel_size=3, \r\n strides=1, momentum=self.momentum) \r\n # n_filters //= growth_factor\r\n \r\n ## output matches input dims\r\n output = Conv2DTranspose(self.n_channels,1,padding=\"same\",name=\"output\")(x) \r\n\r\n model = keras.models.Model(inputs=inputs,outputs=output)\r\n return model\r\n","repo_name":"tlmakinen/deep21","sub_path":"unet/unet_2d.py","file_name":"unet_2d.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"}
+{"seq_id":"18478831909","text":"class Email:\n\n def __init__(self, sender, receiver, content, is_sent=False):\n self.sender = sender\n self.receiver = receiver\n self.content = content\n self.is_sent = is_sent\n\n def send(self):\n self.is_sent = True\n\n def get_info(self):\n return f\"{self.sender} says to {self.receiver}: {self.content}. Sent: {self.is_sent}\"\n\n\ninput_list = input()\nemails = []\nwhile input_list != \"Stop\":\n separated_list = input_list.split()\n sender = separated_list[0]\n receiver = separated_list[1]\n content = separated_list[2]\n email = Email(sender, receiver, content)\n emails.append(email)\n input_list = input()\n\nsend_emails = list(map(int, input().split(\", \")))\n\nfor sequence in send_emails:\n emails[sequence].send()\n\nfor email in emails:\n print(email.get_info())\n","repo_name":"RadoslavTs/SoftUni-Courses","sub_path":"2. Python Fundamentals/06. Objects and classes/Lab/03. email.py","file_name":"03. email.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"16609066631","text":"import matplotlib.pyplot as plt\nimport statistics\n\nμ = 1\nb1 = 0.85\nb0 = 0.1\nσ = 1\nn = 30\n\neps = statistics.NormalDist(μ, σ**2).samples(n, seed=30)\nvals = [i for i in range(1, n)]\n\nh = []\nfor i in range(n-1):\n h.append(μ+b0*eps[i+1] + b1*eps[i])\n\nplt.plot(vals, h, label=\"Сгенерированная\")\n\npred_h = [b1*h[0], 0]\nfor i in range(1, n-2):\n for j in range(1, i+1):\n pred_h[i] += h[j]*(b1**(i-j+1))*(-1)**(i-j)\n pred_h.append(0)\n\nplt.plot(vals, pred_h, label=\"Предсказанная\")\n\nplt.legend()\nplt.savefig(\"graphic.png\")\nplt.show()\n\n\n\n","repo_name":"BarmaJley/Code-examples","sub_path":"Лабораторные по Эконометрике/2_lab/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73997406161","text":"\"\"\"\n# Definition for Employee.\nclass Employee:\n def __init__(self, id: int, importance: int, subordinates: List[int]):\n self.id = id\n self.importance = importance\n self.subordinates = subordinates\n\"\"\"\n\nclass Solution:\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n \n emp_dict = {}\n for each_emp in employees:\n emp_dict[each_emp.id] = [each_emp.importance , each_emp.subordinates]\n \n id_list = [id]\n total_imp = 0\n while id_list:\n cur_id = id_list.pop()\n total_imp += emp_dict[cur_id][0]\n id_list += emp_dict[cur_id][1]\n \n return total_imp\n","repo_name":"KajalGada/leetcode-python","sub_path":"leetcode_690_employee_importance/leetcode 690 employee importance.py","file_name":"leetcode 690 employee importance.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22717790391","text":"from __future__ import print_function\nfrom glob import glob\nfrom os.path import join as pjoin\nimport os\nimport io\nHERE = os.path.abspath(os.path.dirname(__file__))\n\ndef find_packages(top=HERE):\n \"\"\"\n Find all of the packages.\n \"\"\"\n packages = []\n for d, dirs, _ in os.walk(top, followlinks=True):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(os.path.relpath(d, top).replace(os.path.sep, '.'))\n elif d != top:\n # Don't look for packages in subfolders if current isn't a package.\n dirs[:] = []\n return packages\n\ndef get_version(file, name='__version__'):\n \"\"\"Get the version of the package from the given file by\n executing it and extracting the given `name`.\n \"\"\"\n path = os.path.realpath(file)\n version_ns = {}\n with io.open(path, encoding=\"utf8\") as f:\n exec(f.read(), {}, version_ns)\n return version_ns[name]\n\n\nfrom setuptools import setup\n\n\n# The name of the project\nname = 'nanohub-uidl'\n\n# Ensure a valid python version ### deprecated\n#ensure_python('>=3.3')\n\n# Get our version\nversion = get_version(pjoin('nanohubuidl', '_version.py'))\n\nlong_description = \"\"\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup_args = {\n 'name' : name,\n 'description' : 'A set of tools to run create Javascript Apps, using Teleporthq UIDL schema',\n 'long_description_content_type' : 'text/markdown',\n 'long_description':long_description,\n 'version' : version,\n 'scripts' : glob(pjoin('scripts', '*')),\n 'packages' : find_packages(),\n 'data_files' : [\n ('assets', []),\n (\n 'etc/jupyter/jupyter_notebook_config.d',\n ['nanohubuidl/jupyter-config/jupyter_server_config.d/nanohubuidl.json']\n )\n ],\n 'author' : 'Nanohub',\n 'author_email' : 'denphi@denphi.com',\n 'url' : 'https://github.com/denphi/nanohub-uidl',\n 'license' : 'BSD',\n 'platforms' : \"Linux, Mac OS X, Windows\",\n 'keywords' : ['IPython'],\n 'classifiers' : [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Framework :: Jupyter',\n ],\n 'include_package_data' : True,\n 'install_requires' : [\n 'nanohub-remote>=0.1.0',\n 'simtool',\n ],\n 'extras_require' : {\n 'test': [\n ],\n 'examples': [\n ],\n 'docs': [\n ],\n },\n 'entry_points' : {\n 'console_scripts': [\n 'run_uidl = nanohubuidl:main'\n ],\n },\n}\n\nif __name__ == '__main__':\n setup(**setup_args)\n","repo_name":"denphi/nanohub-uidl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37408006748","text":"import sys\ninput = sys.stdin.readline\n\nn,k = map(int, input().split())\ncoins = []\ncnt = 0\n\nfor _ in range(n):\n a = int(input())\n coins.append(a)\n\nwhile(k!=0):\n for i in range(len(coins)):\n if coins[i] > k:\n num = int(k//coins[i-1])\n cnt += num\n k -= coins[i-1]*num\n\nprint(cnt)","repo_name":"pipi-shortstocking/CodingTest","sub_path":"백준/11047/1차.py","file_name":"1차.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71706384401","text":"import socket\nimport time\nimport os\n\nBUFFER = 1024\n\ndef statistics(bytes_transmitted, start_time, file_size): \n elapsed_time = time.time() - start_time\n transfer_rate = bytes_transmitted / elapsed_time if elapsed_time != 0 else 0\n porcentage = (bytes_transmitted / file_size) * 100\n\n print(\"Tranfer rate: {:.2f} Bytes/s | Porcentage transferred: {:.2f}%\".format(transfer_rate, porcentage), end=\"\\r\", flush=True)\n\ndef send_file(client, namefile):\n\n if not os.path.exists('Client\\\\Files\\\\' + namefile):\n print(\"File does not exist!\")\n return\n\n try:\n with open('Client\\\\Files\\\\' + namefile, 'rb') as file:\n file_size = len(file.read())\n file.seek(0)\n start_time = time.time()\n bytes_transmitted = 0\n\n for data in file.readlines():\n client.send(data)\n bytes_transmitted += len(data)\n statistics(bytes_transmitted, start_time, file_size)\n\n print(\"\\n\" + f'{namefile} sent!\\n')\n except Exception as error:\n print(error)\n\ndef receive_file(client, namefile):\n data = client.recv(BUFFER).decode()\n\n if str(data).startswith(\"Error:\"):\n print(data)\n return\n\n file_size = int(data)\n\n with open('Client\\\\Files\\\\' + namefile, 'wb') as file:\n start_time = time.time()\n bytes_transmitted = 0\n\n while True:\n data = client.recv(BUFFER)\n\n if not data:\n break\n \n file.write(data)\n bytes_transmitted += len(data) \n statistics(bytes_transmitted, start_time, file_size)\n\n print(\"\\n\" + f'{namefile} received!')\n\ndef main():\n while True:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:\n client.connect(('localhost', 25565))\n print('Connected!\\n')\n\n operation = input('1 - Upload\\n2 - Download\\n3 - Exit\\n')\n\n if operation == '1':\n namefile = input('File Name: ')\n client.send((operation + ' ' + namefile).encode())\n send_file(client, namefile)\n elif operation == '2':\n namefile = input('File Name: ')\n client.send((operation + ' ' + namefile).encode())\n receive_file(client, namefile)\n elif operation == '3':\n break\n else:\n print('Invalid!\\n')\n continue\n\n client.close()\n print(\"Connection closed!\")\n\n client.close()\n print(\"Connection closed!\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"WilliamDRib/File_Transfer_with_Socket","sub_path":"Client/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33208559144","text":"from heapq import heappop, heappush\n\nn = int(input())\narr = []\nfor _ in range(n):\n arr.append(list(map(int, input().split())))\n\narr.sort() # start, end 순으로 sort\n\nanswer = 1\nnotEnding = [] # end 지점 모아두기 e\nfor s, e in arr:\n while notEnding and notEnding[0] <= s:\n heappop(notEnding)\n \n heappush(notEnding, e) # 새로운 수업 업데이트 \n answer = max(len(notEnding), answer) # 동시에 수업 진행 되는 최대.\n\nprint(answer)","repo_name":"hyukji/AlgorithmProblem","sub_path":"Python/15000/11000.py","file_name":"11000.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4258996943","text":"\ndef sym_diff(arr1,arr2):\n f1=set.difference(arr1,arr2)\n f2=set.difference(arr2,arr1)\n #Adding two sets\n print(f1)\n print(f2)\n un=list(set.union(f1,f2))\n un.sort()\n for value in un:\n print(value)\n\n\nif __name__=='__main__':\n n=int(input().strip())\n arr1=set(list(map(int,input().split())))\n m=int(input().strip())\n arr2=set(list(map(int,input().split())))\n sym_diff(arr1,arr2)","repo_name":"farhan1503001/Hackerrank-Python-Language-Problems","sub_path":"Python operations/symmetricdiff.py","file_name":"symmetricdiff.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"5912346267","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport math\n\nwidth, height = 100, 100\ntree_prob = 0.6\nsettle_prob = 0.6\nsettle_num = 5\nneighbours = [(-2, -2), (-1, -2), (0, -2), (1, -2), (2, -2),\n (-2, -1), (-1, -1), (0, -1), (1, -1), (2, -1),\n (-2, 0), (-1, 0), (1, 0), (2, 0),\n (-2, 1), (-1, 1), (0, 1), (1, 1), (2, 1),\n (-2, 2), (-1, 2), (0, 2), (1, 2), (2, 2)]\n\nEMPTY, TREE, SETTLE, FIRE, BREAK, = 0, 1, 2, 3, 4\n\ncolours = ['white', 'green', 'brown']\n # 'orange', 'black']\ncmap = colors.ListedColormap(colours)\n\nnp.random.seed(2)\nsystem = np.random.random([width, height])\n\nsystem = np.where(system <= tree_prob, TREE, EMPTY)\n\ndef settle_neighbours(x, y):\n system[x][y] = SETTLE\n for n in neighbours:\n neigh_x = x + n[0]\n neigh_y = y + n[1]\n if((neigh_x >= 0 and neigh_x < width) and (neigh_y >= 0 and neigh_y < height)):\n if(np.random.random() <= settle_prob):\n system[neigh_x][neigh_y] = SETTLE\n\n# generate settlements\nfor settle in range(settle_num):\n x = math.trunc(np.random.random() * 100)\n y = math.trunc(np.random.random() * 100)\n settle_neighbours(x, y)\n\nplt.imshow(system, cmap=cmap)\nplt.show()\n\n# print(system)\n\nnp.savetxt('system.txt', system, fmt='%.1i')\n ","repo_name":"cargraham/COMP6216-Group-Coursework","sub_path":"myModel.py","file_name":"myModel.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"27592188682","text":"import asyncio\nfrom random import randint\n\nloop = asyncio.get_event_loop()\nfuture = loop.create_future()\n\ndef random_hit(future, n, cnt=1, loop=None):\n #import pdb; pdb.set_trace()\n if loop is None:\n loop = asyncio.get_event_loop()\n v = randint(1, n)\n if v == 1:\n future.set_result(cnt)\n else:\n cnt += 1\n loop.call_soon(random_hit, future, n, cnt, loop)\n\nfuture.add_done_callback(lambda f: print(\"done\"))\nloop.call_soon(random_hit, future, 100)\n\nresult = loop.run_until_complete(future)\nprint(result)\n\nloop.close()\n","repo_name":"iihiro/test_asyncio","sub_path":"2_example.py","file_name":"2_example.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25698014494","text":"from django.shortcuts import render, redirect\nfrom principal.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic.base import View\nfrom principal.forms import *\nfrom django.http import HttpResponseRedirect, HttpResponse\n\n@login_required\ndef index(request):\n usuarioLogado = get_perfil_logado(request)\n turmas_aluno = Turma.objects.filter(alunos__id=usuarioLogado.id) \n turmas_adm = Turma.objects.all().filter(administrador=usuarioLogado.id)\n return render(request, 'index.html', {'turmas_adm': turmas_adm,'usuarioLogado':usuarioLogado, 'turmas_aluno': turmas_aluno})\n\n@login_required\ndef acessarTurma(request, id):\n ranking = []\n usuarioLogado = get_perfil_logado(request)\n turma = Turma.objects.get(id=id)\n atividades = Atividade.objects.filter(turma=turma)\n respostas = RespostaAtividade.objects.filter(atividade__turma__id = turma.id)\n if(len(turma.alunos.all()) > 0):\n for aluno in turma.alunos.all(): \n soma = 0\n for res in RespostaAtividade.objects.filter(aluno__id=aluno.id):\n soma = soma + res.nota\n r = Ranking(aluno.id,aluno.nome, soma, turma.id)\n ranking.append(r)\n rankingOrdenado = sorted(ranking, key=lambda Ranking:Ranking.resultado, reverse=True)\n else:\n rankingOrdenado = None\n return render(request, 'turmaDetalhes.html', {'turma': turma, 'atividades': atividades, 'usuarioLogado':usuarioLogado, 'ranking': rankingOrdenado})\n\n@login_required\ndef cadastrarVideo(request):\n usuarioLogado = get_perfil_logado(request)\n video = Video()\n urlVideo = request.POST.get('url')\n video.embedCode = urlVideo.replace('watch?v=', 'embed/')\n video.data_entrega = request.POST.get('dataEntrega')\n video.titulo = request.POST.get('titulo')\n video.valor = request.POST.get('valor')\n turmaId = request.POST.get('turma')\n turma = Turma.objects.get(id=turmaId)\n video.turma = turma\n video.save()\n return HttpResponseRedirect('/video/lista')\n\n@login_required\ndef listarVideos(request):\n usuarioLogado = get_perfil_logado(request)\n turmas = Turma.objects.filter(administrador__id=usuarioLogado.id)\n videos = Video.objects.filter(turma__administrador__id=usuarioLogado.id)\n videosVencido = []\n videosPrazo = []\n for v in videos:\n if(v.comparaData == False):\n videosVencido.append(v)\n else:\n videosPrazo.append(v) \n return render(request, 'listaVideos.html', {'videos':videosPrazo, 'videoVencido': videosVencido, 'usuarioLogado':usuarioLogado, 'turmas':turmas})\n\n@login_required\ndef verResposta(request, id):\n resposta = RespostaAtividade.objects.get(id=id)\n usuarioLogado = get_perfil_logado(request)\n return render(request, 'verResposta.html', {'resposta': resposta, 'usuarioLogado':usuarioLogado})\n\n@login_required\ndef listarAtividades(request):\n usuarioLogado = get_perfil_logado(request)\n turmas = Turma.objects.filter(administrador__id=usuarioLogado.id)\n atividadesProfessor = Atividade.objects.filter(turma__administrador__id=usuarioLogado.id)\n atividadesAluno = Atividade.objects.filter(turma__alunos__id=usuarioLogado.id)\n return render(request, 'listaAtividades.html', {'usuarioLogado': usuarioLogado, 'atividadesProfessor': atividadesProfessor, 'atividadesAluno': atividadesAluno, 'turmas':turmas})\n\n@login_required\ndef alterarNota(request, id, idPagina): \n novaNota = request.POST.get('nota')\n resposta = RespostaAtividade.objects.get(id=id)\n resposta.nota = novaNota\n resposta.save()\n usuarioLogado = get_perfil_logado\n if(idPagina == 1):\n return render(request, 'verResposta.html', 
{'resposta': resposta, 'usuarioLogado':usuarioLogado})\n else:\n respostas = RespostaAtividade.objects.filter(atividade__id=resposta.atividade.id)\n atividade = Atividade.objects.get(id=resposta.atividade.id)\n return render(request, 'respostasAtividade.html', {'respostas': respostas, 'atividade':atividade, 'usuarioLogado':usuarioLogado})\n\n@login_required\ndef responderAtividade(request, id, idAluno):\n usuarioLogado = get_perfil_logado(request)\n if(request.method == 'GET'):\n return render(request, 'respostaAtividadeForm.html', {'usuarioLogado': usuarioLogado})\n elif(request.method == \"POST\"):\n resposta = request.POST.get('froala-editor')\n atividade = Atividade.objects.get(id=id)\n aluno = Usuario.objects.get(id=idAluno)\n r = RespostaAtividade()\n r.aluno = aluno\n r.atividade = atividade\n r.nota = 0\n r.resposta = resposta\n r.save()\n return render(request, 'minhasRespostas.html', {'usuarioLogado': usuarioLogado})\n\n@login_required\ndef acessarTurmaAluno(request, idTurma):\n usuarioLogado = get_perfil_logado(request)\n atividades = Atividade.objects.filter(turma__id=idTurma)\n respostas = RespostaAtividade.objects.filter(aluno__id=1000).filter(atividade__turma__id=idTurma)\n return render(request, 'turmasDetalheAluno.html', {'respostas': respostas, 'atividades' : atividades, 'usuarioLogado': usuarioLogado})\n\n@login_required\ndef acessarDetalhesAluno(request, id):\n usuarioLogado = get_perfil_logado(request)\n aluno = Usuario.objects.get(id=usuarioLogado.id)\n return render(request, 'alunoDetalhes.html', {'aluno': aluno, 'usuarioLogado': usuarioLogado}) \n\n@login_required\ndef acessarNotasAluno(request, id, idTurma):\n usuarioLogado = get_perfil_logado(request)\n atividades = Atividade.objects.filter(turma__id=idTurma)\n respostas = RespostaAtividade.objects.filter(atividade__turma__id=idTurma).filter(aluno__id=id)\n return render(request, 'notasAluno.html', {'respostas':respostas, 'atividades':atividades, 'usuarioLogado':usuarioLogado})\n\n@login_required\ndef getRespostasAtividade(request, id):\n usuarioLogado = get_perfil_logado(request)\n respostas = RespostaAtividade.objects.filter(atividade=id)\n from ast import literal_eval\n gruposString = Grupo.objects.filter(atividade=id)\n grupos = literal_eval(gruposString[0].grupo) if gruposString else []\n atividade = Atividade.objects.get(id=id)\n return render(request, 'respostasAtividade.html', {'respostas': respostas, 'atividade':atividade, 'usuarioLogado':usuarioLogado, 'grupos': grupos})\n\n@login_required\ndef detalhesAtividade(request,id):\n usuarioLogado = get_perfil_logado(request)\n atividade = Atividade.objects.get(id=id)\n respostas = RespostaAtividade.objects.filter(atividade__id=atividade.id).filter(aluno__id=usuarioLogado.id)\n return render(request, 'atividadeDetalhes.html', {'atividade': atividade, 'usuarioLogado':usuarioLogado, 'respostas':respostas})\n\n@login_required\ndef get_perfil_logado(request):\n return request.user.usuario \n\n@login_required\ndef cadastrarTurma(request):\n usuarioLogado = get_perfil_logado(request)\n turma = Turma()\n turma.titulo = request.POST.get('nomeTurma')\n turma.descricao = request.POST.get('descricaoTurma')\n turma.codigo = request.POST.get('codigoTurma')\n turma.administrador = usuarioLogado\n turma.save()\n return HttpResponseRedirect('/index') \n\n@login_required\ndef cadastrarAtividade(request):\n usuarioLogado = get_perfil_logado(request)\n atividade = Atividade()\n atividade.titulo = request.POST.get('nomeAtividade')\n atividade.data_entrega = 
request.POST.get('dataEntrega')\n atividade.valor = request.POST.get('valorAtividade')\n atividade.url = request.POST.get('urlAtividade')\n atividade.individual = True if request.POST.get(\"individualAtividade\") == 'True' else False\n grupos = []\n t = request.POST.get('turmaAtividade')\n turma = Turma.objects.get(id=t)\n atividade.turma = turma\n if(atividade.individual == False):\n lenGrupo = request.POST.get('tamanhoGrupo')\n grupos = gerarGrupos(turma.alunos.all(), lenGrupo if lenGrupo and int(lenGrupo) > 1 else 1)\n atividade.save()\n grupo = Grupo()\n grupo.atividade = Atividade.objects.get(id=atividade.id)\n grupo.grupo = str(grupos)\n grupo.save()\n print(str(grupos))\n return HttpResponseRedirect('/atividade/lista')\n\n@login_required\ndef entrarTurma(request, id):\n msgErro = None\n usuarioLogado = get_perfil_logado(request)\n valor = request.POST.get('valorConsultado')\n turmas = Turma.objects.filter(codigo__icontains=valor)\n turma = Turma.objects.get(id=id)\n for aluno in turma.alunos.all():\n if(aluno.id == usuarioLogado.id):\n msgErro = \"Você já é aluno desta turma\"\n break\n else:\n msgErro = None\n turma.alunos.add(usuarioLogado)\n turma.save()\n return render(request, 'resultadoConsultaTurma.html', {'turmas':turmas, 'usuarioLogado':usuarioLogado, 'msgErro': msgErro})\n\n@login_required\ndef consultaTurmaByCodigo(request):\n valor = request.POST.get('valorBuscado')\n turmas = Turma.objects.filter(codigo__icontains=valor)\n usuarioLogado = get_perfil_logado(request) \n return render(request, 'resultadoConsultaTurma.html', {'turmas':turmas, 'usuarioLogado':usuarioLogado, 'valorConsultado': valor})\n\nclass RegistrarUsuarioView(View):\n template = 'cadastroUsuario.html'\n def get(self, request):\n return render(request, self.template)\n def post(self,request):\n #preenche o from\n form = RegistrarUsuarioForm(request.POST)\n\n #verifica se eh valido\n if form.is_valid():\n\n dados_form = form.data\n\n #cria o usuario\n usuario = User.objects.create_user(dados_form['nome'], dados_form['email'], dados_form['senha']) \n\n #cria o perfil\n perfil = Usuario(nome=dados_form['nome'],\n matricula=dados_form['matricula'],\n usuario=usuario)\n\n #grava no banco\n perfil.save()\n\n #redireciona para index\n return redirect('login')\n\n #so chega aqui se nao for valido\n #vamos devolver o form para mostrar o formulario preenchido \n return render(request, self.template_name, {'form' : form})\n\ndef gerarGrupos(alunos, lenGrupo):\n valores = []\n for aluno in alunos:\n valores.append(aluno.nome)\n countGrupo = 1\n grupos = {}\n tamanhoGrupo = int(lenGrupo)\n tamTotal = int(len(valores) / tamanhoGrupo)\n for x in range(0, tamTotal):\n pos = 0\n contador = 0\n grupo = []\n while(contador < tamanhoGrupo):\n try: \n grupo.append(valores[pos])\n del valores[pos]\n except IndexError:\n break\n contador += 1\n pos = 0 if pos == -1 else -1\n nome = 'Grupo ' + str(countGrupo)\n grupos[nome] = grupo\n countGrupo += 1\n maiorGrupo = len(grupos)\n if(len(valores) > 0):\n if(len(valores) == tamanhoGrupo):\n grupos['Grupo ' + str(countGrupo)] = valores\n else: \n for valor in valores:\n try:\n grupos['Grupo ' + str(countGrupo)].append(valor)\n except KeyError:\n grupos['Grupo ' + str(maiorGrupo)].append(valor)\n countGrupo -= 1\n countGrupo = countGrupo - 1 if countGrupo > 0 else maiorGrupo\n return grupos\n\nclass Ranking(object):\n idAluno = 0\n nomeAluno = ''\n resultado = 0.0\n idTurma = 0\n\n def __init__(self, idAluno, nome, resultado, idTurma):\n self.idAluno = idAluno\n self.nomeAluno = 
nome\n self.resultado = resultado\n self.idTurma = idTurma","repo_name":"Hallessandro/unione-ifrn","sub_path":"principal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11638,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"14901836509","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scripts import loss_funcs\nfrom scipy.stats import norm as scipy_normal\n\n# Grab some normal points\ndistribution = loss_funcs.NormalPDFLoss()\n\nhello = {}\nto_try = np.asarray([1500] * 1)\n#colours = ['r', 'g', 'b', 'm', 'c', 'y']\n\nmean = [0.0, 0.1, 0.2, 0.3, 0.4]\nmean = [0.0, 0.0, 0.0, 0.0, 0.0]\nstd_d = [1.0, 1.1, 1.2, 1.3, 1.4]\ncolors = ['r', 'g', 'c', 'b', 'm']\n\nmeans = np.zeros((len(mean), to_try.size))\n\nfor stage_i, (a_mean, a_std_d, a_color) in enumerate(zip(mean, std_d, colors)):\n for try_i, i in enumerate(to_try):\n random_variables = scipy_normal.rvs(size=i, loc=0., scale=1.)\n\n # Evaluate the cdf at each random deviate and sort the array\n cdfs = scipy_normal.cdf(random_variables, loc=0.0, scale=a_std_d)\n\n cdfs = cdfs[np.where(np.logical_and(cdfs > a_mean, cdfs < 1 - a_mean))[0]]\n\n # Make it median-invariant\n #cdfs = np.abs(cdfs - 0.5)\n cdfs_sorted = np.sort(cdfs)\n\n # Extend the cdfs and take means\n #cdfs[:] = 0.5\n #np.random.shuffle(cdfs)\n #cdfs = np.mean(cdfs.reshape(random_variables.shape[0], -1), axis=1)\n\n # Cumulative sum and normalise so last element is 1.0\n cdfs_summed = np.cumsum(cdfs)\n cdfs_summed /= 0.5 * i\n\n # Get expected points\n cdfs_expected = np.linspace(0., 1., num=cdfs_sorted.size)\n cdfs_summed = cdfs_expected\n\n cdfs_summed = np.log(np.cosh(cdfs_summed - cdfs_sorted))\n\n # Plot shiz\n plt.plot(cdfs_sorted, cdfs_summed, '-', lw=1, ms=2, color=a_color, alpha=0.1)\n boop = np.max(cdfs_summed)\n #plt.plot([0, 1], [boop, boop], '--', color=c, label='mean^2 of {}'.format(i))\n\n means[stage_i, try_i] = boop\n\n boop = np.mean(means[stage_i, :])\n plt.plot([0, 1], [boop, boop], '--', color=a_color, label='mean^2 of {}, {}'.format(a_mean, a_std_d))\n\n\n#plt.plot([0, 1], [0, 0], 'k--')\nplt.legend()\nplt.xlabel('ci')\nplt.ylabel('(F(ci) - ci)^2')\n#plt.title('CDF stat for mean {}, std_d {} of model'.format(mean, std_d))\nplt.show()\n\nprint(\"Std deviation in means: {}\".format(np.std(means, axis=1)))\n\n","repo_name":"emilyhunt/masters_project","sub_path":"examples/cdf_testing.py","file_name":"cdf_testing.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"69900055408","text":"import argparse\nimport gc\nimport os\nimport time\nimport random\nimport sys\nimport importlib\nsys.path.append('.')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport tqdm\n# from apex import amp\nfrom apex import parallel as apex_parallel\n\n# import data.dtu as dtu, data.sceneflow as sceneflow, data.blended as bld\n# from core.model_cas import Model, Loss\nfrom utils.io_utils import load_model, save_model\nfrom utils.preproc import recursive_apply\nfrom utils.utils import NanError\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--num_workers', type=int, default=8, help='The number of workers for the dataloader. 0 to disable the async loading.')\n# parser.add_argument('--num_gpus', type=int, default=1)\n\nparser.add_argument('--data_root', type=str, help='The root dir of the data.')\nparser.add_argument('--dataset_name', type=str, default='blended', help='The name of the dataset. Should be identical to the dataloader source file. e.g. blended refers to data/blended.py.')\nparser.add_argument('--model_name', type=str, default='model_cas', help='The name of the model. Should be identical to the model source file. e.g. model_cas refers to core/model_cas.py.')\n\nparser.add_argument('--num_src', type=int, default=3, help='The number of source views.')\nparser.add_argument('--max_d', type=int, default=128, help='The standard max depth number.')\nparser.add_argument('--interval_scale', type=float, default=1., help='The standard interval scale.')\nparser.add_argument('--cas_depth_num', type=str, default='32,16,8', help='The depth number for each stage.')\nparser.add_argument('--cas_interv_scale', type=str, default='4,2,1', help='The interval scale for each stage.')\nparser.add_argument('--resize', type=str, default='768,576', help='The size of the preprocessed input resized from the original one.')\nparser.add_argument('--crop', type=str, default='640,512', help='The size of the preprocessed input cropped from the resized one.')\n\nparser.add_argument('--mode', type=str, default='soft', choices=['soft', 'hard', 'uwta', 'maxpool', 'average'], help='The fusion strategy.')\nparser.add_argument('--occ_guide', action='store_true', default=False, help='Deprecated')\n\nparser.add_argument('--lr', type=str, default='1e-3,.5e-3,.25e-3,.125e-3', help='Learning rate under piecewise constant scheme.')\nparser.add_argument('--boundaries', type=str, default='.625,.75,.875', help='Boundary percentage for changing the learning rate.')\nparser.add_argument('--weight_decay', type=float, default=0, help='Weight decay factor.')\nparser.add_argument('--num_samples', type=int, default=160000, help='Total number =total_step*batch_size of samples for training.')\nparser.add_argument('--batch_size', type=int, default=2, help='Batch size.')\n\nparser.add_argument('--load_path', type=str, default=None, help='The dir of the folder containing the pretrained checkpoints.')\nparser.add_argument('--load_step', type=int, default=-1, help='The step to load. -1 for the latest one.')\nparser.add_argument('--reset_step', action='store_true', default=True, help='Set to reset the global step. 
Otherwise resume from the step of the checkpoint.')\n\nparser.add_argument('--job_name', type=str, default='temp', help='Job name for the name of the saved checkpoint.')\n\nparser.add_argument('--save_dir', type=str, help='The dir for saving the checkpoints.')\n\nparser.add_argument('--snapshot', type=int, default=5000, help='Step interval to save a checkpoint.')\nparser.add_argument('--max_keep', type=int, default=1000, help='Max number of checkpoints kept.')\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n torch.backends.cudnn.benchmark = True\n\n # seed = 0\n # torch.backends.cudnn.benchmark = False\n # torch.backends.cudnn.deterministic = True\n # torch.manual_seed(seed)\n # np.random.seed(seed)\n # random.seed(seed)\n # torch.cuda.manual_seed(seed)\n # torch.cuda.manual_seed_all(seed)\n\n total_steps = args.num_samples // args.batch_size\n [resize_width, resize_height], [crop_width, crop_height] = [[int(v) for v in arg_str.split(',')] for arg_str in [args.resize, args.crop]]\n cas_depth_num = [int(v) for v in args.cas_depth_num.split(',')]\n cas_interv_scale = [float(v) for v in args.cas_interv_scale.split(',')]\n\n Model = importlib.import_module(f'core.{args.model_name}').Model\n Loss = importlib.import_module(f'core.{args.model_name}').Loss\n get_train_loader = importlib.import_module(f'data.{args.dataset_name}').get_train_loader\n\n dataset, loader = get_train_loader(\n args.data_root, args.num_src, total_steps, args.batch_size,\n {\n 'interval_scale': args.interval_scale,\n 'max_d': args.max_d,\n 'resize_width': resize_width,\n 'resize_height': resize_height,\n 'crop_width': crop_width,\n 'crop_height': crop_height\n },\n num_workers=args.num_workers\n )\n\n model = Model()\n model.cuda()\n model = apex_parallel.convert_syncbn_model(model)\n print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters() if p.requires_grad])))\n compute_loss = Loss()\n\n model = nn.DataParallel(model)\n\n if args.load_path is None:\n for m in model.modules():\n if any([isinstance(m, T) for T in [nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]]):\n if m.weight.requires_grad:\n nn.init.xavier_uniform_(m.weight)\n elif any([isinstance(m, T) for T in [nn.BatchNorm2d, nn.BatchNorm3d]]):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n global_step = 0\n else:\n global_step = load_model(model, args.load_path, args.load_step)\n if args.reset_step: global_step = 0\n print(f'load {os.path.join(args.load_path, str(args.load_step))}')\n\n lr = [float(v) for v in args.lr.split(',')]\n boundaries = args.boundaries\n if boundaries is not None:\n boundaries = [int(total_steps * float(b)) for b in boundaries.split(',')]\n optimizer = optim.Adam(model.parameters(), lr=lr[0], weight_decay=args.weight_decay)\n\n # model, optimizer = amp.initialize(model, optimizer, opt_level='O0')\n\n def piecewise_constant():\n if boundaries is None: return lr[0]\n i = 0\n for b in boundaries:\n if global_step < b: break\n i += 1\n curr_lr = lr[i]\n for param_group in optimizer.param_groups:\n param_group['lr'] = curr_lr\n return curr_lr\n\n model.train()\n\n pbar = tqdm.tqdm(loader, dynamic_ncols=True)\n if global_step != 0: pbar.update(global_step)\n for sample in pbar:\n if global_step >= total_steps: break\n if sample.get('skip') is not None and np.any(sample['skip']): continue\n\n curr_lr = piecewise_constant()\n\n recursive_apply(sample, lambda x: torch.from_numpy(x).float().cuda())\n ref, ref_cam, srcs, srcs_cam, gt, masks = [sample[attr] for attr 
in ['ref', 'ref_cam', 'srcs', 'srcs_cam', 'gt', 'masks']]\n\n loss, uncert_loss, less1, less3, l1, losses, outputs, refined_depth, prob_maps = None, None, None, None, None, None, None, None, None\n try:\n # est_depth, prob_map, pair_results = model([ref, ref_cam, srcs, srcs_cam], args.max_d, mode=args.mode)\n outputs, refined_depth, prob_maps = model(sample, cas_depth_num, cas_interv_scale, mode=args.mode)\n\n # losses = compute_loss([est_depth, pair_results], gt, masks, ref_cam, args.max_d, occ_guide=args.occ_guide, mode=args.mode)\n losses = compute_loss([outputs, refined_depth], gt, masks, ref_cam, args.max_d, occ_guide=args.occ_guide, mode=args.mode)\n \n loss, uncert_loss, less1, less3, l1 = losses[:5] #MVS\n # loss, less1, less3, l1 = losses[:4]\n\n if np.isnan(loss.item()):\n raise NanError\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n losses_np = [v.item() for v in losses[:5]] #MVS\n loss, uncert_loss, less1, less3, l1 = losses_np #MVS\n # loss, less1, less3, l1 = losses_np\n\n stats = losses[5]\n stats_np = [(l1.item(), less1.item(), less3.item()) for l1, less1, less3 in stats]\n stats_str = ''.join([f'({l1:.3f} {less1*100:.2f} {less3*100:.2f})' for l1, less1, less3 in stats_np])\n\n pbar.set_description(f'{loss:.3f}{stats_str}{l1:.3f}')\n # pbar.set_description(f'{loss:.4f} {less1:.3f} {less3:.3f} {l1:.4f}') #MVS\n # pbar.set_description(f'{less1:.3f} {less3:.3f} {l1:.4f}')\n except NanError:\n print(f'nan: {global_step}/{total_steps}')\n gc.collect()\n torch.cuda.empty_cache()\n # optimizer.zero_grad()\n # optimizer.step()\n\n if global_step != 0 and global_step % args.snapshot == 0:\n save_model({\n 'global_step': global_step,\n 'state_dict': model.state_dict()\n }, args.save_dir, args.job_name, global_step, args.max_keep)\n\n global_step += 1\n del loss, uncert_loss, less1, less3, l1, losses, outputs, refined_depth, prob_maps\n\n save_model({\n 'global_step': global_step,\n 'state_dict': model.state_dict()\n }, args.save_dir, args.job_name, global_step, args.max_keep)\n","repo_name":"jzhangbs/Vis-MVSNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9439,"program_lang":"python","lang":"en","doc_type":"code","stars":211,"dataset":"github-code","pt":"2"}
+{"seq_id":"34547947448","text":"\nimport cv2\nimport imageio\n\n# cascadeler filtre serisi bunlar yüzü ve gözü tespit etmek içi ardı ardına uygulanacak\nface_cascade = cv2.CascadeClassifier('haarcascade-frontalface-default.xml')\n\neye_cascade = cv2.CascadeClassifier('haarcascade-eye.xml')\n\n\ndef detect(frame):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # resmi gri tonuna çevirdik\n\n # bu işlem yüz tespiti yapar ve tupple döndürür bu tupple içinde x y kordinatı (dikdörtgenin sol üst köşesi\n # h(yukseklik) w (genislik) değerleri vardır. 1.3 olarak verdiğimiz değer scale dir ne kadar ölçekleneceği\n # 5 değeri ise komşu sayısı en az 5 pencere olursa oraya yüz deriz.\n\n faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n\n for (x, y, w, h) in faces: # x y w h değerlerini kaç tane yüz var ise okadar alıyoruz\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n gray_face=gray[y:y+h,x:x+w] # gözü yüzün üzerinde arıyoruz yüzün seçili olduğu alanı aldık\n color_face=frame[y:y+h,x:x+w] #yüzün olsuğu alana renkli olarak aldık\n\n eyes=eye_cascade.detectMultiScale(gray_face,1.1,3) # gri yüzün üzerinde göz tespiti yaptık ve tupple dondurduk\n for (ex,ey,ew,eh) in eyes: # koordinaları aldık\n cv2.rectangle(color_face,(ex,ey),(ex+w,ey+h),(0,255,0),2) # renkkli yüzde gözlere kareler çizdik\n\n return frame\n\n\nreader=imageio.get_reader('1.mp4') #videoyu okuduk\nfps=reader.get_meta_data()['fps'] # okuduğumuz videonun fps değerini aldık\nwriter=imageio.get_writer('output.mp4',fps=fps) # yeni video oluşturmak için videonun adı ve kaç fps olucak\n\nfor i,frame in enumerate(reader): # reader ile aldığımız videodan tek tek frame burada i sayaç kaçıncı frami aldığımızı görmek için\n frame=detect(frame)#alıp bunların üzerine detect fonksiyonunu uyguladık\n writer.append_data(frame)#detect fonk uygulanmış frameleri tektek videomuza ekliyoruz\n print(i) # kaçıncı framdeyiz\n\nwriter.close() # videoyu yazmayı kapatıyoruz\n","repo_name":"burakbaga/face-detection","sub_path":"face_det.py","file_name":"face_det.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14137085556","text":"import godunovfunctions\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom sklearn.model_selection import KFold\nimport csv\nimport time\nfrom tqdm import tqdm\n\n\n# Loading all data\ndata_name = \"short_highway\"\ndatafolder = os.path.join(os.getcwd(), \"data\", data_name)\ndf = godunovfunctions.load_data(datafolder, print_logs=True)\nX_df = df[[1, 2, 3, 4, 5, 6, \"gem_dichtheid\"]]\nY_df = df[\"gem_intensiteit\"]\n\n\ndef train_and_save(\n model_linear_stack = nn.Sequential(\n nn.Linear(6, 4),\n nn.Softplus(),\n nn.Linear(4, 3),\n nn.Softplus(),\n ),\n X_min_normalizer = 0.0,\n X_max_normalizer = 100.0,\n Y_normalizer = 10000.0,\n criterion_function = nn.MSELoss,\n optimizer_function = optim.Adam,\n bias_init_function = nn.init.zeros_,\n weights_init_function = nn.init.xavier_uniform_,\n lr = 0.01,\n epochs = 1000,\n batch_size = 1000,\n k_folds = 5,):\n # ----------- From here, the hyperparameter search loop starts -------------\n # Normalize data, create tensors and create dataloader\n X = (torch.tensor(X_df.values, dtype=torch.float32) - X_min_normalizer) / (X_max_normalizer - X_min_normalizer)\n Y = torch.tensor(Y_df.values, dtype=torch.float32).view(-1) / Y_normalizer\n dataset = TensorDataset(X, Y)\n\n # We will save all results in logging_data\n logging_data = []\n\n kfold = KFold(n_splits=k_folds, shuffle=True, random_state=42)\n for fold, (train_ids, test_ids) in enumerate(kfold.split(dataset)):\n train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)\n test_subsampler = torch.utils.data.SubsetRandomSampler(test_ids)\n\n # Define data loaders for training and testing data in this fold\n trainloader = torch.utils.data.DataLoader(\n dataset, \n batch_size=batch_size, sampler=train_subsampler)\n testloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size, sampler=test_subsampler)\n \n # Set up model and optimizer\n torch.manual_seed(42)\n model = godunovfunctions.NeuralNetwork(\n lin_stack=model_linear_stack,\n bias_init_function=bias_init_function,\n weights_init_function=weights_init_function)\n optimizer = optimizer_function(model.parameters(), lr=lr)\n criterion = criterion_function()\n\n # Run epochs\n starttime = time.time()\n for epoch in tqdm(range(epochs), desc=f\"Epochs in fold {fold}\", ncols=100):\n epoch_starttime = time.time()\n\n # Loop over all training batches\n train_loss_total = 0\n tot_batches = 0\n for x_batch, y_batch in trainloader:\n # Get train loss\n y_batch_pred = model(x_batch)\n loss = criterion(y_batch_pred , y_batch)\n\n # Save that loss\n train_loss_total += loss.item()\n tot_batches += 1\n\n # Take a step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # Scale loss according to batches\n train_loss_total = train_loss_total / tot_batches\n\n # Loop over all test batches\n test_loss_total = 0\n tot_batches = 0\n for x_batch, y_batch in testloader:\n # Get test loss\n y_batch_pred = model(x_batch)\n loss = criterion(y_batch_pred, y_batch)\n\n # Save that loss\n test_loss_total += loss.item()\n tot_batches += 1\n \n # Scale loss according to batches\n test_loss_total = test_loss_total / tot_batches\n\n # Save the epoch results\n logging_data.append([fold, epoch, train_loss_total, test_loss_total, time.time() - epoch_starttime, time.time() - starttime])\n\n # Save the logging_data in a csv\n setting_data = [\n data_name,\n str(model_linear_stack).replace(\"\\n\", \"\"),\n 
X_min_normalizer,\n X_max_normalizer,\n Y_normalizer,\n str(criterion_function),\n str(optimizer_function),\n str(bias_init_function),\n str(weights_init_function),\n lr,\n epochs,\n batch_size,\n k_folds\n ]\n\n with open(\"hyperparameter_logs.csv\", \"a\", newline=\"\") as f_object:\n writer_object = csv.writer(f_object)\n for row in logging_data:\n writer_object.writerow(setting_data + row)\n f_object.close()\n\n\npossible_lin_stacks = [\n nn.Sequential(\n nn.Linear(6, 3),\n nn.Softplus(),\n ),\n nn.Sequential(\n nn.Linear(6, 10),\n nn.Softplus(),\n nn.Linear(10, 3),\n nn.Softplus(),\n ),\n nn.Sequential(\n nn.Linear(6, 20),\n nn.Softplus(),\n nn.Linear(20, 10),\n nn.Softplus(),\n nn.Linear(10, 3),\n nn.Softplus(),\n ),\n nn.Sequential(\n nn.Linear(6, 40),\n nn.Softplus(),\n nn.Linear(40, 20),\n nn.Softplus(),\n nn.Linear(20, 10),\n nn.Softplus(),\n nn.Linear(10, 3),\n nn.Softplus(),\n ),\n nn.Sequential(\n nn.Linear(6, 50),\n nn.Softplus(),\n nn.Linear(50, 40),\n nn.Softplus(),\n nn.Linear(40, 20),\n nn.Softplus(),\n nn.Linear(20, 10),\n nn.Softplus(),\n nn.Linear(10, 3),\n nn.Softplus()\n )\n]\n\nsetupcounter = 1\nfor lin_stack in possible_lin_stacks:\n print(f\"\\nTraining and testing setup {setupcounter}\")\n train_and_save(model_linear_stack=lin_stack)\n setupcounter += 1\n\n\n# # Settings\n# model_linear_stack = nn.Sequential(\n# nn.Linear(6, 50),\n# nn.Softplus(),\n# nn.Linear(50, 25),\n# nn.Softplus(),\n# nn.Linear(25, 10),\n# nn.Softplus(),\n# nn.Linear(10, 3),\n# nn.Softplus(),\n# )\n# X_min_normalizer = 0.0\n# X_max_normalizer = 100.0\n# Y_normalizer = 10000.0\n# criterion_function = nn.MSELoss\n# optimizer_function = optim.Adam\n# bias_init_function = nn.init.zeros_\n# weights_init_function = nn.init.xavier_uniform_\n# lr = 0.01\n# epochs = 20\n# batch_size = 1000\n# k_folds = 5\n\n","repo_name":"vossemeijssen/macroscopic_traffic_model","sub_path":"hyperparameter_tuning.py","file_name":"hyperparameter_tuning.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"33597040517","text":"# coding: utf-8\n\nimport os,sys\nimport copy\nimport argparse\nimport numpy as np\nfrom scipy import stats\nimport Bio.Cluster\nfrom sklearn import metrics\nimport pandas as pd\nfrom ranking import RankingMeasures\n\n# parser settings\nparser = argparse.ArgumentParser()\nparser.add_argument('-test','--test-metric', \\\n action='store', \\\n nargs=None, \\\n const=None, \\\n default='cor', \\\n type=str, \\\n choices=None, \\\n help='Test metric option which you\\'d like to set.', \\\n metavar=None)\nparser.add_argument('-k','--top-k', \\\n action='store', \\\n nargs=None, \\\n const=None, \\\n default=10, \\\n type=int, \\\n choices=None, \\\n help='Number of top-k value which you\\'d like to set.', \\\n metavar=None)\nparser.add_argument('-thre','--threshold', \\\n action='store', \\\n nargs=None, \\\n const=None, \\\n default=4, \\\n type=int, \\\n choices=None, \\\n help='Number of threshold value which you\\'d like to set.', \\\n metavar=None)\n\n# config\nNUM_FOLDS = 5\nGROUNDTRUTH = '../dataset/open_peer_review_v3/peer_review/translated_groundtruth.csv'\nPEER_REVIEW = '../dataset/open_peer_review_v3/peer_review/peer_review_forPG3.csv'\n\n# load true ability\ngDF = pd.read_csv(GROUNDTRUTH)\ntrue_ability = gDF['grade'].get_values()\n\n# calculate mean grades\nrDF = pd.read_csv(PEER_REVIEW)\nmean_grades = rDF[['receiver_id','value']].groupby(['receiver_id']).mean()['value'].get_values()\nmean_corrected = rDF[['receiver_id','corrected']].groupby(['receiver_id']).mean()['corrected'].get_values()\nmean_diff = rDF[['receiver_id','diff']].groupby(['receiver_id']).mean()['diff'].get_values()\n\n# generate random permutation and fold that index\nnp.random.seed(12345678)\npermu =np.random.permutation(len(true_ability))\nidx_inFold = np.array_split(permu, NUM_FOLDS)\n\n# set metric\ncorrcoef = lambda true,estimated: np.corrcoef(true, estimated)[0,1]\nkendalltau = lambda true,estimated: stats.kendalltau(true, estimated)[0]\nspearmanrho = lambda true,estimated: 1-Bio.Cluster.distancematrix((true,estimated), dist=\"s\")[1][0]\ndef precisionAtK(true,estimated,top_k,threshold):\n top_ranker_ture = np.array((true >= threshold))\n id_top_k = estimated.argsort()[::-1][:top_k]\n TP = top_ranker_ture[id_top_k].sum()\n return TP/float(top_k)\ndef auc(true,estimated,threshold):\n fpr, tpr, thresholds = metrics.roc_curve(true >= threshold, estimated, pos_label=1)\n return metrics.auc(fpr, tpr)\ndef nDCG(true,estimated,top_k):\n rm = RankingMeasures(estimated, true)\n return rm.nDCG(k=top_k)\n\n#argparse\nargs = parser.parse_args()\n# set test metric\nif args.test_metric == 'cor':\n func_metric = corrcoef\nelif args.test_metric == 'ktau':\n func_metric = kendalltau\nelif args.test_metric == 'srho':\n func_metric = spearmanrho\nelif args.test_metric == 'preck':\n top_k = args.top_k\n threshold = args.threshold\n func_metric = lambda true,estimated: precisionAtK(true,estimated,top_k,threshold)\nelif args.test_metric == 'auc':\n threshold = args.threshold\n func_metric = lambda true,estimated: auc(true,estimated,threshold)\nelif args.test_metric == 'ndcg':\n top_k = args.top_k\n func_metric = lambda true,estimated: nDCG(true,estimated,top_k)\nelse:\n print('Error: set test metrics [cor|ktau|srho|preck|auc|ndcg]')\n sys.exit()\n\nstatistic_test = np.empty(0)\nfor loop in xrange(NUM_FOLDS):\n buf_list = copy.copy(idx_inFold)\n idx_train = buf_list.pop(loop)\n idx_test = np.concatenate(buf_list)\n #test\n true_test = true_ability[idx_test]\n mean_grade_test = 
mean_grades[idx_test]\n corrcoef_test = func_metric(true_test, mean_grade_test)\n statistic_test = np.append(statistic_test,corrcoef_test)\nprint('-------- result mean grade --------')\nprint('mean:{0}, std:{1}'.format(statistic_test.mean(),statistic_test.std()))\n\nstatistic_test = np.empty(0)\nfor loop in xrange(NUM_FOLDS):\n buf_list = copy.copy(idx_inFold)\n idx_train = buf_list.pop(loop)\n idx_test = np.concatenate(buf_list)\n #test\n true_test = true_ability[idx_test]\n mean_corrected_test = mean_corrected[idx_test]\n corrcoef_test = func_metric(true_test, -mean_corrected_test)\n statistic_test = np.append(statistic_test,corrcoef_test)\nprint('-------- result mean corrected --------')\nprint('mean:{0}, std:{1}'.format(statistic_test.mean(),statistic_test.std()))\n\nstatistic_test = np.empty(0)\nfor loop in xrange(NUM_FOLDS):\n buf_list = copy.copy(idx_inFold)\n idx_train = buf_list.pop(loop)\n idx_test = np.concatenate(buf_list)\n #test\n true_test = true_ability[idx_test]\n mean_diff_test = mean_diff[idx_test]\n corrcoef_test = func_metric(true_test, -mean_diff_test)\n statistic_test = np.append(statistic_test,corrcoef_test)\nprint('-------- result mean diff --------')\nprint('mean:{0}, std:{1}'.format(statistic_test.mean(),statistic_test.std()))\n","repo_name":"takerun/PeerCorrection","sub_path":"tools/calculateMeanGrade.py","file_name":"calculateMeanGrade.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"8182178846","text":"import Twitter_creds as TC\nimport twitter\nimport json\n\n\noutfile = open(\"test2.txt\",\"w\", encoding=\"utf-8\")\napi = twitter.Api(consumer_key=TC.CONSUMER_KEY,\n consumer_secret=TC.CONSUMER_SECRET,\n access_token_key=TC.ACCESS_TOKEN,\n access_token_secret=TC.ACCESS_TOKEN_SECRET,\n sleep_on_rate_limit=True)\nhash_tag_list = input(\"Please enter your search terms\\nBe sure to separate them with a comma(,).\\n>>\").split(\",\")\n#results = api.GetSearch(raw_query=\"q=%40twitterapi\")\n#results = str(api.GetSearch(raw_query=\"q=(%EF%82%A7%09%E2%80%9CHIV%E2%80%9D%20OR%20%E2%80%9CHIV%2FAIDS%E2%80%9D%20OR%20%E2%80%9CHIV%20OR%20testing%E2%80%9D%20OR%20%E2%80%9CHIV%20OR%20medication%E2%80%9D%20OR%20%E2%80%9CAIDS%20OR%20test%E2%80%9D%20OR%20%E2%80%9CHIV%20OR%20test%E2%80%9D%20OR%20%E2%80%9CHIV%2B%E2%80%9D%20OR%20%E2%80%9CHIV(%2B)%E2%80%9D%20OR%20%E2%80%9Crapid-HIV%20OR%20test%E2%80%9D%20OR%20%E2%80%9Crapid%20OR%20HIV%20OR%20test%E2%80%9D%20OR%20%E2%80%9Cora-sure%E2%80%9D%20OR%20%E2%80%9Corasure%E2%80%9D%20OR%20%E2%80%9CAcquired%20OR%20Immune%20OR%20Deficiency%20OR%20Syndrome%E2%80%9D%20OR%20%E2%80%9CAcyclovir%E2%80%9D%20OR%20%E2%80%9CADAP%E2%80%9D%20OR%20%E2%80%9CKaposi%27s%20OR%20Sarcoma%E2%80%9D%20OR%20%E2%80%9CThrush%E2%80%9D)&src=typed_query\"))\nfor terms in hash_tag_list:\n results = api.GetSearch(term=terms, count= 100, lang=\"en\")\n #, since = 2020 - 2 - 15, until = 2020 - 2 - 16\n print(results)\n for t in results:\n tweets = [t.AsDict() for t in results]\n print(t)\n #print(t[\"text\"], t[\"lang\"])\n str_result = str(results)\n outfile.write(str_result +\"\\n\")\n\n","repo_name":"chacreton190/Social-Media-Data-Project","sub_path":"Twitter Hist Search.py","file_name":"Twitter Hist Search.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2039934973","text":"from __future__ import print_function, division\nimport torch\nimport torch.nn as nn\nimport functools\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if hasattr(m.bias, 'data'):\n m.bias.data.fill_(0)\n elif classname.find('BatchNorm2d') != -1:\n m.weight.data.normal_(1.0, 0.02)\n # m.weight.data.fill_(1)\n m.bias.data.fill_(0)\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\nclass ModelFusion(nn.Module):\n def __init__(self, config):\n super(ModelFusion, self).__init__()\n self.fc_1 = nn.Linear(config.pred_length * 256, 512)\n self.fc_2 = nn.Linear(512, config.label_size)\n self.relu = nn.ReLU(True)\n self.sig = nn.Sigmoid()\n self.config = config\n self.dis = nn.Linear(512, 1)\n if not config.resume:\n self.fc_1.weight.data.normal_(0, 0.0001)\n self.fc_1.bias.data.zero_()\n\n def forward(self, x):\n x = x.view(-1, self.config.pred_length * 256)\n net = self.fc_1(x)\n net0 = self.relu(net)\n net = self.fc_2(net0)\n # # net0 = self.dropout(net)\n #\n # dis_feature = self.sig(self.dis(net0))\n return net\n\n\nclass discriminator_audio(nn.Module):\n def __init__(self):\n super(discriminator_audio, self).__init__()\n self.fc1 = nn.Linear(256, 256)\n self.fc2 = nn.Linear(256, 1)\n self.relu = nn.ReLU(True)\n self.sig = nn.Sigmoid()\n\n def forward(self, x):\n x = x.view(-1, 256)\n net = self.fc1(x)\n net = self.fc2(self.relu(net))\n dis1 = self.sig(net)\n return dis1\n","repo_name":"Hangz-nju-cuhk/Talking-Face-Generation-DAVS","sub_path":"network/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":796,"dataset":"github-code","pt":"2"}
+{"seq_id":"34465964408","text":"from sqlalchemy import select\nfrom sqlalchemy import and_\nfrom database.models.Images import Images as Image\nfrom database.models.Images import engine\nfrom database.models.Objects_ import Objects_ as Object_\nfrom database.models.Coordinates import Coordinates\nfrom datetime import datetime\n\n\n\ndef getImageByFilename(filename):\n conn = engine.connect()\n\n selectStmt = select([Image]).where(Image.filename == filename)\n res = conn.execute(selectStmt).fetchone() # можно сделать fetchall и если будет больше одного результата, вернуть фолс\n if res is None:\n raise ValueError(f\"Image {filename} not found on database\")\n return dict(res)\n\n\ndef getAllFilenames():\n conn = engine.connect()\n selectStmt = select([Image.filename])\n res = conn.execute(selectStmt).fetchall()\n stringRes = [i[0] for i in res]\n return stringRes\n\n\ndef getObjects(filename):\n conn = engine.connect()\n selectStmt = select([Object_]).where(and_(Object_.imageId == Image.id, Image.filename == filename))\n objectsInfo = conn.execute(selectStmt).fetchall() # т.к. объектов может быть много\n if objectsInfo is None:\n raise ValueError(f\"Objects not found on database\")\n stringRes = [dict(i) for i in objectsInfo]\n return stringRes\n\n\ndef getImageBetweenDatesFromCamera(cameraId, startDate: datetime, endDate: datetime):\n conn = engine.connect()\n selectStmt = select([Image]).where(and_(Image.numberOfCam == cameraId,\n Image.fixationDatetime >= startDate,\n Image.fixationDatetime <= endDate))\n images = conn.execute(selectStmt).fetchall()\n stringRes = [list(i) for i in images]\n return stringRes\n\n\ndef getCoord(filename):\n conn = engine.connect()\n idImage = select([Image.id]).where(filename == Image.filename)\n coordinates = select([Coordinates.LDx, Coordinates.LDy,\n Coordinates.RUx, Coordinates.RUy])\\\n .where(and_(idImage == Object_.imageId,\n Coordinates.id == Object_.id))\n\n objectsInfo = conn.execute(coordinates).fetchall()\n stringRes = [list(i) for i in objectsInfo]\n return stringRes\n\n\n\n\n\n","repo_name":"Sapfir0/web-premier-eye","sub_path":"application/database/dbAPI.py","file_name":"dbAPI.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71944598768","text":"from requests_html import HTML\nimport requests\n\n\ndef parse_article_entries(doc):\n\n html = HTML(html=doc)\n\n post_entries = html.find('div.r-ent')\n\n return post_entries\n\ndef fetch(url):\n\n response = requests.get(url)\n\n response = requests.get(url, cookies={'over18': '1'})\n\n return response\n\ndef parse_article_meta(entry):\n\n meta = {'title': entry.find('div.title', first=True).text,\n 'push': entry.find('div.nrec', first=True).text,\n 'date': entry.find('div.date', first=True).text,\n 'author': entry.find('div.author', first=True).text,\n 'link': entry.find('div.title > a', first=True).attrs['href'],\n }\n try:\n meta['author'] = entry.find('div.author', first=True).text\n meta['link'] = entry.find('div.title > a', first=True).attrs['href']\n except AttributeError:\n if '(本文已被刪除)' in meta['title']:\n match_author = re.search('\\[(\\w*)\\]', meta['title'])\n if match_author:\n meta['author'] = match_author.group(1)\n elif re.search('已被\\w*刪除', meta['title']):\n match_author = re.search('\\<(\\w*)\\>', meta['title'])\n if match_author:\n meta['author'] = match_author.group(1)\n return meta\n \n return meta\n\n\nurl = 'https://www.ptt.cc/bbs/movie/index.html'\n\nresp = fetch(url) # step-1\n\npost_entries = parse_article_entries(resp.text) # step-2\n\n\nfor entry in post_entries:\n meta = parse_article_meta(entry)\n print(meta) # result of setp-3\n\n \n","repo_name":"funew4670/pttwebcrawler","sub_path":"pttcrawler.py","file_name":"pttcrawler.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42507310433","text":"from plivo import exceptions\nfrom tests.base import PlivoResourceTestCase\nfrom tests.decorators import with_response\n\n\nclass LookupTest(PlivoResourceTestCase):\n @with_response(200)\n def test_get(self):\n number = '+14154305555'\n resp = self.client.lookup.get(number)\n self.assertResponseMatches(resp)\n self.assertEqual(self.client.current_request.method, 'GET')\n","repo_name":"plivo/plivo-python","sub_path":"tests/resources/test_lookup.py","file_name":"test_lookup.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"2"}
+{"seq_id":"17550609475","text":"import numpy as np\n\ndef bb_iou(a, b):\n x1 = max(a[0], b[0])\n y1 = max(a[1], b[1])\n x2 = min(a[2], b[2])\n y2 = min(a[3], b[3])\n\n interArea = max(0, x2-x1) * max(0, y2-y1)\n\n if interArea == 0: return 0.0\n\n boxAArea = (a[2]-a[0]) * (a[3]-a[1])\n boxBArea = (b[2]-b[0]) * (b[3]-b[1])\n\n return interArea / float(boxAArea+boxBArea - interArea)\n\n\ndef find_matching_box(box_list, new_box, match_iou):\n best_iou = match_iou\n best_idx = -1\n for i in range(len(box_list)):\n box = box_list[i]\n iou = bb_iou(box[:4], new_box[:4])\n if iou > best_iou:\n best_idx = i\n best_iou = iou\n return best_idx, best_iou\n\ndef get_weighted_avg_box(clustered_boxes):\n confidence = 0\n box = np.zeros(5, dtype=np.float32)\n for b in clustered_boxes:\n # scale by the confidence\n box[:4] += np.array(b[:4]) * b[-1]\n confidence += b[-1]\n\n # avg confidence\n box[-1] = confidence / len(clustered_boxes)\n box[:4] /= confidence\n return box\n","repo_name":"matthewygf/all-for-one","sub_path":"preds/box_utils.py","file_name":"box_utils.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23051562729","text":"import time\r\nimport requests\r\nimport hashlib\r\nimport hmac\r\n\r\nTICK_INTERVAL = 60 # seconds\r\nAPI_KEY = 'my-api-key'\r\nAPI_SECRET_KEY = b'my-secret-key'\r\n\r\n\r\ndef main():\r\n print('Starting trader bot...')\r\n\r\n while True:\r\n start = time.time()\r\n tick()\r\n end = time.time()\r\n\r\n # Sleep the thread if needed\r\n if end - start < TICK_INTERVAL:\r\n time.sleep(TICK_INTERVAL - (end - start))\r\n\r\n\r\ndef tick():\r\n print('Running routine')\r\n\r\n market_summaries = simple_request('https://bittrex.com/api/v1.1/public/getmarketsummaries')\r\n for summary in market_summaries['result']:\r\n market = summary['MarketName']\r\n day_close = summary['PrevDay']\r\n last = summary['Last']\r\n\r\n if day_close > 0:\r\n percent_chg = ((last / day_close) - 1) * 100\r\n else:\r\n print('day_close zero for ' + market)\r\n\r\n print(market + ' changed ' + str(percent_chg))\r\n\r\n if 40 < percent_chg < 60:\r\n # Fomo strikes! Let's buy some\r\n if has_open_order(market, 'LIMIT_BUY'):\r\n print('Order already opened to buy 5 ' + market)\r\n else:\r\n print('Purchasing 5 units of ' + market + ' for ' + str(format_float(last)))\r\n res = buy_limit(market, 5, last)\r\n print(res)\r\n\r\n if percent_chg < -20:\r\n # Do we have any to sell?\r\n balance_res = get_balance_from_market(market)\r\n current_balance = balance_res['result']['Available']\r\n\r\n if current_balance > 5:\r\n # Ship is sinking, get out!\r\n if has_open_order(market, 'LIMIT_SELL'):\r\n print('Order already opened to sell 5 ' + market)\r\n else:\r\n print('Selling 5 units of ' + market + ' for ' + str(format_float(last)))\r\n res = sell_limit(market, 5, last)\r\n print(res)\r\n else:\r\n print('Not enough ' + market + ' to open a sell order')\r\n\r\n\r\ndef buy_limit(market, quantity, rate):\r\n url = 'https://bittrex.com/api/v1.1/market/buylimit?apikey=' + API_KEY + '&market=' + market + '&quantity=' + str(quantity) + '&rate=' + format_float(rate)\r\n return signed_request(url)\r\n\r\n\r\ndef sell_limit(market, quantity, rate):\r\n url = 'https://bittrex.com/api/v1.1/market/selllimit?apikey=' + API_KEY + '&market=' + market + '&quantity=' + str(quantity) + '&rate=' + format_float(rate)\r\n return signed_request(url)\r\n\r\n\r\ndef get_balance_from_market(market_type):\r\n markets_res = simple_request('https://bittrex.com/api/v1.1/public/getmarkets')\r\n markets = markets_res['result']\r\n for market in markets:\r\n if market['MarketName'] == market_type:\r\n return get_balance(market['MarketCurrency'])\r\n\r\n # Return a fake response of 0 if not found\r\n return {'result': {'Available': 0}}\r\n\r\n\r\ndef get_balance(currency):\r\n url = 'https://bittrex.com/api/v1.1/account/getbalance?apikey=' + API_KEY + '¤cy=' + currency\r\n res = signed_request(url)\r\n\r\n if res['result'] is not None and len(res['result']) > 0:\r\n return res\r\n\r\n # If there are no results, than your balance is 0\r\n return {'result': {'Available': 0}}\r\n\r\n\r\ndef get_open_orders(market):\r\n url = 'https://bittrex.com/api/v1.1/market/getopenorders?apikey=' + API_KEY + '&market=' + market\r\n return signed_request(url)\r\n\r\n\r\ndef has_open_order(market, order_type):\r\n orders_res = get_open_orders(market)\r\n orders = orders_res['result']\r\n\r\n if orders is None or len(orders) == 0:\r\n return False\r\n\r\n # Check all orders for a LIMIT_BUY\r\n for order in orders:\r\n if order['OrderType'] == order_type:\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef signed_request(url):\r\n now = 
time.time()\r\n url += '&nonce=' + str(now)\r\n signed = hmac.new(API_SECRET_KEY, url.encode('utf-8'), hashlib.sha512).hexdigest()\r\n headers = {'apisign': signed}\r\n r = requests.get(url, headers=headers)\r\n return r.json()\r\n\r\n\r\ndef simple_request(url):\r\n r = requests.get(url)\r\n return r.json()\r\n\r\n\r\ndef format_float(f):\r\n return \"%.8f\" % f\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"tmstieff/BittrexBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"2"}
+{"seq_id":"28427543867","text":"honapok = ['januar', 'februar', 'marcius', 'aprilis',]\n #print(honapok)\n#print(','.join(honapok))\n#print(len(honapok))\n#print(honapok[:-1])\n#szo = \"almafa\"\nprint(szo[:-3])\n\ngyumolcsok = []\n\ngyumolcs = None\nwhile gyumolcs !='':\n if gyumolcs != '':\n gyumolcsok.append(gyumolcs)\n\nprint(gyumolcsok)","repo_name":"kizsi2019/22_10D2","sub_path":"Jakab Máté/listak.py","file_name":"listak.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"hu","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"16030251066","text":"import requests\nfrom dateutil import parser\nfrom datetime import datetime as DT\nimport binascii\nfrom db import Мemorizer\nfrom app_user import User\nimport time\n\n\nclass Finder():\n def __init__(self, user, token):\n self.token = token\n self.user = user\n self.attribute_list = user.attribute_list\n self.param_dict = self.request_param()\n self.match_index = 0\n\n def request_param(self) -> dict:\n param_dict = {}\n param_dict['age_from'] = (self.age_calc(self.attribute_list['bdate'])) - 5\n param_dict['age_to'] = (self.age_calc(self.attribute_list['bdate'])) + 5\n param_dict['sex'] = 1 if self.attribute_list['sex'] == 2 else 2\n param_dict['city'] = self.attribute_list['city']['id']\n return param_dict\n\n def age_calc(self, date) -> int:\n result = (DT.date(DT.now()) - (DT.date(parser.parse(date)))).days // 365\n return result\n\n def request(self, status) -> list:\n age_request = 9\n respons_list = []\n for i in range(age_request):\n time.sleep(1)\n url = 'https://api.vk.com/method/users.search/'\n params = {'access_token': self.token, 'sort': 1, 'offset': 0, 'count': 1000,\n 'common_count': 0, 'sex': self.param_dict['sex'], 'status': status,\n 'age_from': self.param_dict['age_from'] + i, 'age_to': self.param_dict['age_to'],\n 'city': self.param_dict['city'],\n 'fields': 'interests, about, books, music, connections, people_main, life_main, personal, political',\n 'v': '5.131'}\n respons = requests.get(url, params=params)\n respons_list = respons_list + respons.json()['response']['items']\n return respons_list\n\n def matcher(self, user_profile, candidate_profile) -> bool:\n try:\n self.match_index += self._direct_matching(user_profile['personal']['religion_id'],\n candidate_profile['personal']['religion_id'])\n except:\n self.match_index += 2\n try:\n self.match_index += self._direct_matching(user_profile['personal']['political'],\n candidate_profile['personal']['political'])\n except:\n self.match_index += 2\n try:\n self.match_index += self._direct_matching(user_profile['personal']['life_main'],\n candidate_profile['personal']['life_main'])\n except:\n self.match_index += 2\n try:\n self.match_index += self._not_direct_match(user_profile['personal']['smoking'],\n candidate_profile['personal']['smoking'], 'habits')\n except:\n self.match_index += 2\n try:\n self.match_index += self._not_direct_match(user_profile['personal']['life_main'],\n candidate_profile['personal']['life_main'])\n except:\n self.match_index += 2\n try:\n self.match_index += self.compaire(user_profile['personal']['inspired_by'],\n candidate_profile['personal']['inspired_by'])\n except:\n self.match_index += 2\n try:\n self.match_index += self.lang_macth(user_profile['personal']['langs'],\n candidate_profile['personal']['langs'])\n except:\n self.match_index += 2\n try:\n self.match_index += self.compaire(user_profile['about'], candidate_profile['about'])\n except:\n self.match_index += 2\n try:\n self.match_index += self.compaire(user_profile['interests'], candidate_profile['interests'])\n except:\n self.match_index += 2\n try:\n self.match_index += self.compaire(user_profile['books'], candidate_profile['books'])\n except:\n self.match_index += 2\n\n if self.match_index >= 190:\n return True\n else:\n return False\n\n def lang_macth(self, value1, value2) -> int:\n result = 0\n for i in value1:\n if i in value2:\n result += 3\n return result\n\n def canonize_text(self, value) -> list:\n symbols = '.,!?:;-\\n\\r()'\n words = (u'это', u'как', u'так',\n u'и', u'в', u'над',\n u'к', 
u'до', u'не',\n u'на', u'но', u'за',\n u'то', u'с', u'ли',\n u'а', u'во', u'от',\n u'со', u'для', u'о',\n u'же', u'ну', u'вы',\n u'бы', u'что', u'кто',\n u'он', u'она')\n return ([x for x in [y.strip(symbols) for y in value.lower().split()] if x and (x not in words)])\n\n def shingle(self, value) -> list:\n shingleLen = 3\n out = []\n for i in range(len(value) - (shingleLen - 1)):\n out.append(binascii.crc32(' '.join([x for x in value[i:i + shingleLen]]).encode('utf-8')))\n\n return out\n\n def compaire(self, value1, value2) -> int:\n same = 0\n for i in range(len(value1)):\n if value1[i] in value2:\n same += 1\n\n result = int(same * 2 / float(len(value1) + len(value2)) * 10)\n if result >= 9:\n return 9\n elif result >= 7:\n return 5\n elif result > 5:\n return 3\n else:\n return 0\n\n def _direct_matching(self, value1, value2) -> int:\n if value1 == value2:\n return 9\n else:\n return 0\n\n def _not_direct_match(self, value1, value2, flag='') -> int:\n m_list = ['56', '65', '82', '28']\n if flag == 'habits':\n if abs(value1 - value2) <= 1:\n return 9\n if abs(value1 - value2) <= 3:\n return 5\n else:\n return 0\n else:\n if value1 == value2:\n return 9\n elif str(value1) + str(value2) in m_list:\n return 5\n else:\n return 0\n\n def check_common_subscriptions(self, item) -> None:\n try:\n candidate = User(item['id'])\n for group in candidate.group_list():\n if group in self.user.group_list():\n self.match_index += 2\n for friend in candidate.friends_list:\n if friend in self.user.friends_list:\n self.match_index += 9\n except:\n self.match_index += 0\n\n def sorter(self) -> dict:\n search_list = []\n memorizer = Мemorizer()\n req_list = self.request(6) + self.request(1) + self.request(5)\n for item in req_list:\n if self.matcher(self.attribute_list, item):\n self.check_common_subscriptions(item)\n if self.match_index >= 210:\n if memorizer.find_previos_value(item) == []:\n search_list.append(item)\n self.match_index = 0\n else:\n continue\n return search_list\n\n\n\n","repo_name":"GusevADresume/Course_Work_2","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26181280712","text":"from typing import List\n\ndef puzzle(arr: List) -> int:\n # count number of times depth increases from one group of three measurements to the next\n group_sums = [sum(arr[i:i+3]) for i in range(0, len(arr))]\n print(group_sums)\n increases = [int(group_sums[i] > group_sums[i-1]) for i in range(1, len(group_sums))]\n return sum(increases)\n\nwith open('report.txt') as f:\n data = [int(line) for line in f.readlines()]\n\n# data = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]\n\nprint(puzzle(data))\n","repo_name":"benrosenberg/advent-of-code-2021","sub_path":"1/1b.py","file_name":"1b.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"41087921093","text":"import collections\ninventory = collections.OrderedDict()\nN = int(input())\nfor i in range(0, N):\n input_line = input()\n item_name, price = input_line.split()[:-1], int(input_line.split()[-1])\n if str(' '.join(item_name)) in inventory:\n inventory[' '.join(item_name)] += price\n else:\n inventory[' '.join(item_name)] = price\n \n \nfor key in inventory:\n print(key, inventory[key])\n ","repo_name":"raleighlittles/Python-HackerRank","sub_path":"Collections/Collections Ordered Dict.py","file_name":"Collections Ordered Dict.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"2"}
+{"seq_id":"33871541661","text":"\"\"\"\nAuthor: William Rhodes Lacy, lacyw@purdue.edu\nAssignment: 03.2 - Sum Average\nDate: 02/21/2022\n\nDescription:\n This program receives some number of inputs and tells the user their sum and average as well as the number of numbers input\n\nContributors:\n William Rhodes Lacy, lacyw@purdue.edu\n\nMy contributor(s) helped me:\n [ ] understand the assignment expectations without\n telling me how they will approach it.\n [ ] understand different ways to think about a solution\n without helping me plan my solution.\n [ ] think through the meaning of a specific error or\n bug present in my code without looking at my code.\n Note that if you helped somebody else with their code, you\n have to list that person as a contributor.\n\nAcademic Integrity Statement:\n I have not used source code obtained from any unauthorized\n source, either modified or unmodified; nor have I provided\n another student access to my code. The project I am\n submitting is my own original work.\n\"\"\"\n\n\"\"\"Import additional modules below this line (starting with unit 6).\"\"\"\n\n\n\"\"\"Write new functions below this line (starting with unit 4).\"\"\"\n\n\ndef main():\n\t\n\t#Initialize some variables\n\tnums = [];\n\tnumnums = 0;\n\tinNum = 0;\n\tsum = 0;\n\t\n\t#Prompt user until they tell us to stop\n\twhile inNum >= 0:\n\t\tinNum = float(input(\"Enter a non-negative number (negative to quit): \"));\n\t\t#Break statment to correct and off-by-one error\n\t\tif(inNum < 0):\n\t\t\tbreak;\n\t\tnums = [nums, inNum];\n\t\tnumnums += 1;\n\t\tsum += inNum;\n\t\n\t\n\t\n\t#Conditional for if the user input no numbers\n\tif(numnums == 0):\n\t\tprint(\" You didn't enter any numbers.\");\n\telse:\n\t\t#Compute Average\n\t\tavg = sum / numnums;\n\t\tprint(\" You entered {:.0f} numbers.\".format(numnums));\n\t\tprint(\" Their sum is {:.3f} and their average is {:.3f}.\".format(sum, avg));\n\t\n\n\n\n\"\"\"Do not change anything below this line.\"\"\"\nif __name__ == \"__main__\":\n main()\n","repo_name":"KeenCanteen/Purdue_EBEC101_Lacy","sub_path":"Chapter03/sum_average_lacyw.py","file_name":"sum_average_lacyw.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7702113198","text":"from utils import *\nfrom tqdm import tqdm\nfrom urllib3.exceptions import ProtocolError, NewConnectionError\nfrom selenium.common.exceptions import InvalidArgumentException\n\nif __name__ == \"__main__\":\n with open('non-images.txt', 'r') as non_img_file:\n urls = [u for u in non_img_file.read().split('\\n') if u]\n non_img_file.close()\n\n log, psw = (input('Login? '), input('Password? '))\n\n driver = start_driver(log, psw)\n\n towrite = open('for_manual_download', 'a')\n\n actual_non_images = []\n\n for url in tqdm(urls):\n try:\n link = links_exist(driver, url)\n if link:\n towrite.write(f'{link}\\n')\n except (ConnectionRefusedError, ProtocolError, NewConnectionError):\n restart_driver(log, psw)\n link = links_exist(driver, url)\n if link:\n towrite.write(f'{link}\\n')\n except (AttributeError, InvalidArgumentException):\n towrite.write(f'{url}\\n')\n\n driver.quit()\n towrite.close()\n","repo_name":"Pythonimous/python-miscellaneous","sub_path":"reddit_imgs_selenium/removed_posts_handler.py","file_name":"removed_posts_handler.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"1545625871","text":"from fastapi import APIRouter, Depends, HTTPException, status, Query, Path, BackgroundTasks\n\nfrom typing import Optional, Any\nfrom app import schemas, models, database, oauth2\nfrom sqlalchemy.orm import Session\nfrom ..controllers import crud_analytic\n\nrouter = APIRouter(\n prefix=\"/analytics\",\n tags=[\"Analytics\"],\n # dependencies=[Depends(get_token_header)],\n responses={404: {\"description\": \"Analytic data not found\"}},\n)\n\n\n@router.get(\"/\", tags=[])\ndef index(\n db: Session = Depends(database.get_db),\n skip: int = Query(0, description=\"Apply offset to the query\"),\n limit: int = Query(\n 10, description=\"Set a limit of data retrieved\"),\n):\n \"\"\"\n Retrieve product analytics.\n \"\"\"\n return crud_analytic.index(db, skip=skip, limit=limit)\n","repo_name":"JulioJair/catalog-fastapi","sub_path":"backend/app/app/routers/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24731986804","text":"from xpcom import components, ServerException\n\nfrom koLanguageKeywordBase import KoLanguageKeywordBase\nfrom koLanguageServiceBase import KoLexerLanguageService, FastCharData\n\nsci_constants = components.interfaces.ISciMoz\n\nclass koPascalLanguage(KoLanguageKeywordBase):\n name = \"Pascal\"\n _reg_desc_ = \"%s Language\" % name\n _reg_contractid_ = \"@activestate.com/koLanguage?language=%s;1\" \\\n % (name)\n _reg_clsid_ = \"{8AE35E4C-0EC9-49f2-A534-8FEAB91D261D}\"\n _reg_categories_ = [(\"komodo-language\", name)]\n\n defaultExtension = \".pas\"\n commentDelimiterInfo = {\n # See the following for info on Pascal comments:\n # http://www.math.uni-hamburg.de/it/software/fpk/ref/node7.html\n #XXX The line Pascal comment is a Delphi only thing, so I am leaving\n # it out for now.\n #\"line\": [ \"//\" ],\n \"block\": [ (\"{\", \"}\"),\n (\"(*\", \"*)\") ],\n \"markup\": \"*\",\n }\n supportsSmartIndent = \"keyword\"\n _indenting_statements = ['begin', 'record', 'repeat', 'case', ]\n _dedenting_statements = ['goto', 'halt', ]\n _keyword_dedenting_keywords = ['end', 'until', ]\n\n _stateMap = {\n 'default': ('SCE_PAS_DEFAULT',),\n 'keywords': ('SCE_PAS_WORD', 'SCE_PAS_ASM'),\n 'comments': ('SCE_PAS_COMMENT', 'SCE_PAS_COMMENT2', 'SCE_PAS_COMMENTLINE',),\n 'identifiers': ('SCE_PAS_IDENTIFIER',),\n 'preprocessor': ('SCE_PAS_PREPROCESSOR', 'SCE_PAS_PREPROCESSOR2'),\n 'numbers': ('SCE_PAS_NUMBER', 'SCE_PAS_HEXNUMBER',),\n 'strings': ('SCE_PAS_STRING', 'SCE_PAS_STRINGEOL', 'SCE_PAS_CHARACTER',),\n }\n\n sample = \"\"\"\nprogram MyProg(input, output)\n(* Warning: this program might not compile. *)\n begin\n { that's because there's no\n Pascal compiler on this machine.\n }\n var myVar:integer;\n myVar := 5;\n if (myVar > 3) begin\n writeln(\"Pascal is fun!!!!\")\n end\n end\nend.\n\"\"\" \n def __init__(self):\n KoLanguageKeywordBase.__init__(self)\n self._style_info.update(\n _block_comment_styles = [sci_constants.SCE_PAS_COMMENT,\n sci_constants.SCE_PAS_COMMENT2,\n sci_constants.SCE_PAS_COMMENTLINE],\n _indent_styles = [sci_constants.SCE_PAS_OPERATOR],\n _variable_styles = [sci_constants.SCE_PAS_IDENTIFIER],\n _lineup_close_styles = [sci_constants.SCE_PAS_OPERATOR],\n _lineup_styles = [sci_constants.SCE_PAS_OPERATOR],\n _keyword_styles = [sci_constants.SCE_PAS_WORD],\n _default_styles = [sci_constants.SCE_PAS_DEFAULT],\n _ignorable_styles = [sci_constants.SCE_PAS_COMMENT,\n sci_constants.SCE_PAS_COMMENT2,\n sci_constants.SCE_PAS_COMMENTLINE,\n sci_constants.SCE_PAS_NUMBER],\n )\n self._fastCharData = \\\n FastCharData(trigger_char=\";\",\n style_list=(sci_constants.SCE_PAS_OPERATOR, ),\n skippable_chars_by_style={ sci_constants.SCE_PAS_OPERATOR : \"])\" },\n for_check=True)\n\n def get_lexer(self):\n if self._lexer is None:\n self._lexer = KoLexerLanguageService()\n self._lexer.setLexer(components.interfaces.ISciMoz.SCLEX_PASCAL)\n self._lexer.setKeywords(0, self._keywords)\n self._lexer.setKeywords(1, self._keywords2)\n self._lexer.supportsFolding = 1\n return self._lexer\n\n _keywords = \"\"\"and array asm begin case cdecl class const constructor default \ndestructor div do downto else end end. 
except exit exports external far file \nfinalization finally for function goto if implementation in index inherited \ninitialization inline interface label library message mod near nil not \nobject of on or out overload override packed pascal private procedure program \nproperty protected public published raise read record register repeat resourcestring \nsafecall set shl shr stdcall stored string then threadvar to try type unit \nuntil uses var virtual while with write writeln xor\"\"\".split()\n\n _keywords2 = \"\"\"write read default public protected private property\n published stored\"\"\".split()\n\n","repo_name":"Komodo/KomodoEdit","sub_path":"src/languages/koPascalLanguage.py","file_name":"koPascalLanguage.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","stars":2110,"dataset":"github-code","pt":"2"}
+{"seq_id":"28551028226","text":"from numbers import Number\nfrom typing import Tuple, Optional, Generic, List\n\n\nclass Node(object):\n def __init__(self, key: Number, val: Generic) -> None:\n self.key = key\n self.val = val\n\n\nclass BinarySearchTree(object):\n def __init__(self, list_of_values: List[Tuple]) -> None:\n list_of_values.sort(key=lambda x: x[0])\n self.data = list(map(lambda x: Node(*x), list_of_values))\n self.n = len(self.data)\n\n def search(self, target: Number) -> Optional[Node]:\n left, right = 0, self.n - 1\n while left < right:\n mid = (left + right) // 2\n current_node = self.data[mid]\n if current_node.key == target:\n return current_node\n elif current_node.key < target:\n left = mid + 1\n else:\n right = mid - 1\n return None\n\n def search_closest(self, target: Number) -> Node:\n left, right = 0, self.n - 1\n if target >= self.data[right].key:\n return self.data[right]\n\n if target <= self.data[left].key:\n return self.data[left]\n\n while left < right:\n mid = (left + right) // 2\n current_node = self.data[mid]\n\n if current_node.key == target:\n return current_node\n\n if target < current_node.key:\n if mid > 0 and target > self.data[mid - 1].key:\n return self._get_closest(current_node, self.data[mid - 1], target)\n right = mid\n\n else:\n if mid < self.n - 1 and target < self.data[mid + 1].key:\n return self._get_closest(current_node, self.data[mid + 1], target)\n\n left = mid\n\n return current_node\n\n @staticmethod\n def _get_closest(x: Node, y: Node, target: Number):\n if abs(x.key - target) <= abs(y.key - target):\n return x\n return y\n\n\nclass UnionFind(object):\n def __init__(self, n: int) -> None:\n self.items = list(range(n))\n self.sizes = [1 for _ in range(n)]\n self.n_nodes = n\n self.n_connected_sets = n\n\n def find(self, p: int) -> int:\n root = self.items[p]\n while root != self.items[root]:\n self.items[root] = self.items[self.items[root]]\n root = self.items[root]\n self.items[p] = root\n return root\n\n def connected(self, p: int, q: int) -> bool:\n return p == q or self.find(p) == self.find(q)\n\n def union(self, p: int, q: int) -> None:\n if self.connected(p, q):\n return None\n\n p_root = self.find(p)\n q_root = self.find(q)\n\n smaller, larger = sorted([p_root, q_root], key=lambda z: self.sizes[z])\n self.items[smaller] = larger\n self.sizes[larger] += self.sizes[smaller]\n\n self.n_connected_sets -= 1\n\n","repo_name":"jiduque/rosalind","sub_path":"rosalind/_data_structures.py","file_name":"_data_structures.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"19397679148","text":"from typing import Any, Callable, Dict, List, Tuple\nimport numpy as np\n\nimport torch\nfrom torch import Tensor\nimport torchmetrics\nimport torch.nn as nn\nfrom torch.nn import functional as F, Module\nfrom transformers import BertModel, AdamW\n\nfrom pytorch_lightning import LightningModule\n\nfrom utilities import utils\n\n\nAMLBatch = Tuple[Dict[str, Tensor], Tensor]\n\n\nclass LongTextClassifier(LightningModule):\n \"\"\"\n RoBERT model for AML article classification\n \"\"\"\n\n def __init__(\n self, \n num_classes: int,\n config_path: str,\n num_epochs_freeze_pretrained: int,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.num_classes = num_classes\n \n self.num_epochs_freeze_pretrained = num_epochs_freeze_pretrained\n self.config = utils.load_config(config_path)\n self.dropout_rate = self.config.DROPOUT_RATE\n self.pretrained_weights_frozen = False\n\n # define the model architecture:\n self._setup_feature_extractor()\n self._setup_aggregating_network()\n self._setup_predictor()\n\n # define evaluation metrics:\n self.accuracy = torchmetrics.Accuracy(num_classes=self.num_classes).to(self.device)\n\n def _setup_feature_extractor(self):\n \"\"\"\n Defines the base feature extraction model\n \"\"\"\n self.feature_extractor = BertModel.from_pretrained(self.config.BERT_MODEL)\n\n def _setup_aggregating_network(self):\n \"\"\"\n Defines the feature collating network\n \"\"\"\n self.aggregating_network = nn.LSTM(\n self.config.BERT_MODEL_OUTPUT_SIZE, \n self.config.AGGREGATING_NETWORK_OUTPUT_SIZE,\n num_layers=1,\n bidirectional=False,\n )\n\n def _setup_predictor(self):\n \"\"\"\n Defines the model predictor\n \"\"\"\n self.predictor = nn.Sequential(\n nn.Dropout(p=self.dropout_rate),\n nn.Linear(\n self.config.AGGREGATING_NETWORK_OUTPUT_SIZE, 30\n ),\n nn.ReLU(),\n nn.Linear(30, self.num_classes)\n ) \n\n def forward(self, article_batch):\n \"\"\"\n Defines the model forward pass\n \"\"\"\n output_embeddings = []\n # TODO: refactor to allow batch processing:\n article_part_features = self.feature_extractor(\n article_batch['input_ids'].squeeze(0),\n attention_mask=article_batch['attention_mask'].squeeze(0),\n token_type_ids=article_batch['token_type_ids'].squeeze(0)\n )\n output_embeddings = article_part_features['pooler_output'].unsqueeze(0)\n aggregated_outputs, _ = self.aggregating_network(output_embeddings)\n article_idxs = np.arange(len(aggregated_outputs))\n last_time_step_idx = article_batch['num_splits'] - 1\n last_time_step = aggregated_outputs[article_idxs, last_time_step_idx]\n return self.predictor(last_time_step)\n\n def _loss_step(\n self, \n batch: AMLBatch, \n eval: bool, \n criterion: Module = F.cross_entropy\n ) -> Tensor:\n \"\"\"\n Definition of a standard loss step\n \"\"\"\n tokens, labels = batch\n logits = self(tokens)\n loss = criterion(logits, labels)\n if eval:\n self.accuracy.update(logits, labels)\n return loss\n\n def training_step(self, batch: AMLBatch, batch_idx: int) -> Tensor:\n loss = self._loss_step(batch, eval=False)\n self.log('train/loss', loss)\n return loss\n \n def validation_step(self, batch: AMLBatch, batch_idx: int) -> Tensor:\n loss = self._loss_step(batch, eval=True)\n self.log('val/loss', loss)\n\n def validation_epoch_end(self, outputs) -> None:\n accuracy = self.accuracy.compute()\n self.log('val/accuracy', accuracy)\n print('val/accuracy', accuracy.item())\n self.accuracy.reset()\n\n @property\n def optimizer(self) -> Callable:\n \"\"\"\n Returns the configured optimizer\n \"\"\"\n 
optimizer = self.config.OPTIMIZER\n if optimizer == 'Adam':\n return torch.optim.Adam\n if optimizer == 'AdamW':\n return AdamW\n if optimizer == 'SGD':\n return torch.optim.SGD\n raise NotImplementedError\n\n def configure_optimizers(self):\n parameter_groups = [\n {'params': self.feature_extractor.parameters(), 'weight_decay': float(self.config.FEATURE_EXTRACTOR_WEIGHT_DECAY)},\n {'params': self.aggregating_network.parameters(), 'weight_decay': float(self.config.AGGREGATING_NETWORK_WEIGHT_DECAY)},\n {'params': self.predictor.parameters(), 'weight_decay': float(self.config.PREDICTOR_WEIGHT_DECAY)}\n ]\n return self.optimizer(parameter_groups, lr=float(self.config.LEARNING_RATE))\n\n def predict(self, tokens: Dict[str, Tensor]) -> List[Tuple[str, float]]:\n \"\"\"\n Returns prediction results on a given batch\n \"\"\"\n self.eval()\n logits = self(tokens)\n probabilities = F.softmax(logits)\n predicted_classes = probabilities.argmax(dim=1)\n output_mapping = utils.invert_dictionary(self.config.CLASS_MAPPING)\n output = [\n (\n output_mapping[predicted_class.item()], \n probabilities[idx][predicted_class.item()].item()\n )\n for idx, predicted_class in enumerate(predicted_classes)\n ]\n return output\n","repo_name":"TomaszKaleczyc/AML_news_detection","sub_path":"src/model/long_text_classifier.py","file_name":"long_text_classifier.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"22524185363","text":"import math\n\nclass Hamming(object):\n def __init__(self, trama) -> None:\n self.trama = trama\n self.rotate_trama()\n \n def rotate_trama(self):\n self.trama = self.trama[::-1]\n self.trama = [int(c) for c in self.trama]\n\n def rotate(self, trama):\n return trama[::-1]\n\n def find_r(self):\n return math.floor(math.log2(len(self.trama))) + 1\n \n def get_positions_with_1(self, variable, num_variables):\n positions = []\n for i in range(2**num_variables):\n if (i >> variable) & 1:\n positions.append(i)\n return positions\n \n def base_2_to_10(self, num):\n result = 0\n i = 0\n for element in num:\n result += int(element) * 2**i\n i += 1\n return result\n \n def original_message(self):\n r = self.find_r()\n original = []\n for i in range(len(self.trama)):\n if i + 1 not in [2**j for j in range(r)]:\n original.append(self.trama[i])\n return self.rotate(original)\n \n def correct_errors(self):\n r = self.find_r()\n errors = []\n result = \"\"\n for pow in range(r):\n if 2**pow >= len(self.trama):\n break\n positions = self.get_positions_with_1(pow, r)\n ones = 0\n for position in positions:\n if position <= len(self.trama):\n ones += self.trama[position - 1]\n if ones % 2 != 0:\n errors.append(2**pow)\n result += \"1\"\n else:\n result += \"0\"\n \n if errors:\n error_position = self.base_2_to_10(result)\n self.trama[error_position - 1] = 1 if self.trama[error_position - 1] == 0 else 0\n # self.rotate_trama()\n return error_position, \"\".join(str(element) for element in self.rotate(self.trama)), \"\".join(str(element) for element in self.original_message())\n \n # self.rotate_trama()\n return None, \"\".join(str(element) for element in self.rotate(self.trama)), \"\".join(str(element) for element in self.original_message())\n\n\ndef verify_input(trama):\n for element in trama:\n if element != \"0\" and element != \"1\":\n return False\n \n return True\n\nprint(\"Ingrese la trama\")\ntrama = input()\nif not verify_input(trama):\n print(\"La trama es inválida\")\nelse:\n hamming = Hamming(trama)\n error, corrected, original = hamming.correct_errors()\n if error:\n print(\"Hubo errores en la trama en la posicion:\", error)\n print(\"Trama corregida:\", corrected + \".\", \"Trama original:\", trama)\n else:\n print(\"No hubo errores en la trama:\", corrected + \".\", \"Trama original:\", original)","repo_name":"Aristondo01/Lab2_Redes","sub_path":"HammingReceptor.py","file_name":"HammingReceptor.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"43027567290","text":"import os\nimport os.path as osp\nimport tempfile\nimport warnings\nfrom collections import OrderedDict, defaultdict\n\nimport json_tricks as json\nimport numpy as np\nfrom mmcv import deprecated_api_warning\n\nfrom ....core.post_processing import oks_nms, soft_oks_nms\nfrom ...builder import DATASETS\nfrom ..base import Kpt2dSviewRgbVidTopDownDataset\n\ntry:\n from poseval import eval_helpers\n from poseval.evaluateAP import evaluateAP\n has_poseval = True\nexcept (ImportError, ModuleNotFoundError):\n has_poseval = False\n\n\n@DATASETS.register_module()\nclass TopDownPoseTrack18VideoDataset(Kpt2dSviewRgbVidTopDownDataset):\n \"\"\"PoseTrack18 dataset for top-down pose estimation.\n\n \"Posetrack: A benchmark for human pose estimation and tracking\", CVPR'2018.\n More details can be found in the `paper\n `__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n PoseTrack2018 keypoint indexes::\n\n 0: 'nose',\n 1: 'head_bottom',\n 2: 'head_top',\n 3: 'left_ear',\n 4: 'right_ear',\n 5: 'left_shoulder',\n 6: 'right_shoulder',\n 7: 'left_elbow',\n 8: 'right_elbow',\n 9: 'left_wrist',\n 10: 'right_wrist',\n 11: 'left_hip',\n 12: 'right_hip',\n 13: 'left_knee',\n 14: 'right_knee',\n 15: 'left_ankle',\n 16: 'right_ankle'\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where videos/images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n dataset_info (DatasetInfo): A class containing all dataset info.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n ph_fill_len (int): The length of the placeholder to fill in the\n image filenames, default: 6 in PoseTrack18.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n dataset_info=None,\n test_mode=False,\n ph_fill_len=6):\n super().__init__(\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n dataset_info=dataset_info,\n test_mode=test_mode)\n\n self.use_gt_bbox = data_cfg['use_gt_bbox']\n self.bbox_file = data_cfg['bbox_file']\n self.det_bbox_thr = data_cfg.get('det_bbox_thr', 0.0)\n self.use_nms = data_cfg.get('use_nms', True)\n self.soft_nms = data_cfg['soft_nms']\n self.nms_thr = data_cfg['nms_thr']\n self.oks_thr = data_cfg['oks_thr']\n self.vis_thr = data_cfg['vis_thr']\n self.frame_weight_train = data_cfg['frame_weight_train']\n self.frame_weight_test = data_cfg['frame_weight_test']\n self.frame_weight = self.frame_weight_test \\\n if self.test_mode else self.frame_weight_train\n\n self.ph_fill_len = ph_fill_len\n\n # select the frame indices\n self.frame_index_rand = data_cfg.get('frame_index_rand', True)\n self.frame_index_range = data_cfg.get('frame_index_range', [-2, 2])\n self.num_adj_frames = data_cfg.get('num_adj_frames', 1)\n self.frame_indices_train = data_cfg.get('frame_indices_train', None)\n self.frame_indices_test = data_cfg.get('frame_indices_test',\n [-2, -1, 0, 1, 2])\n\n if self.frame_indices_train is not None:\n self.frame_indices_train.sort()\n self.frame_indices_test.sort()\n\n self.db = self._get_db()\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self):\n \"\"\"Load dataset.\"\"\"\n if (not self.test_mode) or self.use_gt_bbox:\n # use ground truth bbox\n gt_db = self._load_coco_keypoint_annotations()\n else:\n # use bbox from detection\n gt_db = 
self._load_posetrack_person_detection_results()\n return gt_db\n\n def _load_coco_keypoint_annotations(self):\n \"\"\"Ground truth bbox and keypoints.\"\"\"\n gt_db = []\n for img_id in self.img_ids:\n gt_db.extend(self._load_coco_keypoint_annotation_kernel(img_id))\n return gt_db\n\n def _load_coco_keypoint_annotation_kernel(self, img_id):\n \"\"\"load annotation from COCOAPI.\n\n Note:\n bbox:[x1, y1, w, h]\n Args:\n img_id: coco image id\n Returns:\n dict: db entry\n \"\"\"\n img_ann = self.coco.loadImgs(img_id)[0]\n width = img_ann['width']\n height = img_ann['height']\n num_joints = self.ann_info['num_joints']\n\n file_name = img_ann['file_name']\n nframes = int(img_ann['nframes'])\n frame_id = int(img_ann['frame_id'])\n\n ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)\n objs = self.coco.loadAnns(ann_ids)\n\n # sanitize bboxes\n valid_objs = []\n for obj in objs:\n if 'bbox' not in obj:\n continue\n x, y, w, h = obj['bbox']\n x1 = max(0, x)\n y1 = max(0, y)\n x2 = min(width - 1, x1 + max(0, w - 1))\n y2 = min(height - 1, y1 + max(0, h - 1))\n if ('area' not in obj or obj['area'] > 0) and x2 > x1 and y2 > y1:\n obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]\n valid_objs.append(obj)\n objs = valid_objs\n\n bbox_id = 0\n rec = []\n for obj in objs:\n if 'keypoints' not in obj:\n continue\n if max(obj['keypoints']) == 0:\n continue\n if 'num_keypoints' in obj and obj['num_keypoints'] == 0:\n continue\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n keypoints = np.array(obj['keypoints']).reshape(-1, 3)\n joints_3d[:, :2] = keypoints[:, :2]\n joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])\n\n center, scale = self._xywh2cs(*obj['clean_bbox'][:4])\n\n image_files = []\n cur_image_file = osp.join(self.img_prefix, self.id2name[img_id])\n image_files.append(cur_image_file)\n\n # \"images/val/012834_mpii_test/000000.jpg\" -->> \"000000.jpg\"\n cur_image_name = file_name.split('/')[-1]\n ref_idx = int(cur_image_name.replace('.jpg', ''))\n\n # select the frame indices\n if not self.test_mode and self.frame_indices_train is not None:\n indices = self.frame_indices_train\n elif not self.test_mode and self.frame_index_rand:\n low, high = self.frame_index_range\n indices = np.random.randint(low, high + 1, self.num_adj_frames)\n else:\n indices = self.frame_indices_test\n\n for index in indices:\n if self.test_mode and index == 0:\n continue\n # the supporting frame index\n support_idx = ref_idx + index\n support_idx = np.clip(support_idx, 0, nframes - 1)\n sup_image_file = cur_image_file.replace(\n cur_image_name,\n str(support_idx).zfill(self.ph_fill_len) + '.jpg')\n\n if osp.exists(sup_image_file):\n image_files.append(sup_image_file)\n else:\n warnings.warn(\n f'{sup_image_file} does not exist, '\n f'use {cur_image_file} instead.', UserWarning)\n image_files.append(cur_image_file)\n rec.append({\n 'image_file': image_files,\n 'center': center,\n 'scale': scale,\n 'bbox': obj['clean_bbox'][:4],\n 'rotation': 0,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'dataset': self.dataset_name,\n 'bbox_score': 1,\n 'bbox_id': bbox_id,\n 'nframes': nframes,\n 'frame_id': frame_id,\n 'frame_weight': self.frame_weight\n })\n bbox_id = bbox_id + 1\n\n return rec\n\n def _load_posetrack_person_detection_results(self):\n \"\"\"Load Posetrack person detection results.\n\n Only in test mode.\n \"\"\"\n num_joints = self.ann_info['num_joints']\n all_boxes = None\n with open(self.bbox_file, 
'r') as f:\n all_boxes = json.load(f)\n\n if not all_boxes:\n raise ValueError('=> Load %s fail!' % self.bbox_file)\n\n print(f'=> Total boxes: {len(all_boxes)}')\n\n kpt_db = []\n bbox_id = 0\n for det_res in all_boxes:\n if det_res['category_id'] != 1:\n continue\n\n score = det_res['score']\n if score < self.det_bbox_thr:\n continue\n\n box = det_res['bbox']\n\n # deal with different bbox file formats\n if 'nframes' in det_res and 'frame_id' in det_res:\n nframes = int(det_res['nframes'])\n frame_id = int(det_res['frame_id'])\n elif 'image_name' in det_res:\n img_id = self.name2id[det_res['image_name']]\n img_ann = self.coco.loadImgs(img_id)[0]\n nframes = int(img_ann['nframes'])\n frame_id = int(img_ann['frame_id'])\n else:\n img_id = det_res['image_id']\n img_ann = self.coco.loadImgs(img_id)[0]\n nframes = int(img_ann['nframes'])\n frame_id = int(img_ann['frame_id'])\n\n image_files = []\n if 'image_name' in det_res:\n file_name = det_res['image_name']\n else:\n file_name = self.id2name[det_res['image_id']]\n\n cur_image_file = osp.join(self.img_prefix, file_name)\n image_files.append(cur_image_file)\n\n # \"images/val/012834_mpii_test/000000.jpg\" -->> \"000000.jpg\"\n cur_image_name = file_name.split('/')[-1]\n ref_idx = int(cur_image_name.replace('.jpg', ''))\n\n indices = self.frame_indices_test\n for index in indices:\n if self.test_mode and index == 0:\n continue\n # the supporting frame index\n support_idx = ref_idx + index\n support_idx = np.clip(support_idx, 0, nframes - 1)\n sup_image_file = cur_image_file.replace(\n cur_image_name,\n str(support_idx).zfill(self.ph_fill_len) + '.jpg')\n\n if osp.exists(sup_image_file):\n image_files.append(sup_image_file)\n else:\n warnings.warn(f'{sup_image_file} does not exist, '\n f'use {cur_image_file} instead.')\n image_files.append(cur_image_file)\n\n center, scale = self._xywh2cs(*box[:4])\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.ones((num_joints, 3), dtype=np.float32)\n kpt_db.append({\n 'image_file': image_files,\n 'center': center,\n 'scale': scale,\n 'rotation': 0,\n 'bbox': box[:4],\n 'bbox_score': score,\n 'dataset': self.dataset_name,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'bbox_id': bbox_id,\n 'nframes': nframes,\n 'frame_id': frame_id,\n 'frame_weight': self.frame_weight\n })\n bbox_id = bbox_id + 1\n print(f'=> Total boxes after filter '\n f'low score@{self.det_bbox_thr}: {bbox_id}')\n return kpt_db\n\n @deprecated_api_warning(name_dict=dict(outputs='results'))\n def evaluate(self, results, res_folder=None, metric='mAP', **kwargs):\n \"\"\"Evaluate posetrack keypoint results. The pose prediction results\n will be saved in ``${res_folder}/result_keypoints.json``.\n\n Note:\n - num_keypoints: K\n\n Args:\n results (list[dict]): Testing results containing the following\n items:\n\n - preds (np.ndarray[N,K,3]): The first two dimensions are \\\n coordinates, score is the third dimension of the array.\n - boxes (np.ndarray[N,6]): [center[0], center[1], scale[0], \\\n scale[1],area, score]\n - image_paths (list[str]): For example, ['val/010016_mpii_test\\\n /000024.jpg']\n - heatmap (np.ndarray[N, K, H, W]): model output heatmap.\n - bbox_id (list(int))\n res_folder (str, optional): The folder to save the testing\n results. If not specified, a temp folder will be created.\n Default: None.\n metric (str | list[str]): Metric to be performed. 
Defaults: 'mAP'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['mAP']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_folder = tmp_folder.name\n\n gt_folder = osp.join(\n osp.dirname(self.ann_file),\n osp.splitext(self.ann_file.split('_')[-1])[0])\n\n kpts = defaultdict(list)\n\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n if not isinstance(image_paths[i], list):\n image_id = self.name2id[image_paths[i]\n [len(self.img_prefix):]]\n else:\n image_id = self.name2id[image_paths[i][0]\n [len(self.img_prefix):]]\n\n kpts[image_id].append({\n 'keypoints': preds[i],\n 'center': boxes[i][0:2],\n 'scale': boxes[i][2:4],\n 'area': boxes[i][4],\n 'score': boxes[i][5],\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n # rescoring and oks nms\n num_joints = self.ann_info['num_joints']\n vis_thr = self.vis_thr\n oks_thr = self.oks_thr\n valid_kpts = defaultdict(list)\n for image_id in kpts.keys():\n img_kpts = kpts[image_id]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > vis_thr:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n\n if self.use_nms:\n nms = soft_oks_nms if self.soft_nms else oks_nms\n keep = nms(img_kpts, oks_thr, sigmas=self.sigmas)\n valid_kpts[image_id].append(\n [img_kpts[_keep] for _keep in keep])\n else:\n valid_kpts[image_id].append(img_kpts)\n\n self._write_keypoint_results(valid_kpts, gt_folder, res_folder)\n\n info_str = self._do_keypoint_eval(gt_folder, res_folder)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value\n\n @staticmethod\n def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):\n \"\"\"Write results into a json file.\n\n Args:\n keypoint_results (dict): keypoint results organized by image_id.\n gt_folder (str): Path of directory for official gt files.\n pred_folder (str): Path of directory to save the results.\n \"\"\"\n categories = []\n\n cat = {}\n cat['supercategory'] = 'person'\n cat['id'] = 1\n cat['name'] = 'person'\n cat['keypoints'] = [\n 'nose', 'head_bottom', 'head_top', 'left_ear', 'right_ear',\n 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',\n 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',\n 'right_knee', 'left_ankle', 'right_ankle'\n ]\n cat['skeleton'] = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13],\n [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10],\n [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5],\n [4, 6], [5, 7]]\n categories.append(cat)\n\n json_files = [\n pos for pos in os.listdir(gt_folder) if pos.endswith('.json')\n ]\n for json_file in json_files:\n\n with open(osp.join(gt_folder, json_file), 'r') as f:\n gt = json.load(f)\n\n annotations = []\n images = []\n\n for image in gt['images']:\n im = {}\n im['id'] = image['id']\n im['file_name'] = image['file_name']\n images.append(im)\n\n img_kpts = 
keypoint_results[im['id']]\n\n if len(img_kpts) == 0:\n continue\n for track_id, img_kpt in enumerate(img_kpts[0]):\n ann = {}\n ann['image_id'] = img_kpt['image_id']\n ann['keypoints'] = np.array(\n img_kpt['keypoints']).reshape(-1).tolist()\n ann['scores'] = np.array(ann['keypoints']).reshape(\n [-1, 3])[:, 2].tolist()\n ann['score'] = float(img_kpt['score'])\n ann['track_id'] = track_id\n annotations.append(ann)\n\n info = {}\n info['images'] = images\n info['categories'] = categories\n info['annotations'] = annotations\n\n with open(osp.join(pred_folder, json_file), 'w') as f:\n json.dump(info, f, sort_keys=True, indent=4)\n\n def _do_keypoint_eval(self, gt_folder, pred_folder):\n \"\"\"Keypoint evaluation using poseval.\"\"\"\n\n if not has_poseval:\n raise ImportError('Please install poseval package for evaluation'\n 'on PoseTrack dataset '\n '(see requirements/optional.txt)')\n\n argv = ['', gt_folder + '/', pred_folder + '/']\n\n print('Loading data')\n gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)\n\n print('# gt frames :', len(gtFramesAll))\n print('# pred frames:', len(prFramesAll))\n\n # evaluate per-frame multi-person pose estimation (AP)\n # compute AP\n print('Evaluation of per-frame multi-person pose estimation')\n apAll, _, _ = evaluateAP(gtFramesAll, prFramesAll, None, False, False)\n\n # print AP\n print('Average Precision (AP) metric:')\n eval_helpers.printTable(apAll)\n\n stats = eval_helpers.getCum(apAll)\n\n stats_names = [\n 'Head AP', 'Shou AP', 'Elb AP', 'Wri AP', 'Hip AP', 'Knee AP',\n 'Ankl AP', 'Total AP'\n ]\n\n info_str = list(zip(stats_names, stats))\n\n return info_str\n","repo_name":"ViTAE-Transformer/ViTPose","sub_path":"mmpose/datasets/datasets/top_down/topdown_posetrack18_video_dataset.py","file_name":"topdown_posetrack18_video_dataset.py","file_ext":"py","file_size_in_byte":20232,"program_lang":"python","lang":"en","doc_type":"code","stars":978,"dataset":"github-code","pt":"2"}
+{"seq_id":"6324674494","text":"import numpy as np \nfrom numpy import array as a \nfrom multiprocessing_generator import ParallelGenerator\nfrom random import random as r\nfrom random import gauss as g\nfrom random import shuffle\nimport matplotlib.pyplot as plt\nfrom time import sleep\n\n# with ParallelGenerator(\n# \tself.xygen(wn1a,wn2a),\n# \tmax_lookahead=200) as xyg:\n# \tfor x,y in xyg:\n# \t\tself.model.fit(x,y,epochs=1)\n\nclass agent:\n\tc = a([0,0])\t# Row,Col\n\tE = []\t\t\t# Sensing Matrix\n\tenv = ''\t\t# Environment Generator\n\tf = 0\t\t\t# Fitness Score\n\n\tG = []\t\t\t# Gene Matrix\n\tP = []\t\t\t# Next Step Policy\n\tcfg = ''\n\n\tdef __init__(self,env,cfg):\n\t\tself.c = [int(g(0,10)),int(g(0,10))]\n\t\tself.env = env\n\t\tself.E = env.getSensingMatrix(self.c)\n\t\tself.G = gene(cfg)\n\t\tself.cfg = cfg\n\n\tdef step(self):\n\n\t\tif np.linalg.norm(self.c) < self.cfg.B + 10:\n\n\n\t\t\tself.E = self.env.getSensingMatrix(self.c)\n\n\t\t\tself.P = self.G.A @ self.E @ self.G.B\n\t\t\tfor i in range(len(self.P)):\n\t\t\t\tif self.P[i] == np.array(self.P).max():\n\t\t\t\t\tbreak\n\t\t\t# Up\n\t\t\tif i == 0:\n\t\t\t\tself.c += a([1,0])\n\t\t\t# Down\n\t\t\telif i == 1:\n\t\t\t\tself.c += a([-1,0])\n\t\t\t# Left\n\t\t\telif i == 2:\n\t\t\t\tself.c += a([0,-1])\n\t\t\t# Right\n\t\t\telif i == 3:\n\t\t\t\tself.c += a([0,1])\n\t\t\t# Up Right\n\t\t\telif i == 4:\n\t\t\t\tself.c += a([1,1])\n\t\t\t# Down Right\n\t\t\telif i == 5:\n\t\t\t\tself.c += a([-1,1])\n\t\t\t# Down Left\n\t\t\telif i == 6:\t\n\t\t\t\tself.c += a([-1,-1])\n\t\t\t# Up Left\n\t\t\telif i == 7:\n\t\t\t\tself.c += a([1,-1])\n\n\t\t\tself.f += self.env.consume(self.c)\n\nclass genePool:\n\n\tP = []\t# Gene Pool [N_A,Dir,Vision]\n\n\tdef getShuffleReducePool(self,agents):\n\t\ttmp = []\n\n\t\tfs = []\n\t\tfor agent in agents:\n\t\t\tfs.append(agent.f)\n\t\tidx = sorted(range(len(fs)), key=lambda k: fs[k])\n\t\tidx = idx[int(len(idx)/2):]\n\n\n\t\tfor i in idx:\n\t\t\ttmp.append(agents[i].G.W)\n\t\ttmp = a(tmp)\n\t\tfor i in range(tmp.shape[1]):\n\t\t\tfor j in range(tmp.shape[2]):\n\t\t\t\tnp.random.shuffle(tmp[:,i,j])\n\n\t\tself.P = tmp\n\n\tdef mutate(self):\n\t\tself.P += np.random.normal(0,1,self.P.shape)\n\n\tdef pltAvg(self):\n\t\tplt.figure('Weights')\n\t\tplt.clf()\n\t\tplt.imshow(self.P.mean(axis=0),aspect='equal')\n\t\tplt.show(block=False)\n\t\tplt.pause(.001)\n\n\tdef reproduce(self,agents):\n\n\t\tself.getShuffleReducePool(agents)\n\t\tself.mutate()\n\n\t\tfs = []\n\t\tfor agent in agents:\n\t\t\tfs.append(agent.f)\n\t\tidx = sorted(range(len(fs)), key=lambda k: fs[k])\n\t\ttry:\n\t\t\tidx = idx[:int(len(idx)/2)]\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tprint('Use Even Number of Agents')\n\n\t\tPCTR = 0\n\t\tfor i in range(len(agents)):\n\t\t\tif i in idx:\n\t\t\t\tagents[i].G.W = self.P[PCTR]\n\t\t\t\tagents[i].G.A = agents[i].G.W[:7,:]\n\t\t\t\tagents[i].G.B = agents[i].G.W[8,:].T\n\t\t\t\tPCTR += 1\n\n\t\treturn(agents)\n\nclass gene:\n\tA = []\t# Weight Matrix Vertical\n\tB = []\t# Weight Matrix Horizontal\n\tW = []\t# Weigh Matrix [A B.T].T\n\n\tdef __init__(self,c):\n\t\t# [up down left right UR DR DL UL]\n\n\t\t# P = A@E@B\n\t\t# 8X1 = 8XV VXV VX1\n\t\tself.W = np.random.normal(0,1,[9,c.V])\n\t\tself.A = self.W[:7,:]\n\t\tself.B = self.W[8,:].T\n\nclass environment:\n\n\tmaster = {}\n\tc = ''\n\n\tdef __init__(self,c):\n\t\tself.c = c\n\n\tdef getSensingMatrix(self,coord):\n\t\tE = np.zeros([self.c.V,self.c.V])\n\t\tfor i in 
range(int(coord[0]-(self.c.V-1)/2),int(coord[0]+(self.c.V-1)/2+1)):\n\t\t\tfor j in range(int(coord[1]-(self.c.V-1)/2),int(coord[1]+(self.c.V-1)/2+1)):\n\t\t\t\tir = int(i - coord[0] + (self.c.V-1)/2)\n\t\t\t\tjr = int(j - coord[1] + (self.c.V-1)/2)\n\t\t\t\tcstr = '['+str(i)+','+str(j)+']'\n\t\t\t\tif cstr not in self.master:\n\t\t\t\t\tif r()*100 <= self.c.S and np.linalg.norm([coord[0],coord[1]]) xma:\n\t\t\t\txma = x \n\t\t\tif x < xm:\n\t\t\t\txm = x\n\t\t\tif y > yma:\n\t\t\t\tyma = y \n\t\t\tif y < ym:\n\t\t\t\tym = y\n\n\t\tgrid = np.zeros([xma-xm+1,yma-ym+1])\n\n\t\tfor key in master:\n\t\t\txr = eval(key)[0] - xm\n\t\t\tyr = eval(key)[1] - ym\n\t\t\tgrid[xr,yr] = master[key]\n\n\t\tfor ag in A:\n\t\t\txr = ag.c[0]-xm \n\t\t\tyr = ag.c[1]-ym \n\t\t\tgrid[xr,yr] = -1\n\n\t\tplt.figure('Environment')\n\t\tplt.clf()\n\t\tplt.imshow(grid,aspect='equal')\n\t\tplt.show(block=False)\n\t\tplt.pause(.001)\n\nclass splice:\n\n\tA = []\n\tc = ''\n\tGP = ''\n\n\tdef __init__(self,c):\n\n\t\tself.c = c\n\t\tE = environment(c)\n\t\tself.A = [agent(E,c) for i in range(c.N_A)]\n\t\tself.GP = genePool()\n\n\tdef sharedEnvironmentTrain(self):\n\n\t\tfor j in range(300000):\n\t\t\t# initialize environment\n\t\t\tself.E = environment(c)\n\t\t\tself.E.master = {}\n\t\t\tfor ag in self.A:\n\t\t\t\tag.env = self.E\n\t\t\t\tag.c = [int(g(0,10)),int(g(0,10))]\n\n\t\t\t# For each agent, take step\n\t\t\tfor i in range(self.c.L):\n\t\t\t\tfor a in self.A:\n\t\t\t\t\ta.step()\n\t\t\t\tself.E.plot(self.A)\n\n\t\t\t# Reproduce\n\t\t\tself.A = self.GP.reproduce(self.A)\n\n\t\t\tself.GP.pltAvg()\n\n\t\t\tavg = 0\n\t\t\tmaxv = 0\n\t\t\tfor ag in self.A:\n\t\t\t\tavg += ag.f\n\t\t\t\tif ag.f > maxv:\n\t\t\t\t\tmaxv = ag.f\n\t\t\t\tag.f = 0\n\n\t\t\tavg /= self.c.N_A\n\n\t\t\tprint('gen'+str(j)+': ')\n\t\t\tprint(avg)\n\t\t\tprint(maxv)\n\n\t\tplt.imshow(ag.G.W,aspect='auto')\n\t\tplt.show(block=False)\n\nclass cfg:\n\n\tN_A = 10\t# Num Agents\n\tS \t= 20\t# Sparsity per 100\n\tV\t= 10\t# Vision / Size of sensing matrix\n\tB\t= 1000\t# Resource Boundary || x,y || > 1000\n\tL \t= 100 # Lifespan\n\n\tdef __init__(self,ID):\n\t\tif ID == 1:\n\t\t\tself.N_A \t= 99\n\t\t\tself.S \t\t= 5\n\t\t\tself.V \t\t= 10\n\t\t\tself.B \t\t= 100\n\n\nif __name__ == '__main__':\n\n\n\tc = cfg(1)\n\t\n\ts = splice(c)\n\ts.sharedEnvironmentTrain()\n\n\tplt.show()\n\n\n\n","repo_name":"griffinjb/splice","sub_path":"splice.py","file_name":"splice.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"1454015228","text":"import tempfile\nimport base64\nimport os\nimport tarfile\nimport tempfile\nfrom typing import List, Union\n\nfrom app.db_models.jobcontainer import db_JobContainer\nfrom app.db_models.mljob import db_MLJob\nfrom app.db_models.model import db_Model\nfrom app.providers.model import ModelProvider\nfrom flask import send_file\n\nfrom . import frontend\n\n\n@frontend.route('/session//model', methods=['GET'])\ndef get_model(session_id):\n \"\"\" Returns the last used model of the session\"\"\"\n # first get the most recent model\n model_id = db_Model.query.\\\n join(db_MLJob).\\\n filter(db_Model.job_id == db_MLJob.uid).\\\n join(db_JobContainer).\\\n filter(db_MLJob.container_id == db_JobContainer.uid).\\\n filter(db_JobContainer.session_id == session_id).\\\n order_by(db_Model.edited.desc()).first().uid\n\n MP = ModelProvider()\n model = MP.get(model_id)\n\n # split the binary and wrap both models(b64 encoded and \n # separated using a ',')\n ner, nel = model['binary'].split(',', 1)\n\n ner_wrapper = B64TarWrapper(\"ner wrapper\", ner)\n nel_wrapper = B64TarWrapper(\"nel wrapper\", nel)\n\n try:\n os.mkdir(\"./tmp\")\n os.mkdir(\"./tmp/ner\")\n os.mkdir(\"./tmp/nel\")\n except FileExistsError:\n pass\n\n ner_dir = ner_wrapper.get_extract(\"./tmp/ner\")\n nel_dir = nel_wrapper.get_extract(\"./tmp/nel\")\n\n with open(\"/tmp/archive.tar.gz\", \"w\") as f:\n f.write(\"\")\n\n with tarfile.open(\"/tmp/archive.tar.gz\", mode='w:gz') as tar:\n tar.add(ner_dir, arcname=\"ner\")\n tar.add(nel_dir, arcname=\"nel\")\n\n try:\n return send_file(\"/tmp/archive.tar.gz\", as_attachment=True, attachment_filename='models.tar.gz')\n except FileNotFoundError:\n abort(404)\n\n\nclass B64TarWrapper:\n\n def __init__(self, identifier: str, tar_base64: str):\n self.identifier: str = identifier\n self.tar_base64: str = tar_base64\n\n @classmethod\n def from_files(cls, paths: List[str], identifier: Union[str, None] = None):\n tar_file = tempfile.mktemp()\n with tarfile.open(tar_file, 'x') as tar:\n for path in paths:\n tar.add(path, arcname=os.path.basename(path))\n with open(tar_file, 'rb') as f:\n encoded = base64.b64encode(f.read()).decode('utf-8')\n return cls(identifier=identifier, tar_base64=encoded)\n\n def get_extract(self, directory: Union[str, None] = None) -> str:\n \"\"\"\n Extracts the content into a temporary directory and returns the path as string\n :param directory: If given, extraction will take place in this directory\n :return: Path as string\n \"\"\"\n temp_tar_file = tempfile.NamedTemporaryFile()\n with open(temp_tar_file.name, 'wb') as f:\n f.write(base64.b64decode(self.tar_base64))\n\n if directory is None:\n directory = tempfile.mkdtemp()\n\n with tarfile.open(temp_tar_file.name, 'r') as tar:\n for member in tar.getmembers():\n tar.extract(member, directory)\n\n return directory\n\n def get_base64_str(self) -> str:\n return self.tar_base64\n\n","repo_name":"DATEXIS/TrainX","sub_path":"TrainX-Backend/app/routes/frontend_backend/get_model.py","file_name":"get_model.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"862527270","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom datadog_api_client.model_utils import (\n ModelNormal,\n cached_property,\n)\n\n\nif TYPE_CHECKING:\n from datadog_api_client.v2.model.opsgenie_service_create_data import OpsgenieServiceCreateData\n\n\nclass OpsgenieServiceCreateRequest(ModelNormal):\n @cached_property\n def openapi_types(_):\n from datadog_api_client.v2.model.opsgenie_service_create_data import OpsgenieServiceCreateData\n\n return {\n \"data\": (OpsgenieServiceCreateData,),\n }\n\n attribute_map = {\n \"data\": \"data\",\n }\n\n def __init__(self_, data: OpsgenieServiceCreateData, **kwargs):\n \"\"\"\n Create request for an Opsgenie service.\n\n :param data: Opsgenie service data for a create request.\n :type data: OpsgenieServiceCreateData\n \"\"\"\n super().__init__(kwargs)\n\n self_.data = data\n","repo_name":"DataDog/datadog-api-client-python","sub_path":"src/datadog_api_client/v2/model/opsgenie_service_create_request.py","file_name":"opsgenie_service_create_request.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"}
+{"seq_id":"36579319692","text":"import datetime\nimport pytz\nimport pandas as pd\nimport xlrd\nfrom openpyxl import Workbook\n\ndef timp():\n utc_now = pytz.utc.localize(datetime.datetime.utcnow())\n local_now = utc_now.astimezone(pytz.timezone(\"Europe/Chisinau\"))\n current_time = local_now.strftime(\"%H:%M:%S\")\n return current_time\n#strftime = string format time\n\n\nlista = pd.DataFrame(columns=['Nume','Ora sosirii','Ora plecarii'])\n\n\ndef venit():\n \n for row in range(lista.shape[0],75):\n timp()\n nume = input(\"A venit: \")\n if nume == \"/\":\n menu()\n break\n for i,index in lista.iterrows():\n unique = i\n name = index[\"Nume\"]\n if nume ==name:\n lista.loc[unique,\"Ora plecarii\"] = \"\"\n row +=1\n venit()\n lista.loc[row] = [nume,timp(),\"\"]\n print(lista)\n row +=1\n venit()\n\n\ndef plecat():\n left = input(\"A plecat: \")\n for i,row in lista.iterrows():\n unique = i\n name = row[\"Nume\"]\n if left == name:\n lista.loc[unique,\"Ora plecarii\"] =timp()\n \n #print(str(unique) +\". \"+ name)\n \n\ndef menu():\n print(\"1. A venit cineva nou\")\n print(\"2. A plecat cineva\")\n print(\"3. Vezi lista\")\n print(\"4. Ședința s-a terminat\")\n pick = input(\"Pick one: \")\n if pick == str(1):\n venit()\n elif pick== str(2):\n plecat()\n elif pick == str(3):\n print(lista)\n else:\n print(\"Please pick one of above\")\n menu()\n\n\nmenu()","repo_name":"DanielVorobiov/Prezenta-App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"30472218073","text":"import socket\nfrom threading import Thread\nimport binascii\nimport struct\nimport sys\n\nsend_size = 1000\nfrm = \"\"\nfor i in range(1, 10):\n frm += \"b \"\nfrm = frm[:-1]\nunpacker = struct.Struct(frm)\n\n\nclass My_Receive_Thread(Thread):\n def __init__(self):\n ''' Constructor. '''\n Thread.__init__(self)\n self.time = 0\n self.change = 1\n\n def run(self):\n self.ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.ss.bind(('10.14.1.1', 8001)) # Ip and port of the current node\n self.ss.listen(10)\n pp = 0\n a = \"\"\n while (True):\n self.sc, self.addr = self.ss.accept()\n self.l = self.sc.recv(unpacker.size)\n\n print(sys.stderr, 'received ', binascii.hexlify(self.l))\n\n unpacked_data = unpacker.unpack(self.l)\n print(sys.stderr, 'unpacked:', unpacked_data)\n print(\"zzzz\", unpacked_data)\n\n\nserver = My_Receive_Thread()\nserver.start()\n\n","repo_name":"realtime-rutgers/realtime-home","sub_path":"Orbit/client-server-Aliasghar/bit-server.py","file_name":"bit-server.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"861774070","text":"from __future__ import annotations\n\n\nfrom datadog_api_client.model_utils import (\n ModelComposed,\n cached_property,\n)\n\n\nclass IncidentFieldAttributes(ModelComposed):\n def __init__(self, **kwargs):\n \"\"\"\n Dynamic fields for which selections can be made, with field names as keys.\n\n :param type: Type of the single value field definitions.\n :type type: IncidentFieldAttributesSingleValueType, optional\n\n :param value: The single value selected for this field.\n :type value: str, none_type, optional\n \"\"\"\n super().__init__(kwargs)\n\n @cached_property\n def _composed_schemas(_):\n # we need this here to make our import statements work\n # we must store _composed_schemas in here so the code is only run\n # when we invoke this method. If we kept this at the class\n # level we would get an error because the class level\n # code would be run when this module is imported, and these composed\n # classes don't exist yet because their module has not finished\n # loading\n from datadog_api_client.v2.model.incident_field_attributes_single_value import (\n IncidentFieldAttributesSingleValue,\n )\n from datadog_api_client.v2.model.incident_field_attributes_multiple_value import (\n IncidentFieldAttributesMultipleValue,\n )\n\n return {\n \"oneOf\": [\n IncidentFieldAttributesSingleValue,\n IncidentFieldAttributesMultipleValue,\n ],\n }\n","repo_name":"DataDog/datadog-api-client-python","sub_path":"src/datadog_api_client/v2/model/incident_field_attributes.py","file_name":"incident_field_attributes.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"}
+{"seq_id":"25121348080","text":"#%%\nimport os\nimport geopandas\n\ngeojson_in = os.path.join(\"..\", \"layers\", \"mental_health.geojson\")\ndata = geopandas.read_file(geojson_in)\noutdir = os.path.dirname(geojson_in)\n\ncol = \"AUDIENCE\"\nval = \"Children and Youth\"\n# %%\n\n\nout = data[getattr(data, col)==val]\nout.to_file(os.path.join(outdir, \"child_youth_mental_health.geojson\"), driver=\"GeoJSON\")\n","repo_name":"bcgov/smk-moh-cymh","sub_path":"scripts/geopandas_subset_on_attribute_value.py","file_name":"geopandas_subset_on_attribute_value.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24745073575","text":"import pandas as pd\nimport numpy as np\nimport pretty_plots as pp\n\n\nxl_path = \"/mnt/c/Users/Shawn/Google Drive/School/Tennessee/Research/Slides, Sheets and Documents/2019/11/match_3dlim_real.xlsx\"\n\ndf = pd.read_excel(xl_path, sheet_name='Python Sheet')\n\nlw=3\nms=14\nmew=3\n\nfig = pp.pplot(df['ITF X'], df['ITF Y (50)'], fmt='-', lw=lw, logy=True)\nfig = pp.pplot(df['ITF X'], df['ITF Y (5)'], fmt='-', lw=lw*2, fig=fig)\nfig = pp.pplot(df['OTF X'], df['OTF Y'], color=8, fmt='-', lw=lw, fig=fig)\nfig = pp.pplot(df['ITF RBS X'], df['ITF RBS Y'], ms=ms, markeredgewidth=mew, logy=True, fig=fig)\nfig = pp.pplot(df['OTF RBS X'], df['OTF RBS Y'], ms=ms, color=8, xrange=[7,14], markeredgewidth=mew, xlabel='R-Rsep OMP (cm)', ylabel='Deposition (Normalized)', fig=fig)\nfig.axes[0].grid(True, which='major')\n","repo_name":"shawnzamperini/d3dscripts","sub_path":"2019/10/stitch_runs.py","file_name":"stitch_runs.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"32548973391","text":"import math\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def add_child(self, data):\n if data == self.data:\n return\n\n if data < self.data:\n if self.left:\n self.left.add_child(data)\n else:\n self.left = Node(data)\n else:\n if self.right:\n self.right.add_child(data)\n else:\n self.right = Node(data)\n\n def search(self, value):\n if self.data == value:\n return True\n elif value < self.data:\n if self.left:\n return self.left.search(value)\n else: \n return False\n elif value > self.data:\n if self.right:\n return self.right.search(value)\n else: \n return False\n\n def maximum(self):\n if not self.right:\n return self.data\n else:\n return self.right.maximum()\n\n def minimum(self):\n if not self.left:\n return self.data\n else:\n return self.left.minimum()\n\nclass Tree:\n def __init__(self, element):\n self.root = Node(element)\n\n def insert(self, num):\n self.root.add_child(num)\n\n def default_print(self, node = None, prefix = '', dashes = '', right = False, indent = ' '):\n if node == None:\n node = self.root\n if not node.left and not node.right:\n return\n if right == True:\n print(indent + prefix + str(node.data), end = '' if node.left else \"\\n\") #add count-prefix on the start\n indent += indent + ' '\n elif right == False:\n print(prefix + str(node.data), end = '' if node.left else \"\\n\")\n\n prefix += '-'\n\n if node.left:\n self.default_print(node.left, prefix, dashes, False, indent)\n if node.right:\n self.default_print(node.right, prefix, dashes, True, indent)\n\n return\n\n def pretty_print(self, node = None, prefix = '', is_left = True):\n if node == None:\n node = self.root\n if not node.left and not node.right:\n return\n if node.right != None:\n new_prefix = '| ' if is_left == True else ' '\n self.pretty_print(node.right, prefix + new_prefix, False)\n\n if is_left == True:\n print(prefix + '└── ' + str(node.data))\n else:\n print(prefix + '┌── ' + str(node.data))\n\n if node.left != None:\n new_prefix = ' ' if is_left == True else '│ '\n self.pretty_print(node.left, prefix + new_prefix, True)\n return\n\n def search_for(self, value):\n print(self.root.search(value))\n\n def maximum(self):\n print(self.root.maximum())\n\n def minimum(self):\n print(self.root.minimum())\n\nclass Forest:\n def __init__(self, size):\n self.trees_array = size*[0.5]\n for i in range(0, size):\n self.trees_array[i] = Tree(0.5 + i)\n\n def default_trees_print(self):\n for i in range(0, len(self.trees_array)):\n self.trees_array[i].default_print()\n\n def pretty_trees(self):\n for i in range(1, len(self.trees_array) + 1):\n self.trees_array[len(self.trees_array) - i].pretty_print()\n \n def insert_base(self, elements):\n for i in range(0, len(elements)):\n index = math.floor(elements[i])\n self.trees_array[index].insert(elements[i])\n\n def insert(self, value):\n index = math.floor(value)\n self.trees_array[index].insert(value) \n\n def maximum(self, index):\n return self.trees_array[index].maximum()\n\n def minimum(self, index):\n return self.trees_array[index].minimum()\n\n def search(self, value):\n index = math.floor(value)\n self.trees_array[index].search_for(value)\n\nsherwood = Forest(10)\n\nnumbers = [1.3, 1.6, 3.7, 4.0, 4.99, 7.3, 7.8, 7.7, 7.9, 7.6, 
9.3]\nsherwood.insert_base(numbers)\nsherwood.pretty_trees()\nsherwood.default_trees_print()","repo_name":"BartCzech/ASD_Lab","sub_path":"forests.py","file_name":"forests.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"27723971679","text":"import tkinter as tk\nfrom tkinter import ttk\n\n\nclass MainWindow(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n self.grid(column=0, row=0, sticky=(tk.W, tk.W, tk.E, tk.S))\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n self.master.title('Castalio Podcast')\n\n self.label = ttk.Label(self, text=\"\")\n self.label.grid(column=0, row=0)\n\n self.tree = ttk.Treeview(self, columns=('date',))\n self.tree.heading('#1', text=\"Date\")\n self.tree.insert('', 0, 'episodes', text='Episodes', open=True)\n self.tree.insert(\n 'episodes',\n 1,\n text='Episode 82',\n values=('Jan. 8, 2017',)\n )\n self.tree.grid(column=0, row=0, sticky=(tk.W, tk.W, tk.E, tk.S))\n\n self.pack(fill=tk.X)\n\n\nwin = tk.Tk()\napp = MainWindow(master=win)\napp.mainloop()\n\n","repo_name":"CastalioPodcast/playground","sub_path":"episode82/tkcastalio.py","file_name":"tkcastalio.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
+{"seq_id":"8726763880","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic.simple import direct_to_template\nfrom django.conf import settings\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^accounts/login/$', 'django.contrib.auth.views.login'),\n # url(r'^$', 'IntentoTreiky.views.home', name='home'),\n # url(r'^IntentoTreiky/', include('IntentoTreiky.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n # MediaURL\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n\n\n # Mi URL:\n url(r'^$', direct_to_template,\n {'template': 'index.html'}, \"home\"),\n url(r'^accounts/profile/$', direct_to_template,\n {'template': 'index.html'}, \"home\"),\n url(r'^home$', direct_to_template,\n {'template': 'index.html'}, \"home\"),\n url(r'^write_req/$',\n 'apps.requerimiento.views.write_req', name='write_req'),\n url(r'^view_req/$',\n 'apps.requerimiento.views.view_req', name=\"view_req\"),\n url(r'^write_project/$',\n 'apps.requerimiento.views.write_project', name='write_project'),\n url(r'^new_user/$',\n 'apps.requerimiento.views.new_user', name='new_user'),\n url(r'^resultado_usuario/$',\n 'apps.requerimiento.views.resultado_alta_usuario',\n name='resultado_alta_usuario'),\n url(r'^resultado_proyecto/$',\n 'apps.requerimiento.views.resultado_alta_proyecto',\n name='resultado_alta_proyecto'),\n url(r'^logout/$',\n 'apps.requerimiento.views.logoutuser', name='logoutuser'),\n url(r'^search_project/$',\n 'apps.requerimiento.views.searchProject', name='searchProject'),\n url(r'^update_project/$',\n 'apps.requerimiento.views.update_project', name='updateproject'),\n url(r'^asigned_user/$',\n 'apps.requerimiento.views.asig_user', name='asiguser'),\n url(r'^edit_user/$',\n 'apps.requerimiento.views.edit_user', name='editUserForm'),\n)\n","repo_name":"Treiky/Treiky","sub_path":"Treiky/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"36869234528","text":"import logging\nimport os\nfrom functools import wraps\nimport re\n\nfrom dotenv import load_dotenv\nfrom flask import Flask, jsonify, make_response, request\nfrom flask_pymongo import PyMongo\n\n\ndef init_logger(app):\n gunicorn_error_logger = logging.getLogger('gunicorn.error')\n app.logger.handlers.extend(gunicorn_error_logger.handlers)\n app.logger.setLevel(logging.DEBUG)\n\n\ndef cross_orgin(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if request.method == 'OPTIONS':\n response = make_response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.add('Access-Control-Allow-Headers', \"*\")\n response.headers.add('Access-Control-Allow-Methods', \"*\")\n return response\n\n resp = func(*args, **kwargs)\n resp.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n return resp\n\n return wrapper\n\n\ndef create_app():\n load_dotenv()\n app = Flask(__name__)\n app.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\n mongo = PyMongo(app)\n db = mongo.db\n init_logger(app)\n\n @app.route('/', methods=['GET', 'OPTIONS'])\n @cross_orgin\n def index():\n return make_response(\n \"\"\"
\", 404)\n\n return app\n","repo_name":"shambu09/manga-utils-server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"8986101314","text":"from flask import Flask, render_template, request, redirect, session\nimport os\n\nTask07 = Flask(__name__)\n\nTask07.config['SECRET_KEY'] = os.urandom(24)\n\n\n@Task07.route('/')\ndef index():\n if 'username' in session:\n return render_template('index07.html', username=str(session['username']))\n else:\n return render_template('login.html')\n\n\n@Task07.route('/login', methods=['POST'])\ndef login():\n if request.form.get('username'):\n session['username'] = request.form.get('username')\n return redirect('/')\n\n\n@Task07.route('/logout')\ndef logout():\n session.pop('username', None)\n return render_template('login.html')\n\n\nif __name__ == \"__main__\":\n Task07.debug = True\n Task07.run()\n","repo_name":"k018c1072/flaskworks","sub_path":"Task07.py","file_name":"Task07.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2495499840","text":"import rclpy\nfrom rclpy.node import Node\nfrom example_interfaces.msg import Int16\n\n\nclass NumberCounter(Node):\n\n def __init__(self):\n super().__init__(\"number_counter\")\n self.get_logger().info(\"Hello from \" + self.get_name())\n self.counter_ = 0\n self.publisher = self.create_publisher(Int16, \"number_counter\", 10)\n self.subscription = self.create_subscription(Int16, \"numbers\", self.callback, 10)\n\n def callback(self, msg):\n self.counter_ += 1\n data = Int16()\n data.data = self.counter_\n self.publisher.publish(data)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = NumberCounter()\n \n rclpy.spin(node)\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"peterborkuti/rostutorial","sub_path":"my_py_pkg/my_py_pkg/number_counter.py","file_name":"number_counter.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"8147859826","text":"# --------------\nimport pandas as pd\nfrom collections import Counter\n\n# Load dataset\ndata = pd.read_csv(path)\n\nprint('Null values :/n')\nprint(data.isnull().sum())\nprint('/n')\n\nprint('Statistics :/n')\nprint(data.describe())\n\n\n# --------------\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nsns.set_style(style='darkgrid')\n\n# Store the label values \nlabel = data['Activity'].copy()\n\nplt.figure(figsize=(10,5))\nchart = sns.countplot(\n data=data,\n x=label,\n)\n\nchart.set_xticklabels(chart.get_xticklabels(), rotation=90)\n\n# plot the countplot\n\n\n\n# --------------\n# make the copy of dataset\ndata_copy = data.copy()\n\n# Create an empty column \ndata_copy['duration'] = ''\n\n# Calculate the duration\nduration_df = (data_copy.groupby([label[label.isin(['WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'])], 'subject'])['duration'].count() * 1.28)\nduration_df = pd.DataFrame(duration_df)\n\n# Sort the values of duration\nplot_data = duration_df.reset_index().sort_values('duration', ascending=False)\nplot_data['Activity'] = plot_data['Activity'].map({'WALKING_UPSTAIRS':'Upstairs', 'WALKING_DOWNSTAIRS':'Downstairs'})\n\n\n# Plot the durations for staircase use\nplt.figure(figsize=(15,5))\nsns.barplot(data=plot_data, x='subject', y='duration', hue='Activity')\nplt.title('Participants Compared By Their Staircase Walking Duration')\nplt.xlabel('Participants')\nplt.ylabel('Total Duration [s]')\nplt.show()\n\n\n# --------------\n#exclude the Activity column and the subject column\nfeature_cols = data.columns[: -2] \n\n#Calculate the correlation values\ncorrelated_values = data[feature_cols].corr()\n#stack the data and convert to a dataframe\n\ncorrelated_values = (correlated_values.stack().to_frame().reset_index()\n .rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0:'Correlation_score'}))\n\n\n#create an abs_correlation column\ncorrelated_values['abs_correlation'] = correlated_values.Correlation_score.abs()\n\n#Picking most correlated features without having self correlated pairs\ntop_corr_fields = correlated_values.sort_values('Correlation_score', ascending = False).query('abs_correlation>0.8 ')\ntop_corr_fields = top_corr_fields[top_corr_fields['Feature_1'] != top_corr_fields['Feature_2']].reset_index(drop=True)\n\n\n\n# --------------\n# importing neccessary libraries\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import precision_recall_fscore_support as error_metric\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\n# Encoding the target variable\nle = LabelEncoder()\ndata['Activity'] = le.fit_transform(data['Activity'])\n\n# split the dataset into train and test\nX = data.drop('Activity',1)\ny = data['Activity']\n\nX_train, X_test, y_train , y_test = train_test_split(X,y,test_size = 0.3, random_state = 40)\n\n# Baseline model \nclassifier = SVC()\nclf = classifier.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\n\nprecision, recall, f_score, support = error_metric(y_test, y_pred, average = 'weighted')\n\nmodel1_score = classifier.score(X_test, y_test)\n\nprint('precision',precision)\nprint('\\n')\n\nprint('recall',recall)\nprint('\\n')\n\nprint('f1_score',f_score)\nprint('\\n')\n\nprint('score',model1_score)\nprint('\\n')\n\n\n\n\n\n# --------------\n# importing libraries\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_selection import SelectFromModel\n\nlsvc = LinearSVC(penalty = 'l1', dual 
= False, C = 0.01, random_state =42)\nlsvc.fit(X_train, y_train) \n\n# Feature selection using Linear SVC\nmodel_2 = SelectFromModel(lsvc, prefit=True)\n\nnew_train_features = model_2.transform(X_train)\nnew_test_features = model_2.transform(X_test)\n\nclassifier_2 = SVC()\nclf_2 = classifier_2.fit(new_train_features, y_train)\ny_pred_new = clf_2.predict(new_test_features)\n\nmodel2_score = classifier_2.score(new_test_features, y_test)\n\nprecision, recall, f_score, support = error_metric(y_test, y_pred_new, average = 'weighted')\n\nprint('precision',precision)\nprint('\\n')\n\nprint('recall',recall)\nprint('\\n')\n\nprint('f1_score',f_score)\nprint('\\n')\n\nprint('score',model2_score)\nprint('\\n')\n\n# model building on reduced set of features\n\n\n\n\n# --------------\n# Importing Libraries\nfrom sklearn.model_selection import GridSearchCV\n\n# Set the hyperparmeters\nparameters = {'kernel':['linear', 'rbf'], 'C': [100, 20, 1, 0.1]}\nselector = GridSearchCV(SVC(), parameters,'accuracy') \n\nselector.fit(new_train_features, y_train)\n\nmeans = selector.best_score_\nstds = selector.cv_results_['std_test_score'][selector.best_index_]\nprint('Params',selector.best_params_)\nprint('\\n')\n\nprint('means',means)\nprint('\\n')\n\nprint('stds',stds)\nprint('\\n')\n\n# Usage of grid search to select the best hyperparmeters\n# Model building after Hyperparameter tuning\nclassifier_3 = SVC(kernel = selector.best_params_['kernel'], C = selector.best_params_['C'])\n\nclf_3 = classifier_3.fit(new_train_features, y_train)\n\ny_pred_final = clf_3.predict(new_test_features)\n\nmodel3_score = classifier_3.score(new_test_features,y_test)\n\nprecision, recall, f_score, support = error_metric(y_test, y_pred_final, average = 'weighted')\n\nprint('precision',precision)\nprint('\\n')\n\nprint('recall',recall)\nprint('\\n')\n\nprint('f1_score',f_score)\nprint('\\n')\n\nprint('score',model3_score)\nprint('\\n')\n\n\n\n\n","repo_name":"ninadangchekar96/ga-learner-dsmp-repo","sub_path":"SVM-(Human-Activity-Recognition-with-Smartphones)/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13221904513","text":"import numpy as np\ndef make_gini_ranges():\n gini_ranges = []\n \n# x = np.linspace(1000, 12000, num = 45)\n # x = np.linspace(1000, 12000, num = 23)\n x = np.linspace(1000, 12000, num = 12)\n # x = np.linspace(1000, 12000, num = 2)\n for i in range(len(x)-1):\n gini_ranges.append((x[i], x[i+1]))\n # print gini_ranges\n return gini_ranges\n\ndef set_SN_gweights(sn_arr, gini_ranges):\n for SN in sn_arr:\n g_weights = []\n for g_r in gini_ranges:\n g_locs = np.where((SN.wavelength >= g_r[0]) & (SN.wavelength < g_r[1]))[0]\n g_w = np.sum(SN.ivar[g_locs])\n if g_w == 0.:\n g_w = np.nan\n g_weights.append(g_w)\n SN.g_array = g_weights\n\ndef calc_gini_coeffs(sn_arr):\n num_specs = []\n for i in range(len(sn_arr[0].g_array)):\n num_specs.append(len(sn_arr))\n for SN in sn_arr:\n if np.isnan(SN.g_array[i]):\n num_specs[i] -= 1\n # print num_specs\n \n gini_coeffs = []\n for i in range(len(sn_arr[0].g_array)):\n gini_num = 0.\n gini_denom = 0.\n for SNi in sn_arr:\n g_wi = SNi.g_array[i]\n jsum = 0.\n for SNj in sn_arr:\n g_wj = SNj.g_array[i]\n g_diff = np.absolute(g_wi - g_wj)\n jsum = np.nansum([jsum, g_diff])\n gini_num = np.nansum([gini_num,jsum])\n gini_denom = np.nansum([gini_denom, g_wi])\n gini_denom *= 2.*num_specs[i]\n gini_coeffs.append(gini_num/gini_denom)\n return gini_coeffs, num_specs\n\ndef gini_coeffs(sn_arr):\n\tgini_ranges = make_gini_ranges()\n\tset_SN_gweights(sn_arr, gini_ranges)\n\tgini_coeffs, num_specs = calc_gini_coeffs(sn_arr)\n\treturn gini_coeffs, num_specs, gini_ranges\n\ndef calc_deweight_ranges(sn_arr, gini_coeffs, gini_ranges, gini_range_meds, tol=.5):\n\tdeweight_spec = []\n\tdeweight_ranges = []\n\treplace_ivar_sns = []\n\tswaps = []\n\t# max_weight_list = []\n\n\t# g_list = []\n\t# sum_gs = []\n\t# for j, SN in enumerate(sn_arr):\n\t# g_list.append(SN.g_array[i])\n\t# sum_gs.append(np.nansum(SN.g_array[i]))\n\t# g_T = np.transpose(g_list)\n\n\t# print len(g_T), len(gini_coeffs)\n\n\t# for i in range(len(gini_coeffs)):\n\t# if gini_coeffs[i] >= tol:\n\t# \tmax_weight_SN_ind = np.argmax(g_T[i])\n\t# \tprint sn_arr[max_weight_SN_ind].name\n\t# raise TypeError\n\tbiasing_SNs = []\n\tbiasing_tuples = []\n\n\t#first find where coeffs are greater than tolerance\n\tfor i in range(len(gini_coeffs)):\n\t if gini_coeffs[i] >= tol:\n\t g_list = []\n\t #populate g_list with individual SN weights in this wave range\n\t for j, SN in enumerate(sn_arr):\n\t g_list.append(SN.g_array[i])\n\t #if SN has no weight, set to nan\n\t for k, g in enumerate(g_list):\n\t if np.isnan(g_list[k]):\n\t g_list[k] = 0.\n\t #sort list to pick out SN with the largest weight in this wave range\n\t g_sort = np.argsort(g_list)\n\t g_sort_vals = np.sort(g_list)\n\t biasing_tuples.append((i, sn_arr[g_sort[-1]])) #store index of wave range and biasing SN\n\n\n\t#biasing_tuples contains most weighted SN for each gini coeff > tol\n\n\t# print g_sort\n\tscale_dict = {}\n\tscale_ref_dict = {}\n\tdeweight_SNs = []\n\tfor tup in biasing_tuples:\n\t\t# print tup[1].g_array[tup[0]]\n\t\tg = gini_ranges[tup[0]] # wavelength range\n\t\tg_locs = np.where((tup[1].wavelength >= g[0]) & (tup[1].wavelength < g[1]))[0] #indices valid for this gini range\n\t\tnon_zero_locs = np.where(tup[1].ivar[g_locs] != 0.)\n\t\t# print tup[0], tup[1].name, gini_range_meds[tup[0]], np.nansum(tup[1].ivar[g_locs][non_zero_locs]), gini_range_meds[tup[0]]/np.nansum(tup[1].ivar[g_locs][non_zero_locs])\n\t\tif tup[1].name in scale_dict:\n\t\t\tmed_in_range = 
np.nansum(tup[1].ivar[g_locs][non_zero_locs])\n\t\t\tscale_dict[tup[1].name] = np.amax([scale_dict[tup[1].name],med_in_range]) #store max of biasing SN in the unbalanced region\n\t\t\tscale_ref_dict[tup[1].name] = np.amax([scale_ref_dict[tup[1].name],gini_range_meds[tup[0]]]) #store max of all SNe in the unbalanced region\n\t\telse:\n\t\t\tscale_dict[tup[1].name] = np.nansum(tup[1].ivar[g_locs][non_zero_locs])\n\t\t\tscale_ref_dict[tup[1].name] = gini_range_meds[tup[0]]\n\t\tdeweight_SNs.append(tup[1])\n\t# for el in scale_dict.keys():\n\t# \tprint el, scale_dict[el], 'Ref: ', scale_ref_dict[el], 'Fraction: ', scale_dict[el]/scale_ref_dict[el]\n\n\tdeweight_SNs = set(deweight_SNs)\n\t# for SN in deweight_SNs:\n\t# \tprint SN.name\n\t# scale_to = np.median(tot_weights)\n\t# print scale_to\n\t# tot_sort = np.argsort(tot_weights)\n\t# print \n\t# for SN in deweight_SNs:\n\t# \tprint SN.name, np.nanmedian(SN.ivar[SN.x1:SN.x2])\n\t# raise TypeError\n\n\t# s = 0\n\t# found_scale_spec = False\n\t# while not found_scale_spec:\n\t# \ts+=1\n\t# \tmed_weights = []\n\t# \tfor SN in deweight_SNs:\n\t# \t\tmed_weights.append(np.nanmedian(SN.ivar[SN.x1:SN.x2]))\n\n\t# \ttemp = sn_arr[tot_sort[-s]]\n\t# \tif sn_arr[tot_sort[-s]] not in deweight_SNs and all(np.nanmedian(temp.ivar[temp.x1:temp.x2]) < w for w in med_weights):\n\t# \t\tfound_scale_spec = True\n\t# \t\tscale_ivar_sn = sn_arr[tot_sort[-s]]\n\n\t# for SN in deweight_SNs:\n\t\t# print 'Scaling', SN.name, 'to', sn_arr[tot_sort[-s]].name, 'Fraction: ', np.nanmedian(SN.ivar[SN.x1:SN.x2])/np.nanmedian(scale_ivar_sn.ivar[scale_ivar_sn.x1:scale_ivar_sn.x2]), np.nanmedian(SN.ivar[SN.x1:SN.x2])\n\t\t\n\t\t# if len(prev_swaps) < 1:\n\t\t# \tfound_scale_spec = True\n\t\t# elif (sn_arr[g_sort[-1]].name == prev_swaps[i][0] and sn_arr[g_sort[-s]].name != prev_swaps[i][1]) and g_list[g_sort[-1]]/g_list[g_sort[-s]] > 1.01:\n\t\t# \tfound_scale_spec = True\n\t\t# elif (sn_arr[g_sort[-1]].name != prev_swaps[i][0] and sn_arr[g_sort[-s]].name != prev_swaps[i][1]) and g_list[g_sort[-1]]/g_list[g_sort[-s]] > 1.01:\n\t\t# \tfound_scale_spec = True\n\n\t# print 'Scale ', sn_arr[g_sort[-1]].name, ' with ', sn_arr[g_sort[-s]].name, 'Fraction: ', g_list[g_sort[-1]]/g_list[g_sort[-s]], 'in range', gini_ranges[i], s\n\t# # print 'Deweighting ', sn_arr[g_sort[-1]].name, ' by factor of 1/2 in range ', gini_ranges[i]\n\t# for g in g_list:\n\t# \ttot_weight = np.nansum(g)\n\t# max_weight_list.append(sn_arr[g_sort[-1]])\n\n\t# swaps.append((sn_arr[g_sort[-1]].name, sn_arr[g_sort[-s]].name))\n\t# scale_ivar_sns.append(sn_arr[g_sort[-s]])\n\t# max_ind = np.nanargmax(g_list)\n\t# deweight_spec.append(sn_arr[max_ind])\n\t# deweight_ranges.append(gini_ranges[i])\n\t# else:\n\t# \tswaps.append((None,None))\n\t# return deweight_SNs, scale_ivar_sn, deweight_ranges, scale_to\n\treturn deweight_SNs, scale_dict, scale_ref_dict\n\ndef deweight_biasing_SNe(deweight_SNs, scale_dict, scale_ref_dict):\n\tfor SN in deweight_SNs:\n\t\t# scale_median = np.nanmedian(scale_ivar_sn.ivar[scale_ivar_sn.x1:scale_ivar_sn.x2])\n\t\t# sn_median = np.nanmedian(SN.ivar[SN.x1:SN.x2])\n\t\t# print sn_median/scale_to\n\t\t# SN.ivar = SN.ivar*(scale_median/sn_median)\n\n\t\t# print SN.name, scale_ref_dict[SN.name], scale_dict[SN.name], scale_ref_dict[SN.name]/scale_dict[SN.name]\n\t\tscale = scale_ref_dict[SN.name]/scale_dict[SN.name]\n\t\tSN.ivar = SN.ivar*scale\n\t# print 
","repo_name":"msiebert1/kaepora","sub_path":"src/gini.py","file_name":"gini.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"2"}
+{"seq_id":"36804292543","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\ntry:\n from importlib import import_module\nexcept ImportError:\n # Python 2.6 fallback\n from django.utils.importlib import import_module\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\n\n_cache = {}\n\n\nclass BaseAddon(object):\n\n \"\"\"Add-ons should inherit from this\"\"\"\n # Must fill in!\n slug = None\n # fontawesome 3.2.1 icon class name\n iconclass = 'fa fa-circle-blank'\n # General urlpatterns that will reside in /background/addon-slug/...\n urlpatterns = []\n navs = {\n # Like this: _('Users'): {\"url_name\":\"wxrbot:user_list\", \"iconclass\": \"fa fa-user\", \"nav_id\":\"nav-user\"},\n }\n sort_id = 0\n\n class RenderMedia:\n js = []\n css = {}\n\n\ndef register(AddonsClass):\n \"\"\"\n Register a add-on class. This function will call back your add-on's\n constructor.\n \"\"\"\n if AddonsClass in list(_cache.keys()):\n raise Exception(\"Addons class already registered\")\n addon = AddonsClass()\n _cache[AddonsClass] = addon\n\n\ndef get_addons():\n \"\"\"Get loaded addons - do not call before all addons are loaded.\"\"\"\n return _cache\n\n\ndef get_addons_urls():\n urlpatterns = []\n for addon in list(get_addons().values()):\n slug = getattr(addon, 'slug', None)\n if slug:\n urlpatterns += [\n url('^' + slug + '/', include(addon.urlpatterns)),\n ]\n else:\n urlpatterns += [\n url('^', include(addon.urlpatterns)),\n ]\n return urlpatterns\n\n\ndef get_module(app, modname, verbose=False, failfast=False, success=True):\n \"\"\"\n Internal function to load a module from a single app.\n \"\"\"\n module_name = '%s.%s' % (app, modname)\n try:\n module = import_module(module_name)\n except ImportError as e:\n if failfast:\n if 'No module named' in str(e):\n pass\n else:\n raise e\n elif verbose:\n print(\"Could not load %r from %r: %s\" % (modname, app, e))\n return None\n if success or verbose:\n print(\"Loaded %r from %r\" % (modname, app))\n return module\n\n\ndef load(modname, verbose, failfast, success):\n \"\"\"\n Loads all modules with name 'modname' from all installed apps.\n If verbose is True, debug information will be printed to stdout.\n If failfast is True, import errors will not be surpressed.\n \"\"\"\n for app in settings.INSTALLED_APPS:\n get_module(app, modname, verbose, failfast, success)\n\n\ndef load_bgframework_addons(verbose=False, failfast=False, success=True):\n load('bgf_addons', verbose, failfast, success)\n","repo_name":"tkliuxing/bgframework","sub_path":"src/bgframework/add_ons.py","file_name":"add_ons.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10639230138","text":"import sys\n\nREDMINE_URL = \"https://redmine.openinfosecfoundation.org\"\n\ndef get_api_key():\n try:\n with open(\"api.keys\", \"r\") as f:\n key = f.readline().strip()\n except OSError as e:\n print(e)\n key = None\n return key\n\nREDMINE_KEY = get_api_key()\nUSER_NAME = \"current\"\n","repo_name":"inashivb/bp-feats","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"18269759373","text":"import pandas as pd\r\nimport dash\r\nfrom dash import dcc, html\r\nfrom dash.dependencies import Input, Output\r\n\r\n# Assuming your dataset is stored in a CSV file named 'your_data.csv'\r\n# Replace 'your_data.csv' with the actual file name or provide the DataFrame directly if you have it loaded\r\nfile_path = 'data1.csv'\r\ndf = pd.read_csv(file_path)\r\n\r\n# Removing non-numeric characters and converting 'kWh' column to numeric\r\ndf['kWh'] = pd.to_numeric(df['kWh'].replace('[^\\d.]', '', regex=True), errors='coerce')\r\n\r\n# Convert 'kWh' to 'MWh'\r\ndf['MWh'] = df['kWh'] / 1000 # 1 MWh = 1000 kWh\r\n\r\n# Rounding the 'MWh' values to 3 significant figures\r\ndf['MWh'] = df['MWh'].round(3)\r\n\r\n# Remove 'London' data\r\ndf = df[df['Area'] != 'London']\r\n\r\n# Create a Dash web application\r\napp = dash.Dash(__name__)\r\n\r\n# Define layout of the web application\r\napp.layout = html.Div([\r\n html.H1(\"Energy Consumption Dashboard\"),\r\n\r\n # Tabs for different pages\r\n dcc.Tabs([\r\n dcc.Tab(label='Borough Comparison', children=[\r\n # Dropdown for selecting the years\r\n dcc.Dropdown(\r\n id='year-dropdown',\r\n options=[\r\n {'label': str(year), 'value': year} for year in df['LEGGI_Year'].unique()\r\n ],\r\n multi=True,\r\n value=list(df['LEGGI_Year'].unique()), # Set the default to all years\r\n style={'width': '50%'}\r\n ),\r\n\r\n # Graph to display the comparison\r\n dcc.Graph(id='borough-comparison-graph'),\r\n ]),\r\n\r\n dcc.Tab(label='Sector Pie Chart', children=[\r\n # Dropdown for selecting the year\r\n dcc.Dropdown(\r\n id='pie-chart-year-dropdown',\r\n options=[\r\n {'label': str(year), 'value': year} for year in df['LEGGI_Year'].unique()\r\n ],\r\n multi=False,\r\n value=df['LEGGI_Year'].min(), # Set the default to the minimum year\r\n style={'width': '50%'}\r\n ),\r\n\r\n # Graph to display the pie chart\r\n dcc.Graph(id='pie-chart'),\r\n ]),\r\n\r\n dcc.Tab(label='Fuel-wise MWh per Year', children=[\r\n # Graph to display the fuel-wise MWh per year\r\n dcc.Graph(id='fuel-wise-mwh'),\r\n ]),\r\n ]),\r\n])\r\n\r\n# Define callback to update the borough comparison graph based on user input\r\n@app.callback(\r\n Output('borough-comparison-graph', 'figure'),\r\n [Input('year-dropdown', 'value')]\r\n)\r\ndef update_borough_comparison(selected_years):\r\n filtered_df = df[df['LEGGI_Year'].isin(selected_years)]\r\n\r\n borough_comparison_data = []\r\n\r\n for year in selected_years:\r\n year_data = filtered_df[filtered_df['LEGGI_Year'] == year]\r\n borough_comparison_data.append({'x': year_data['Area'], 'y': year_data['MWh'], 'type': 'bar', 'name': year})\r\n\r\n fig = {\r\n 'data': borough_comparison_data,\r\n 'layout': {\r\n 'title': f'MWh Comparison ({\", \".join(map(str, selected_years))})',\r\n 'barmode': 'group'\r\n }\r\n }\r\n\r\n return fig\r\n\r\n# Define callback to update the pie chart based on the selected year\r\n@app.callback(\r\n Output('pie-chart', 'figure'),\r\n [Input('pie-chart-year-dropdown', 'value')]\r\n)\r\ndef update_pie_chart(selected_year):\r\n filtered_df = df[df['LEGGI_Year'] == selected_year]\r\n\r\n # Exclude 'Total' from the pie chart\r\n filtered_df = filtered_df[filtered_df['Sector'] != 'Total']\r\n\r\n # Calculate total MWh for each sector\r\n total_mwh_per_sector = filtered_df.groupby('Sector')['MWh'].sum().reset_index()\r\n\r\n # Create the pie chart\r\n fig = {\r\n 'data': [\r\n {\r\n 'labels': total_mwh_per_sector['Sector'],\r\n 'values': total_mwh_per_sector['MWh'],\r\n 'type': 'pie',\r\n 
'name': 'MWh distribution'\r\n }\r\n ],\r\n 'layout': {\r\n 'title': f'MWh Distribution by Sector for {selected_year}'\r\n }\r\n }\r\n\r\n return fig\r\n\r\n# Define callback to update the fuel-wise MWh per year graph\r\n@app.callback(\r\n Output('fuel-wise-mwh', 'figure'),\r\n [Input('year-dropdown', 'value')]\r\n)\r\ndef update_fuel_wise_mwh(selected_years):\r\n try:\r\n filtered_df = df[df['LEGGI_Year'].isin(selected_years)]\r\n\r\n fuel_wise_mwh_data = []\r\n\r\n max_y_per_fuel = {} # Dictionary to store the maximum y-axis value for each fuel type\r\n\r\n for fuel_type in filtered_df['Fuel'].unique():\r\n # Exclude 'Total' from fuel types\r\n if fuel_type != 'Total':\r\n fuel_data = filtered_df[filtered_df['Fuel'] == fuel_type]\r\n max_y_per_fuel[fuel_type] = fuel_data['MWh'].max() # Store the maximum value for the current fuel type\r\n fuel_wise_mwh_data.append({'x': selected_years, 'y': fuel_data['MWh'], 'type': 'bar', 'name': fuel_type})\r\n\r\n max_y = max(max_y_per_fuel.values(), default=0) # Get the overall maximum y-axis value\r\n\r\n fig = {\r\n 'data': fuel_wise_mwh_data,\r\n 'layout': {\r\n 'title': 'Fuel-wise MWh per Year',\r\n 'xaxis': {'title': 'Year'},\r\n 'yaxis': {'title': 'MWh', 'range': [0, max_y * 1.1]}, # Set the y-axis range based on max_y\r\n 'shapes': [], # Ensure shapes are defined to avoid potential issues\r\n }\r\n }\r\n\r\n # Add faint lines between different years\r\n for i in range(1, len(selected_years)):\r\n fig['layout']['shapes'].append({\r\n 'type': 'line',\r\n 'x0': selected_years[i],\r\n 'y0': 0,\r\n 'x1': selected_years[i],\r\n 'y1': max_y,\r\n 'line': {\r\n 'color': 'rgba(128, 128, 128, 0.5)',\r\n 'width': 2,\r\n 'dash': 'dash',\r\n },\r\n })\r\n\r\n return fig\r\n\r\n except Exception as e:\r\n print(e)\r\n return {'data': [], 'layout': {}}\r\n\r\n# Run the web application\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","repo_name":"Nimosteve88/ACHACK","sub_path":"csv KWH data per borough.py","file_name":"csv KWH data per borough.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"41564287900","text":"import sys\nimport glob\nimport socket\nimport argparse\nimport urllib.request\nimport os\nimport os.path\nimport subprocess\nimport platform\nimport json\nimport pandas as pd\nfrom time import sleep\nfrom sys import exit\nfrom html.parser import HTMLParser\n\ndef report():\n return \"report.html\"\n\n# BEGIN: Check if file is text\nENC = {}\ndef is_binary(file_name):\n try:\n with open(file_name, 'tr', encoding='cp1251') as check_file: # try open file in text mode\n check_file.read()\n ENC[file_name] = 'cp1251'\n return False\n except: # if fail then file is non-text (binary)\n try:\n with open(file_name, 'tr', encoding='utf-8-sig') as check_file: # try open file in text mode\n check_file.read()\n ENC[file_name] = 'utf-8-sig'\n return False\n except: # if fail then file is non-text (binary)\n return True\n# END: Check if file is text\n\n\nclass TableHTMLParser(HTMLParser):\n\n def __init__(self):\n self.table = False\n self.data = {\n 'Source': [],\n 'Compared': [],\n 'Percent': [],\n 'Lines': [],\n 'URL': []\n }\n self.tind = -1\n self.tag = None\n super().__init__()\n\n def handle_starttag(self, tag, attrs):\n self.tag = tag\n if self.table:\n if tag == 'tr':\n self.tind = -1\n elif tag == 'td':\n self.tind += 1\n elif tag == 'a' and self.tind == 0:\n self.data['URL'].append(attrs[0][1])\n elif tag == 'table':\n self.table = True\n\n def handle_endtag(self, tag):\n self.tag = ''\n if tag == 'table':\n self.table = False\n self.tind = -1\n\n def handle_data(self, data):\n if self.table:\n if self.tind == 2 and self.tag == 'td':\n self.data['Lines'].append(int(data))\n elif self.tind == 0 and self.tag == 'a':\n _, p = data.split()\n self.data['Source'].append(data)\n self.data['Percent'].append(int(p[1:-2]))\n elif self.tind == 1 and self.tag == 'a':\n _, p = data.split()\n self.data['Compared'].append(data)\n self.data['Percent'][-1] = max(self.data['Percent'][-1], int(p[1:-2]))\n\ndef get_args_parser():\n parser = argparse.ArgumentParser(description='Run MOSS')\n parser.add_argument('files', type=str, nargs='+',\n help='files to send')\n parser.add_argument('-X', action='store_true',\n help='use experimental server')\n parser.add_argument('-d', action='store_true',\n help='specifies that submissions are by directory, not by file')\n parser.add_argument('-uid', type=int, default=939277019,\n help='ID to login')\n parser.add_argument('-m', type=int, default=100,\n help='maximum number of times a given passage may appear before it is ignored')\n parser.add_argument('-n', type=int, default=250,\n help='the number of matching files to show in the results')\n parser.add_argument('-l', type=str, default='c',\n help='the source language of the tested programs')\n parser.add_argument('-b', type=str, nargs='*', default=[],\n help='base files - code that appears in the base files is not counted in matches')\n parser.add_argument('-c', type=str, default='',\n help='comment string that is attached to the generated report')\n\n return parser\n\n\ndef load_args(fname='moss.json'):\n with open(fname, 'r') as f:\n args = json.load(f)\n return args\n\n\ndef call_moss(**args):\n \"\"\"\n Arguments same as for argparse. See get args_parser.\n \"\"\"\n\n noreq = 'Request not sent.';\n\n print('Checking files . . .')\n b = []\n for f in args['b']:\n for arg in glob.glob(f, recursive=True):\n if not os.path.isfile(arg):\n print('Base file %s does not exist. %s' % (arg, noreq))\n exit()\n if not os.access(arg, os.R_OK):\n print('Base file %s is not readable. 
%s' % (arg, noreq))\n exit()\n if is_binary(arg):\n print('Base file %s is not a text file. %s' % (arg, noreq))\n exit()\n b.append(arg)\n\n files = []\n for f in args['files']:\n for arg in glob.glob(f, recursive=True):\n if not os.path.isfile(arg):\n print('File %s does not exist. %s' % (arg, noreq))\n exit()\n if not os.access(arg, os.R_OK):\n print('File %s is not readable. %s' % (arg, noreq))\n exit()\n if is_binary(arg):\n print('File %s is not a text file. %s' % (arg, noreq))\n exit()\n files.append(arg)\n\n if not files:\n print('No files submitted.')\n exit()\n\n print('OK')\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(600)\n server_address = ('moss.stanford.edu', 7690) # 171.64.78.49\n\n try:\n sock.connect(server_address)\n except Exception:\n print('Could not connect to server %s: %s' % server_address)\n exit()\n\n\n def upload_file(filename, i, lang):\n name = filename.replace(' ', '_')\n name = os.path.join(\n os.path.basename(os.path.dirname(name)),\n os.path.basename(name)).replace('\\\\', '/')\n print('Uploading %s ...' % name)\n res = b''\n with open(filename, 'rt', encoding=ENC[filename]) as file:\n v = (file.read() + '\\r\\n').encode(ENC[filename]) #.decode('utf8').encode('utf8')\n size = len(v)\n res += ('file %s %s %s %s\\n' % (i, lang, size, name)).encode('utf8')\n res += v\n print('done')\n return res\n\n\n sock.sendall((('moss %s\\n' % (args['uid'])) +\n ('directory %s\\n' % (int(args['d']))) +\n ('directory %s\\n' % (int(args['d']))) +\n ('X %s\\n' % (int(args['X']))) +\n ('maxmatches %s\\n' % (args['m'])) +\n ('show %s\\n' % (args['n'])) +\n ('language %s\\n' % (args['l']))).encode('utf8'))\n\n res = sock.recv(3600)\n if res == b'no\\n':\n print('Unrecognized language %s' % (args['l']))\n sock.sendall(b'end\\n')\n sock.close()\n exit()\n\n to_send = b''\n for f in b:\n to_send += upload_file(f, 0, args['l'])\n\n for i, f in enumerate(files):\n to_send += upload_file(f, i+1, args['l'])\n\n to_send += ('query 0 %s\\n' % (args['c'])).encode('utf8')\n sock.sendall(to_send)\n print(\"Query submitted. Waiting for the server's response.\")\n url = sock.recv(100).decode('utf-8')\n print(url)\n sock.sendall(b'end\\n')\n sock.close()\n return url\n\n\ndef sort_moss(url):\n page = urllib.request.urlopen(url).read().decode('utf8')\n parserHTML = TableHTMLParser()\n parserHTML.feed(page)\n table = pd.DataFrame(parserHTML.data)\n return page, table.sort_values('Percent', ascending=False)\n \n\ndef report_moss(page, sortedtable, url):\n prepage = page.split('
')[0] + url\n\n body = '\\n
\\n'\n body += '
Source
Compared
Lines Matched\\n'\n for i in range(len(sortedtable)):\n row = sortedtable.iloc[i]\n body += '